/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <sys/kdebug_triage.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()). The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (simple linked list)
 * for now.
 */
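
/*
 * Rough lifecycle sketch (illustrative only: the real call sites live in
 * the Mach-O loader and the VM layer, and the variable names below are
 * hypothetical):
 *
 *     memory_object_t pager;
 *
 *     // set up (or re-use) a pager for an encrypted range of the binary
 *     pager = apple_protect_pager_setup(backing_object, backing_offset,
 *         crypto_backing_offset, crypt_info,
 *         crypto_start, crypto_end, FALSE);
 *
 *     // VM then calls apple_protect_pager_map() when the object gets
 *     // mapped, apple_protect_pager_data_request() for each page-in
 *     // (which decrypts via crypt_info->page_decrypt()), and
 *     // apple_protect_pager_last_unmap() when the last mapping goes
 *     // away, which may trigger termination of the memory object.
 */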

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
boolean_t apple_protect_pager_backing_object(
    memory_object_t mem_obj,
    memory_object_offset_t mem_obj_offset,
    vm_object_t *backing_object,
    vm_object_offset_t *backing_offset);

#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
void crypt_info_deallocate(struct pager_crypt_info *crypt_info);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
    .memory_object_reference = apple_protect_pager_reference,
    .memory_object_deallocate = apple_protect_pager_deallocate,
    .memory_object_init = apple_protect_pager_init,
    .memory_object_terminate = apple_protect_pager_terminate,
    .memory_object_data_request = apple_protect_pager_data_request,
    .memory_object_data_return = apple_protect_pager_data_return,
    .memory_object_data_initialize = apple_protect_pager_data_initialize,
    .memory_object_map = apple_protect_pager_map,
    .memory_object_last_unmap = apple_protect_pager_last_unmap,
    .memory_object_backing_object = apple_protect_pager_backing_object,
    .memory_object_pager_name = "apple_protect"
};
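
/*
 * VM never calls these routines directly; it goes through the generic
 * memory_object_*() entry points, which dispatch on this ops vector.
 * A minimal sketch of that dispatch, under the assumption that it still
 * looks like the version in osfmk/vm/memory_object.c:
 *
 *     kern_return_t
 *     memory_object_map(memory_object_t memory_object, vm_prot_t prot)
 *     {
 *         return (memory_object->mo_pager_ops->memory_object_map)(
 *             memory_object, prot);
 *     }
 */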

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
    /* mandatory generic header */
    struct memory_object ap_pgr_hdr;

    /* pager-specific data */
    queue_chain_t pager_queue;      /* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define ap_pgr_hdr_ref ap_pgr_hdr.mo_ref
#else
    os_ref_atomic_t ap_pgr_hdr_ref; /* reference count */
#endif
    bool is_ready;                  /* is this pager ready? */
    bool is_mapped;                 /* is this mem_obj mapped? */
    bool is_cached;                 /* is this pager cached? */
    vm_object_t backing_object;     /* VM object w/ encrypted data */
    vm_object_offset_t backing_offset;
    vm_object_offset_t crypto_backing_offset; /* for key... */
    vm_object_offset_t crypto_start;
    vm_object_offset_t crypto_end;
    struct pager_crypt_info *crypt_info;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
unsigned int apple_protect_pager_count = 0;        /* number of pagers */
unsigned int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
unsigned int apple_protect_pager_cache_limit = 20;

/*
 * Statistics & counters.
 */
unsigned int apple_protect_pager_count_max = 0;
unsigned int apple_protect_pager_count_unmapped_max = 0;
unsigned int apple_protect_pager_num_trim_max = 0;
unsigned int apple_protect_pager_num_trim_total = 0;

/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end,
    boolean_t cache_pager);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
    boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);

#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL    0xffffffff
#define PAGER_INIT   0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A)                                   \
    MACRO_BEGIN                                                 \
    if ((apple_protect_pagerdebug & LEVEL) == LEVEL) {          \
        printf A;                                               \
    }                                                           \
    MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

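/*
 * Note that PAGER_DEBUG() takes the whole printf() argument list as a
 * single parenthesized macro argument, so call sites need the doubled
 * parentheses, e.g. (illustrative, matching the calls below):
 *
 *     PAGER_DEBUG(PAGER_PAGEIN,
 *         ("data_request: pager %p offset 0x%llx\n", pager, offset));
 *
 * The messages only appear on DEBUG kernels, and only for the bits set
 * in "apple_protect_pagerdebug".
 */
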
/*
 * apple_protect_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
    apple_protect_pager_t pager;
    kern_return_t kr;
    memory_object_attr_info_data_t attributes;

    PAGER_DEBUG(PAGER_ALL,
        ("apple_protect_pager_init: %p, %p, %x\n",
        mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    pager = apple_protect_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->ap_pgr_hdr.mo_control = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS) {
        panic("apple_protect_pager_init: "
            "memory_object_change_attributes() failed");
    }

#if CONFIG_SECLUDED_MEMORY
    if (secluded_for_filecache) {
        memory_object_mark_eligible_for_secluded(control, TRUE);
    }
#endif /* CONFIG_SECLUDED_MEMORY */

    return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since the
 * pages provided by this EMM are never supposed to be dirty: VM should
 * simply discard the contents and reclaim the pages if it needs to.
 */
kern_return_t
apple_protect_pager_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("apple_protect_pager_data_return: should never get called");
    return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt)
{
    panic("apple_protect_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int apple_protect_pager_data_request_debug = 0;
kern_return_t
apple_protect_pager_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
#if !DEBUG
    __unused
#endif
    vm_prot_t protection_required,
    memory_object_fault_info_t mo_fault_info)
{
    apple_protect_pager_t pager;
    memory_object_control_t mo_control;
    upl_t upl;
    int upl_flags;
    upl_size_t upl_size;
    upl_page_info_t *upl_pl;
    unsigned int pl_count;
    vm_object_t src_top_object, src_page_object, dst_object;
    kern_return_t kr, retval;
    vm_offset_t src_vaddr, dst_vaddr;
    vm_offset_t cur_offset;
    vm_offset_t offset_in_page;
    kern_return_t error_code;
    vm_prot_t prot;
    vm_page_t src_page, top_page;
    int interruptible;
    struct vm_object_fault_info fault_info;
    int ret;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

    retval = KERN_SUCCESS;
    src_top_object = VM_OBJECT_NULL;
    src_page_object = VM_OBJECT_NULL;
    upl = NULL;
    upl_pl = NULL;
    fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
    fault_info.stealth = TRUE;
    fault_info.io_sync = FALSE;
    fault_info.mark_zf_absent = FALSE;
    fault_info.batch_pmap_op = FALSE;
    interruptible = fault_info.interruptible;

    pager = apple_protect_pager_lookup(mem_obj);
    assert(pager->is_ready);
    assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

    fault_info.lo_offset += pager->backing_offset;
    fault_info.hi_offset += pager->backing_offset;

    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->ap_pgr_hdr.mo_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE |    /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    pl_count = 0;
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }
    dst_object = memory_object_control_to_vm_object(mo_control);
    assert(dst_object != VM_OBJECT_NULL);

    /*
     * We'll map the encrypted data in the kernel address space from the
     * backing VM object (itself backed by the encrypted file via
     * the vnode pager).
     */
    src_top_object = pager->backing_object;
    assert(src_top_object != VM_OBJECT_NULL);
    vm_object_reference(src_top_object); /* keep the source object alive */

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    pl_count = length / PAGE_SIZE;
    for (cur_offset = 0;
        retval == KERN_SUCCESS && cur_offset < length;
        cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Map the source (encrypted) page in the kernel's
         * virtual address space.
         * We already hold a reference on the src_top_object.
         */
retry_src_fault:
        vm_object_lock(src_top_object);
        vm_object_paging_begin(src_top_object);
        error_code = 0;
        prot = VM_PROT_READ;
        src_page = VM_PAGE_NULL;
        kr = vm_fault_page(src_top_object,
            pager->backing_offset + offset + cur_offset,
            VM_PROT_READ,
            FALSE,
            FALSE,              /* src_page not looked up */
            &prot,
            &src_page,
            &top_page,
            NULL,
            &error_code,
            FALSE,
            &fault_info);
        switch (kr) {
        case VM_FAULT_SUCCESS:
            break;
        case VM_FAULT_RETRY:
            goto retry_src_fault;
        case VM_FAULT_MEMORY_SHORTAGE:
            if (vm_page_wait(interruptible)) {
                goto retry_src_fault;
            }
            ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_APPLE_PROTECT_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_APPLE_PROTECT_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
            OS_FALLTHROUGH;
        case VM_FAULT_INTERRUPTED:
            retval = MACH_SEND_INTERRUPTED;
            goto done;
        case VM_FAULT_SUCCESS_NO_VM_PAGE:
            /* success but no VM page: fail */
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
            OS_FALLTHROUGH;
        case VM_FAULT_MEMORY_ERROR:
            /* the page is not there! */
            if (error_code) {
                retval = error_code;
            } else {
                retval = KERN_MEMORY_ERROR;
            }
            goto done;
        default:
            panic("apple_protect_pager_data_request: "
                "vm_fault_page() unexpected error 0x%x\n",
                kr);
        }
        assert(src_page != VM_PAGE_NULL);
        assert(src_page->vmp_busy);

        if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
            vm_page_lockspin_queues();

            if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
                vm_page_speculate(src_page, FALSE);
            }
            vm_page_unlock_queues();
        }

        /*
         * Establish pointers to the source
         * and destination physical pages.
         */
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);

        src_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
            << PAGE_SHIFT);
        dst_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);

        src_page_object = VM_PAGE_OBJECT(src_page);

        /*
         * Validate the original page...
         */
        if (src_page_object->code_signed) {
            vm_page_validate_cs_mapped(
                src_page, PAGE_SIZE, 0,
                (const void *) src_vaddr);
        }
        /*
         * ... and transfer the results to the destination page.
         */
        UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_validated);
        UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_tainted);
        UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_nx);

        /*
         * page_decrypt() might access a mapped file, so let's release
         * the object lock for the source page to avoid a potential
         * deadlock. The source page is kept busy and we have a
         * "paging_in_progress" reference on its object, so it's safe
         * to unlock the object here.
         */
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_unlock(src_page_object);

        /*
         * Decrypt the encrypted contents of the source page
         * into the destination page.
         */
        for (offset_in_page = 0;
            offset_in_page < PAGE_SIZE;
            offset_in_page += 4096) {
            if (offset + cur_offset + offset_in_page <
                pager->crypto_start ||
                offset + cur_offset + offset_in_page >=
                pager->crypto_end) {
                /* not encrypted: just copy */
                bcopy((const char *)(src_vaddr +
                    offset_in_page),
                    (char *)(dst_vaddr + offset_in_page),
                    4096);

                if (apple_protect_pager_data_request_debug) {
                    printf("apple_protect_data_request"
                        "(%p,0x%llx+0x%llx+0x%04llx): "
                        "out of crypto range "
                        "[0x%llx:0x%llx]: "
                        "COPY [0x%016llx 0x%016llx] "
                        "code_signed=%d "
                        "cs_validated=%d "
                        "cs_tainted=%d "
                        "cs_nx=%d\n",
                        pager,
                        offset,
                        (uint64_t) cur_offset,
                        (uint64_t) offset_in_page,
                        pager->crypto_start,
                        pager->crypto_end,
                        *(uint64_t *)(dst_vaddr +
                        offset_in_page),
                        *(uint64_t *)(dst_vaddr +
                        offset_in_page + 8),
                        src_page_object->code_signed,
                        src_page->vmp_cs_validated,
                        src_page->vmp_cs_tainted,
                        src_page->vmp_cs_nx);
                }
                ret = 0;
                continue;
            }
            ret = pager->crypt_info->page_decrypt(
                (const void *)(src_vaddr + offset_in_page),
                (void *)(dst_vaddr + offset_in_page),
                ((pager->crypto_backing_offset -
                pager->crypto_start) +  /* XXX ? */
                offset +
                cur_offset +
                offset_in_page),
                pager->crypt_info->crypt_ops);

            if (apple_protect_pager_data_request_debug) {
                printf("apple_protect_data_request"
                    "(%p,0x%llx+0x%llx+0x%04llx): "
                    "in crypto range [0x%llx:0x%llx]: "
                    "DECRYPT offset 0x%llx="
                    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
                    "[0x%016llx 0x%016llx] "
                    "code_signed=%d "
                    "cs_validated=%d "
                    "cs_tainted=%d "
                    "cs_nx=%d "
                    "ret=0x%x\n",
                    pager,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    pager->crypto_start, pager->crypto_end,
                    ((pager->crypto_backing_offset -
                    pager->crypto_start) +
                    offset +
                    cur_offset +
                    offset_in_page),
                    pager->crypto_backing_offset,
                    pager->crypto_start,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    *(uint64_t *)(dst_vaddr + offset_in_page),
                    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
                    src_page_object->code_signed,
                    src_page->vmp_cs_validated,
                    src_page->vmp_cs_tainted,
                    src_page->vmp_cs_nx,
                    ret);
            }
            if (ret) {
                break;
            }
        }
        if (ret) {
            /*
             * Decryption failed. Abort the fault.
             */
            retval = KERN_ABORTED;
        }

        assert(VM_PAGE_OBJECT(src_page) == src_page_object);
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_lock(src_page_object);

        /*
         * Cleanup the result of vm_fault_page() of the source page.
         */
        PAGE_WAKEUP_DONE(src_page);
        src_page = VM_PAGE_NULL;
        vm_object_paging_end(src_page_object);
        vm_object_unlock(src_page_object);

        if (top_page != VM_PAGE_NULL) {
            assert(VM_PAGE_OBJECT(top_page) == src_top_object);
            vm_object_lock(src_top_object);
            VM_PAGE_FREE(top_page);
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
        }
    }

done:
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
            if (retval == KERN_ABORTED) {
                wait_result_t wait_result;

                /*
                 * We aborted the fault and did not provide
                 * any contents for the requested pages but
                 * the pages themselves are not invalid, so
                 * let's return success and let the caller
                 * retry the fault, in case it might succeed
                 * later (when the decryption code is up and
                 * running in the kernel, for example).
                 */
                retval = KERN_SUCCESS;
                /*
                 * Wait a little bit first to avoid using
                 * too much CPU time retrying and failing
                 * the same fault over and over again.
                 */
                wait_result = assert_wait_timeout(
                    (event_t) apple_protect_pager_data_request,
                    THREAD_UNINT,
                    10000,      /* 10ms */
                    NSEC_PER_USEC);
                assert(wait_result == THREAD_WAITING);
                wait_result = thread_block(THREAD_CONTINUE_NULL);
                assert(wait_result == THREAD_TIMED_OUT);
            }
        } else {
            boolean_t empty;
            assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
                "upl %p offset 0x%llx size 0x%x",
                upl, upl->u_offset, upl->u_size);
            upl_commit_range(upl, 0, upl->u_size,
                UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
                upl_pl, pl_count, &empty);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }
    if (src_top_object != VM_OBJECT_NULL) {
        vm_object_deallocate(src_top_object);
    }
    return retval;
}

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is
 * not 0, i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    pager = apple_protect_pager_lookup(mem_obj);

    lck_mtx_lock(&apple_protect_pager_lock);
    os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
    lck_mtx_unlock(&apple_protect_pager_lock);
}

/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
    apple_protect_pager_t pager)
{
    assert(!pager->is_mapped);

    queue_remove(&apple_protect_pager_queue,
        pager,
        apple_protect_pager_t,
        pager_queue);
    pager->pager_queue.next = NULL;
    pager->pager_queue.prev = NULL;

    apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
    apple_protect_pager_t pager)
{
    assert(pager->is_ready);
    assert(!pager->is_mapped);

    if (pager->backing_object != VM_OBJECT_NULL) {
        vm_object_deallocate(pager->backing_object);
        pager->backing_object = VM_OBJECT_NULL;
    }

    /* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: deallocate %p ref %d\n",
        __FUNCTION__,
        pager->crypt_info,
        pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
    crypt_info_deallocate(pager->crypt_info);
    pager->crypt_info = NULL;

    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->ap_pgr_hdr.mo_control, VM_OBJECT_DESTROY_UNKNOWN_REASON);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not, but always
 * returns with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
    apple_protect_pager_t pager,
    boolean_t locked)
{
    boolean_t needs_trimming;
    unsigned int count_unmapped;
    os_ref_count_t ref_count;

    if (!locked) {
        lck_mtx_lock(&apple_protect_pager_lock);
    }

    count_unmapped = (apple_protect_pager_count -
        apple_protect_pager_count_mapped);
    if (count_unmapped > apple_protect_pager_cache_limit) {
        /* we have too many unmapped pagers: trim some */
        needs_trimming = TRUE;
    } else {
        needs_trimming = FALSE;
    }

    /* drop a reference on this pager */
    ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);

    if (ref_count == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        apple_protect_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        lck_mtx_unlock(&apple_protect_pager_lock);
        apple_protect_pager_terminate_internal(pager);
    } else if (ref_count == 0) {
        /*
         * Dropped the existence reference; the memory object has
         * been terminated. Do some final cleanup and release the
         * pager structure.
         */
        lck_mtx_unlock(&apple_protect_pager_lock);
        if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
            pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree_type(struct apple_protect_pager, pager);
        pager = APPLE_PROTECT_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        lck_mtx_unlock(&apple_protect_pager_lock);
    }

    if (needs_trimming) {
        apple_protect_pager_trim();
    }
    /* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
    pager = apple_protect_pager_lookup(mem_obj);
    apple_protect_pager_deallocate_internal(pager, FALSE);
}

/*
 * apple_protect_pager_terminate()
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. This is called by VM each time
 * the memory object gets mapped, and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    lck_mtx_lock(&apple_protect_pager_lock);
    assert(pager->is_ready);
    assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
    if (pager->is_mapped == FALSE) {
        /*
         * First mapping of this pager: take an extra reference
         * that will remain until all the mappings of this pager
         * are removed.
         */
        pager->is_mapped = TRUE;
        os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
        apple_protect_pager_count_mapped++;
    }
    lck_mtx_unlock(&apple_protect_pager_lock);

    return KERN_SUCCESS;
}

/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_last_unmap(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;
    unsigned int count_unmapped;

    PAGER_DEBUG(PAGER_ALL,
        ("apple_protect_pager_last_unmap: %p\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    lck_mtx_lock(&apple_protect_pager_lock);
    if (pager->is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        apple_protect_pager_count_mapped--;
        count_unmapped = (apple_protect_pager_count -
            apple_protect_pager_count_mapped);
        if (count_unmapped > apple_protect_pager_count_unmapped_max) {
            apple_protect_pager_count_unmapped_max = count_unmapped;
        }
        pager->is_mapped = FALSE;
        apple_protect_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock! */
    } else {
        lck_mtx_unlock(&apple_protect_pager_lock);
    }

    return KERN_SUCCESS;
}

boolean_t
apple_protect_pager_backing_object(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    vm_object_t *backing_object,
    vm_object_offset_t *backing_offset)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL,
        ("apple_protect_pager_backing_object: %p\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    *backing_object = pager->backing_object;
    *backing_offset = pager->backing_offset + offset;

    return TRUE;
}

/*
 * apple_protect_pager_lookup()
 */
apple_protect_pager_t
apple_protect_pager_lookup(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
    pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
    assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
    return pager;
}

apple_protect_pager_t
apple_protect_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end,
    boolean_t cache_pager)
{
    apple_protect_pager_t pager, pager2;
    memory_object_control_t control;
    kern_return_t kr;
    struct pager_crypt_info *old_crypt_info;

    pager = kalloc_type(struct apple_protect_pager, Z_WAITOK | Z_NOFAIL);

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter. We need to make sure that
     * vm_map does not see this object as a named entry port. So,
     * we reserve the first word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
    pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    pager->is_ready = FALSE; /* not ready until it has a "name" */
    /* one reference for the caller */
    os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
    pager->is_mapped = FALSE;
    if (cache_pager) {
        /* extra reference for the cache */
        os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
        pager->is_cached = true;
    } else {
        pager->is_cached = false;
    }
    pager->backing_object = backing_object;
    pager->backing_offset = backing_offset;
    pager->crypto_backing_offset = crypto_backing_offset;
    pager->crypto_start = crypto_start;
    pager->crypto_end = crypto_end;
    pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->page_decrypt,
        crypt_info->crypt_end,
        crypt_info->crypt_ops,
        crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

    vm_object_reference(backing_object);

    old_crypt_info = NULL;

    lck_mtx_lock(&apple_protect_pager_lock);
    /* see if anyone raced us to create a pager for the same object */
    queue_iterate(&apple_protect_pager_queue,
        pager2,
        apple_protect_pager_t,
        pager_queue) {
        if ((pager2->crypt_info->page_decrypt !=
            crypt_info->page_decrypt) ||
            (pager2->crypt_info->crypt_end !=
            crypt_info->crypt_end) ||
            (pager2->crypt_info->crypt_ops !=
            crypt_info->crypt_ops)) {
            /* crypt_info contents do not match: next pager */
            continue;
        }

        /* found a match for crypt_info ... */
        if (old_crypt_info) {
            /* ... already switched to that crypt_info */
            assert(old_crypt_info == pager2->crypt_info);
        } else if (pager2->crypt_info != crypt_info) {
            /* ... switch to that pager's crypt_info */
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: reference %p ref %d "
                "(create match)\n",
                __FUNCTION__,
                pager2->crypt_info,
                pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
            old_crypt_info = pager2->crypt_info;
            crypt_info_reference(old_crypt_info);
            pager->crypt_info = old_crypt_info;
        }

        if (pager2->backing_object == backing_object &&
            pager2->backing_offset == backing_offset &&
            pager2->crypto_backing_offset == crypto_backing_offset &&
            pager2->crypto_start == crypto_start &&
            pager2->crypto_end == crypto_end) {
            /* full match: use that pager */
            break;
        }
    }
    if (!queue_end(&apple_protect_pager_queue,
        (queue_entry_t) pager2)) {
        /* we lost the race, down with the loser... */
        lck_mtx_unlock(&apple_protect_pager_lock);
        vm_object_deallocate(pager->backing_object);
        pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
        printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
            __FUNCTION__,
            pager->crypt_info,
            pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
        crypt_info_deallocate(pager->crypt_info);
        pager->crypt_info = NULL;
        kfree_type(struct apple_protect_pager, pager);
        /* ... and go with the winner */
        pager = pager2;
        /* let the winner make sure the pager gets ready */
        return pager;
    }

    /* enter new pager at the head of our list of pagers */
    queue_enter_first(&apple_protect_pager_queue,
        pager,
        apple_protect_pager_t,
        pager_queue);
    apple_protect_pager_count++;
    if (apple_protect_pager_count > apple_protect_pager_count_max) {
        apple_protect_pager_count_max = apple_protect_pager_count;
    }
    lck_mtx_unlock(&apple_protect_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
        0,
        &control);
    assert(kr == KERN_SUCCESS);

    memory_object_mark_trusted(control);

    lck_mtx_lock(&apple_protect_pager_lock);
    /* the new pager is now ready to be used */
    pager->is_ready = TRUE;
    lck_mtx_unlock(&apple_protect_pager_lock);

    /* wakeup anyone waiting for this pager to be ready */
    thread_wakeup(&pager->is_ready);

    if (old_crypt_info != NULL &&
        old_crypt_info != crypt_info) {
        /* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
        printf("CRYPT_INFO %s: deallocate %p ref %d "
            "(create used old)\n",
            __FUNCTION__,
            crypt_info,
            crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
        crypt_info_deallocate(crypt_info);
        crypt_info = NULL;
    }

    return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object. If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end,
    boolean_t cache_pager)
{
    apple_protect_pager_t pager;
    struct pager_crypt_info *old_crypt_info, *new_crypt_info;

#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->page_decrypt,
        crypt_info->crypt_end,
        crypt_info->crypt_ops,
        crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

    old_crypt_info = NULL;

    lck_mtx_lock(&apple_protect_pager_lock);

    queue_iterate(&apple_protect_pager_queue,
        pager,
        apple_protect_pager_t,
        pager_queue) {
        if ((pager->crypt_info->page_decrypt !=
            crypt_info->page_decrypt) ||
            (pager->crypt_info->crypt_end !=
            crypt_info->crypt_end) ||
            (pager->crypt_info->crypt_ops !=
            crypt_info->crypt_ops)) {
            /* no match for "crypt_info": next pager */
            continue;
        }
        /* found a match for crypt_info ... */
        if (old_crypt_info) {
            /* ... already switched to that crypt_info */
            assert(old_crypt_info == pager->crypt_info);
        } else {
            /* ... switch to that pager's crypt_info */
            old_crypt_info = pager->crypt_info;
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "switching crypt_info from %p [%p,%p,%p,%d] "
                "to %p [%p,%p,%p,%d] from pager %p\n",
                __FUNCTION__,
                crypt_info,
                crypt_info->page_decrypt,
                crypt_info->crypt_end,
                crypt_info->crypt_ops,
                crypt_info->crypt_refcnt,
                old_crypt_info,
                old_crypt_info->page_decrypt,
                old_crypt_info->crypt_end,
                old_crypt_info->crypt_ops,
                old_crypt_info->crypt_refcnt,
                pager);
            printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
                __FUNCTION__,
                pager->crypt_info,
                pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
            crypt_info_reference(pager->crypt_info);
        }

        if (pager->backing_object == backing_object &&
            pager->backing_offset == backing_offset &&
            pager->crypto_backing_offset == crypto_backing_offset &&
            pager->crypto_start == crypto_start &&
            pager->crypto_end == crypto_end) {
            /* full match: use that pager! */
            assert(old_crypt_info == pager->crypt_info);
            assert(old_crypt_info->crypt_refcnt > 1);
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "pager match with %p crypt_info %p\n",
                __FUNCTION__,
                pager,
                pager->crypt_info);
            printf("CRYPT_INFO %s: deallocate %p ref %d "
                "(pager match)\n",
                __FUNCTION__,
                old_crypt_info,
                old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
            /* release the extra ref on crypt_info we got above */
            crypt_info_deallocate(old_crypt_info);
            assert(old_crypt_info->crypt_refcnt > 0);
            /* give extra reference on pager to the caller */
            os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
            break;
        }
    }
    if (queue_end(&apple_protect_pager_queue,
        (queue_entry_t) pager)) {
        lck_mtx_unlock(&apple_protect_pager_lock);
        /* no existing pager for this backing object */
        pager = APPLE_PROTECT_PAGER_NULL;
        if (old_crypt_info) {
            /* use this old crypt_info for new pager */
            new_crypt_info = old_crypt_info;
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "will use old_crypt_info %p for new pager\n",
                __FUNCTION__,
                old_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
        } else {
            /* allocate a new crypt_info for new pager */
            new_crypt_info = kalloc_type(struct pager_crypt_info, Z_WAITOK);
            *new_crypt_info = *crypt_info;
            new_crypt_info->crypt_refcnt = 1;
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "will use new_crypt_info %p for new pager\n",
                __FUNCTION__,
                new_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
        }
        if (new_crypt_info == NULL) {
            /* can't create new pager without a crypt_info */
        } else {
            /* create new pager */
            pager = apple_protect_pager_create(
                backing_object,
                backing_offset,
                crypto_backing_offset,
                new_crypt_info,
                crypto_start,
                crypto_end,
                cache_pager);
        }
        if (pager == APPLE_PROTECT_PAGER_NULL) {
            /* could not create a new pager */
            if (new_crypt_info == old_crypt_info) {
                /* release extra reference on old_crypt_info */
#if CRYPT_INFO_DEBUG
                printf("CRYPT_INFO %s: deallocate %p ref %d "
                    "(create fail old_crypt_info)\n",
                    __FUNCTION__,
                    old_crypt_info,
                    old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
                crypt_info_deallocate(old_crypt_info);
                old_crypt_info = NULL;
            } else {
                /* release unused new_crypt_info */
                assert(new_crypt_info->crypt_refcnt == 1);
#if CRYPT_INFO_DEBUG
                printf("CRYPT_INFO %s: deallocate %p ref %d "
                    "(create fail new_crypt_info)\n",
                    __FUNCTION__,
                    new_crypt_info,
                    new_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
                crypt_info_deallocate(new_crypt_info);
                new_crypt_info = NULL;
            }
            return MEMORY_OBJECT_NULL;
        }
        lck_mtx_lock(&apple_protect_pager_lock);
    } else {
        assert(old_crypt_info == pager->crypt_info);
    }

    while (!pager->is_ready) {
        lck_mtx_sleep(&apple_protect_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&apple_protect_pager_lock);

    return (memory_object_t) pager;
}

void
apple_protect_pager_trim(void)
{
    apple_protect_pager_t pager, prev_pager;
    queue_head_t trim_queue;
    unsigned int num_trim;
    unsigned int count_unmapped;

    lck_mtx_lock(&apple_protect_pager_lock);

    /*
     * We have too many pagers, so try to trim some unused ones,
     * starting with the oldest pager at the end of the queue.
     */
    queue_init(&trim_queue);
    num_trim = 0;

    for (pager = (apple_protect_pager_t)
        queue_last(&apple_protect_pager_queue);
        !queue_end(&apple_protect_pager_queue,
        (queue_entry_t) pager);
        pager = prev_pager) {
        /* get prev elt before we dequeue */
        prev_pager = (apple_protect_pager_t)
            queue_prev(&pager->pager_queue);

        if (pager->is_cached &&
            os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
            pager->is_ready &&
            !pager->is_mapped) {
            /* this pager can be trimmed */
            num_trim++;
            /* remove this pager from the main list ... */
            apple_protect_pager_dequeue(pager);
            /* ... and add it to our trim queue */
            queue_enter_first(&trim_queue,
                pager,
                apple_protect_pager_t,
                pager_queue);

            count_unmapped = (apple_protect_pager_count -
                apple_protect_pager_count_mapped);
            if (count_unmapped <= apple_protect_pager_cache_limit) {
                /* we have trimmed enough pagers */
                break;
            }
        }
    }
    if (num_trim > apple_protect_pager_num_trim_max) {
        apple_protect_pager_num_trim_max = num_trim;
    }
    apple_protect_pager_num_trim_total += num_trim;

    lck_mtx_unlock(&apple_protect_pager_lock);

    /* terminate the trimmed pagers */
    while (!queue_empty(&trim_queue)) {
        queue_remove_first(&trim_queue,
            pager,
            apple_protect_pager_t,
            pager_queue);
        assert(pager->is_cached);
        pager->is_cached = false;
        pager->pager_queue.next = NULL;
        pager->pager_queue.prev = NULL;
        /*
         * We can't call deallocate_internal() because the pager
         * has already been dequeued, but we still need to remove
         * a reference.
         */
        os_ref_count_t __assert_only count;
        count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
        assert(count == 1);
        apple_protect_pager_terminate_internal(pager);
    }
}

void
crypt_info_reference(
    struct pager_crypt_info *crypt_info)
{
    assert(crypt_info->crypt_refcnt != 0);
#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: %p ref %d -> %d\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->crypt_refcnt,
        crypt_info->crypt_refcnt + 1);
#endif /* CRYPT_INFO_DEBUG */
    OSAddAtomic(+1, &crypt_info->crypt_refcnt);
}

void
crypt_info_deallocate(
    struct pager_crypt_info *crypt_info)
{
#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: %p ref %d -> %d\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->crypt_refcnt,
        crypt_info->crypt_refcnt - 1);
#endif /* CRYPT_INFO_DEBUG */
    OSAddAtomic(-1, &crypt_info->crypt_refcnt);
    if (crypt_info->crypt_refcnt == 0) {
        /* deallocate any crypt module data */
        if (crypt_info->crypt_end) {
            crypt_info->crypt_end(crypt_info->crypt_ops);
            crypt_info->crypt_end = NULL;
        }
#if CRYPT_INFO_DEBUG
        printf("CRYPT_INFO %s: freeing %p\n",
            __FUNCTION__,
            crypt_info);
#endif /* CRYPT_INFO_DEBUG */
        kfree_type(struct pager_crypt_info, crypt_info);
    }
}

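/*
 * Usage sketch for the crypt_info refcount helpers above (illustrative;
 * the real call sites are apple_protect_pager_setup()/_create() and
 * apple_protect_pager_terminate_internal()):
 *
 *     crypt_info_reference(crypt_info);         // pager takes a reference
 *     pager->crypt_info = crypt_info;
 *     ...
 *     crypt_info_deallocate(pager->crypt_info); // when the count drops to
 *     pager->crypt_info = NULL;                 // 0, this also runs the
 *                                               // crypt_end() callback and
 *                                               // frees the structure
 */
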
static uint64_t
apple_protect_pager_purge(
    apple_protect_pager_t pager)
{
    uint64_t pages_purged;
    vm_object_t object;

    pages_purged = 0;
    object = memory_object_to_vm_object((memory_object_t) pager);
    assert(object != VM_OBJECT_NULL);
    vm_object_lock(object);
    pages_purged = object->resident_page_count;
    vm_object_reap_pages(object, REAP_DATA_FLUSH);
    pages_purged -= object->resident_page_count;
//  printf(" %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
    vm_object_unlock(object);
    return pages_purged;
}

uint64_t
apple_protect_pager_purge_all(void)
{
    uint64_t pages_purged;
    apple_protect_pager_t pager;

    pages_purged = 0;
    lck_mtx_lock(&apple_protect_pager_lock);
    queue_iterate(&apple_protect_pager_queue, pager, apple_protect_pager_t, pager_queue) {
        pages_purged += apple_protect_pager_purge(pager);
    }
    lck_mtx_unlock(&apple_protect_pager_lock);
#if DEVELOPMENT || DEBUG
    printf(" %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
#endif /* DEVELOPMENT || DEBUG */
    return pages_purged;
}