1 | /* |
2 | * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <sys/errno.h> |
30 | |
31 | #include <mach/mach_types.h> |
32 | #include <mach/mach_traps.h> |
33 | #include <mach/host_priv.h> |
34 | #include <mach/kern_return.h> |
35 | #include <mach/memory_object_control.h> |
36 | #include <mach/memory_object_types.h> |
37 | #include <mach/port.h> |
38 | #include <mach/policy.h> |
39 | #include <mach/upl.h> |
40 | #include <mach/thread_act.h> |
41 | #include <mach/mach_vm.h> |
42 | |
43 | #include <kern/host.h> |
44 | #include <kern/kalloc.h> |
45 | #include <kern/page_decrypt.h> |
46 | #include <kern/queue.h> |
47 | #include <kern/thread.h> |
48 | #include <kern/ipc_kobject.h> |
49 | |
50 | #include <ipc/ipc_port.h> |
51 | #include <ipc/ipc_space.h> |
52 | |
53 | #include <vm/vm_fault.h> |
54 | #include <vm/vm_map.h> |
55 | #include <vm/vm_pageout.h> |
56 | #include <vm/memory_object.h> |
57 | #include <vm/vm_pageout.h> |
58 | #include <vm/vm_protos.h> |
59 | #include <vm/vm_kern.h> |
60 | |
61 | |
62 | /* |
63 | * APPLE PROTECT MEMORY PAGER |
64 | * |
65 | * This external memory manager (EMM) handles memory from the encrypted |
66 | * sections of some executables protected by the DSMOS kernel extension. |
67 | * |
68 | * It mostly handles page-in requests (from memory_object_data_request()) by |
69 | * getting the encrypted data from its backing VM object, itself backed by |
70 | * the encrypted file, decrypting it and providing it to VM. |
71 | * |
72 | * The decrypted pages will never be dirtied, so the memory manager doesn't |
73 | * need to handle page-out requests (from memory_object_data_return()). The |
74 | * pages need to be mapped copy-on-write, so that the originals stay clean. |
75 | * |
76 | * We don't expect to have to handle a large number of apple-protected |
77 | * binaries, so the data structures are very simple (simple linked list) |
78 | * for now. |
79 | */ |
80 | |
81 | /* forward declarations */ |
82 | void apple_protect_pager_reference(memory_object_t mem_obj); |
83 | void apple_protect_pager_deallocate(memory_object_t mem_obj); |
84 | kern_return_t apple_protect_pager_init(memory_object_t mem_obj, |
85 | memory_object_control_t control, |
86 | memory_object_cluster_size_t pg_size); |
87 | kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj); |
88 | kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj, |
89 | memory_object_offset_t offset, |
90 | memory_object_cluster_size_t length, |
91 | vm_prot_t protection_required, |
92 | memory_object_fault_info_t fault_info); |
93 | kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj, |
94 | memory_object_offset_t offset, |
95 | memory_object_cluster_size_t data_cnt, |
96 | memory_object_offset_t *resid_offset, |
97 | int *io_error, |
98 | boolean_t dirty, |
99 | boolean_t kernel_copy, |
100 | int upl_flags); |
101 | kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj, |
102 | memory_object_offset_t offset, |
103 | memory_object_cluster_size_t data_cnt); |
104 | kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj, |
105 | memory_object_offset_t offset, |
106 | memory_object_size_t size, |
107 | vm_prot_t desired_access); |
108 | kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj, |
109 | memory_object_offset_t offset, |
110 | memory_object_size_t length, |
111 | vm_sync_t sync_flags); |
112 | kern_return_t apple_protect_pager_map(memory_object_t mem_obj, |
113 | vm_prot_t prot); |
114 | kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj); |
115 | |
116 | #define CRYPT_INFO_DEBUG 0 |
117 | void crypt_info_reference(struct pager_crypt_info *crypt_info); |
118 | void crypt_info_deallocate(struct pager_crypt_info *crypt_info); |
119 | |
120 | /* |
121 | * Vector of VM operations for this EMM. |
122 | * These routines are invoked by VM via the memory_object_*() interfaces. |
123 | */ |
124 | const struct memory_object_pager_ops = { |
125 | apple_protect_pager_reference, |
126 | apple_protect_pager_deallocate, |
127 | apple_protect_pager_init, |
128 | apple_protect_pager_terminate, |
129 | apple_protect_pager_data_request, |
130 | apple_protect_pager_data_return, |
131 | apple_protect_pager_data_initialize, |
132 | apple_protect_pager_data_unlock, |
133 | apple_protect_pager_synchronize, |
134 | apple_protect_pager_map, |
135 | apple_protect_pager_last_unmap, |
136 | NULL, /* data_reclaim */ |
137 | "apple_protect" |
138 | }; |
139 | |
140 | /* |
141 | * The "apple_protect_pager" describes a memory object backed by |
142 | * the "apple protect" EMM. |
143 | */ |
144 | typedef struct { |
145 | /* mandatory generic header */ |
146 | struct memory_object ; |
147 | |
148 | /* pager-specific data */ |
149 | queue_chain_t ; /* next & prev pagers */ |
150 | unsigned int ; /* reference count */ |
151 | boolean_t ; /* is this pager ready ? */ |
152 | boolean_t ; /* is this mem_obj mapped ? */ |
153 | vm_object_t ; /* VM obj w/ encrypted data */ |
154 | vm_object_offset_t ; |
155 | vm_object_offset_t ; /* for key... */ |
156 | vm_object_offset_t ; |
157 | vm_object_offset_t ; |
158 | struct pager_crypt_info *; |
159 | } *; |
160 | #define ((apple_protect_pager_t) NULL) |
161 | |
162 | /* |
163 | * List of memory objects managed by this EMM. |
164 | * The list is protected by the "apple_protect_pager_lock" lock. |
165 | */ |
166 | int = 0; /* number of pagers */ |
167 | int = 0; /* number of unmapped pagers */ |
168 | queue_head_t ; |
169 | decl_lck_mtx_data(,) |
170 | |
171 | /* |
172 | * Maximum number of unmapped pagers we're willing to keep around. |
173 | */ |
174 | int = 20; |
175 | |
176 | /* |
177 | * Statistics & counters. |
178 | */ |
179 | int = 0; |
180 | int = 0; |
181 | int = 0; |
182 | int = 0; |
183 | |
184 | |
185 | lck_grp_t ; |
186 | lck_grp_attr_t ; |
187 | lck_attr_t ; |
188 | |
189 | |
190 | /* internal prototypes */ |
191 | apple_protect_pager_t apple_protect_pager_create( |
192 | vm_object_t backing_object, |
193 | vm_object_offset_t backing_offset, |
194 | vm_object_offset_t crypto_backing_offset, |
195 | struct pager_crypt_info *crypt_info, |
196 | vm_object_offset_t crypto_start, |
197 | vm_object_offset_t crypto_end); |
198 | apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj); |
199 | void apple_protect_pager_dequeue(apple_protect_pager_t ); |
200 | void apple_protect_pager_deallocate_internal(apple_protect_pager_t , |
201 | boolean_t locked); |
202 | void apple_protect_pager_terminate_internal(apple_protect_pager_t ); |
203 | void apple_protect_pager_trim(void); |
204 | |
205 | |
#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL		0xffffffff
#define PAGER_INIT		0x00000001
#define PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A)						\
	MACRO_BEGIN							\
	if ((apple_protect_pagerdebug & LEVEL)==LEVEL) {		\
		printf A;						\
	}								\
	MACRO_END
#else
/* non-DEBUG builds: PAGER_DEBUG() compiles to nothing */
#define PAGER_DEBUG(LEVEL, A)
#endif
221 | |
222 | |
223 | void |
224 | (void) |
225 | { |
226 | lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr); |
227 | lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect" , &apple_protect_pager_lck_grp_attr); |
228 | lck_attr_setdefault(&apple_protect_pager_lck_attr); |
229 | lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr); |
230 | queue_init(&apple_protect_pager_queue); |
231 | } |
232 | |
233 | /* |
234 | * apple_protect_pager_init() |
235 | * |
236 | * Initialize the memory object and makes it ready to be used and mapped. |
237 | */ |
238 | kern_return_t |
239 | ( |
240 | memory_object_t mem_obj, |
241 | memory_object_control_t control, |
242 | #if !DEBUG |
243 | __unused |
244 | #endif |
245 | memory_object_cluster_size_t pg_size) |
246 | { |
247 | apple_protect_pager_t ; |
248 | kern_return_t kr; |
249 | memory_object_attr_info_data_t attributes; |
250 | |
251 | PAGER_DEBUG(PAGER_ALL, |
252 | ("apple_protect_pager_init: %p, %p, %x\n" , |
253 | mem_obj, control, pg_size)); |
254 | |
255 | if (control == MEMORY_OBJECT_CONTROL_NULL) |
256 | return KERN_INVALID_ARGUMENT; |
257 | |
258 | pager = apple_protect_pager_lookup(mem_obj); |
259 | |
260 | memory_object_control_reference(control); |
261 | |
262 | pager->ap_pgr_hdr.mo_control = control; |
263 | |
264 | attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY; |
265 | /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/ |
266 | attributes.cluster_size = (1 << (PAGE_SHIFT)); |
267 | attributes.may_cache_object = FALSE; |
268 | attributes.temporary = TRUE; |
269 | |
270 | kr = memory_object_change_attributes( |
271 | control, |
272 | MEMORY_OBJECT_ATTRIBUTE_INFO, |
273 | (memory_object_info_t) &attributes, |
274 | MEMORY_OBJECT_ATTR_INFO_COUNT); |
275 | if (kr != KERN_SUCCESS) |
276 | panic("apple_protect_pager_init: " |
277 | "memory_object_change_attributes() failed" ); |
278 | |
279 | #if CONFIG_SECLUDED_MEMORY |
280 | if (secluded_for_filecache) { |
281 | memory_object_mark_eligible_for_secluded(control, TRUE); |
282 | } |
283 | #endif /* CONFIG_SECLUDED_MEMORY */ |
284 | |
285 | return KERN_SUCCESS; |
286 | } |
287 | |
288 | /* |
289 | * apple_protect_data_return() |
290 | * |
291 | * Handles page-out requests from VM. This should never happen since |
292 | * the pages provided by this EMM are not supposed to be dirty or dirtied |
293 | * and VM should simply discard the contents and reclaim the pages if it |
294 | * needs to. |
295 | */ |
296 | kern_return_t |
297 | ( |
298 | __unused memory_object_t mem_obj, |
299 | __unused memory_object_offset_t offset, |
300 | __unused memory_object_cluster_size_t data_cnt, |
301 | __unused memory_object_offset_t *resid_offset, |
302 | __unused int *io_error, |
303 | __unused boolean_t dirty, |
304 | __unused boolean_t kernel_copy, |
305 | __unused int upl_flags) |
306 | { |
307 | panic("apple_protect_pager_data_return: should never get called" ); |
308 | return KERN_FAILURE; |
309 | } |
310 | |
311 | kern_return_t |
312 | ( |
313 | __unused memory_object_t mem_obj, |
314 | __unused memory_object_offset_t offset, |
315 | __unused memory_object_cluster_size_t data_cnt) |
316 | { |
317 | panic("apple_protect_pager_data_initialize: should never get called" ); |
318 | return KERN_FAILURE; |
319 | } |
320 | |
321 | kern_return_t |
322 | ( |
323 | __unused memory_object_t mem_obj, |
324 | __unused memory_object_offset_t offset, |
325 | __unused memory_object_size_t size, |
326 | __unused vm_prot_t desired_access) |
327 | { |
328 | return KERN_FAILURE; |
329 | } |
330 | |
331 | /* |
332 | * apple_protect_pager_data_request() |
333 | * |
334 | * Handles page-in requests from VM. |
335 | */ |
int apple_protect_pager_data_request_debug = 0;	/* verbose page-in logging */
337 | kern_return_t |
338 | ( |
339 | memory_object_t mem_obj, |
340 | memory_object_offset_t offset, |
341 | memory_object_cluster_size_t length, |
342 | #if !DEBUG |
343 | __unused |
344 | #endif |
345 | vm_prot_t protection_required, |
346 | memory_object_fault_info_t mo_fault_info) |
347 | { |
348 | apple_protect_pager_t ; |
349 | memory_object_control_t mo_control; |
350 | upl_t upl; |
351 | int upl_flags; |
352 | upl_size_t upl_size; |
353 | upl_page_info_t *upl_pl; |
354 | unsigned int pl_count; |
355 | vm_object_t src_top_object, src_page_object, dst_object; |
356 | kern_return_t kr, retval; |
357 | vm_offset_t src_vaddr, dst_vaddr; |
358 | vm_offset_t cur_offset; |
359 | vm_offset_t offset_in_page; |
360 | kern_return_t error_code; |
361 | vm_prot_t prot; |
362 | vm_page_t src_page, top_page; |
363 | int interruptible; |
364 | struct vm_object_fault_info fault_info; |
365 | int ret; |
366 | |
367 | PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n" , mem_obj, offset, length, protection_required)); |
368 | |
369 | retval = KERN_SUCCESS; |
370 | src_top_object = VM_OBJECT_NULL; |
371 | src_page_object = VM_OBJECT_NULL; |
372 | upl = NULL; |
373 | upl_pl = NULL; |
374 | fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info); |
375 | fault_info.stealth = TRUE; |
376 | fault_info.io_sync = FALSE; |
377 | fault_info.mark_zf_absent = FALSE; |
378 | fault_info.batch_pmap_op = FALSE; |
379 | interruptible = fault_info.interruptible; |
380 | |
381 | pager = apple_protect_pager_lookup(mem_obj); |
382 | assert(pager->is_ready); |
383 | assert(pager->ref_count > 1); /* pager is alive and mapped */ |
384 | |
385 | PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n" , mem_obj, offset, length, protection_required, pager)); |
386 | |
387 | fault_info.lo_offset += pager->backing_offset; |
388 | fault_info.hi_offset += pager->backing_offset; |
389 | |
390 | /* |
391 | * Gather in a UPL all the VM pages requested by VM. |
392 | */ |
393 | mo_control = pager->ap_pgr_hdr.mo_control; |
394 | |
395 | upl_size = length; |
396 | upl_flags = |
397 | UPL_RET_ONLY_ABSENT | |
398 | UPL_SET_LITE | |
399 | UPL_NO_SYNC | |
400 | UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ |
401 | UPL_SET_INTERNAL; |
402 | pl_count = 0; |
403 | kr = memory_object_upl_request(mo_control, |
404 | offset, upl_size, |
405 | &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY); |
406 | if (kr != KERN_SUCCESS) { |
407 | retval = kr; |
408 | goto done; |
409 | } |
410 | dst_object = mo_control->moc_object; |
411 | assert(dst_object != VM_OBJECT_NULL); |
412 | |
413 | /* |
414 | * We'll map the encrypted data in the kernel address space from the |
415 | * backing VM object (itself backed by the encrypted file via |
416 | * the vnode pager). |
417 | */ |
418 | src_top_object = pager->backing_object; |
419 | assert(src_top_object != VM_OBJECT_NULL); |
420 | vm_object_reference(src_top_object); /* keep the source object alive */ |
421 | |
422 | /* |
423 | * Fill in the contents of the pages requested by VM. |
424 | */ |
425 | upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl); |
426 | pl_count = length / PAGE_SIZE; |
427 | for (cur_offset = 0; |
428 | retval == KERN_SUCCESS && cur_offset < length; |
429 | cur_offset += PAGE_SIZE) { |
430 | ppnum_t dst_pnum; |
431 | |
432 | if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) { |
433 | /* this page is not in the UPL: skip it */ |
434 | continue; |
435 | } |
436 | |
437 | /* |
438 | * Map the source (encrypted) page in the kernel's |
439 | * virtual address space. |
440 | * We already hold a reference on the src_top_object. |
441 | */ |
442 | retry_src_fault: |
443 | vm_object_lock(src_top_object); |
444 | vm_object_paging_begin(src_top_object); |
445 | error_code = 0; |
446 | prot = VM_PROT_READ; |
447 | src_page = VM_PAGE_NULL; |
448 | kr = vm_fault_page(src_top_object, |
449 | pager->backing_offset + offset + cur_offset, |
450 | VM_PROT_READ, |
451 | FALSE, |
452 | FALSE, /* src_page not looked up */ |
453 | &prot, |
454 | &src_page, |
455 | &top_page, |
456 | NULL, |
457 | &error_code, |
458 | FALSE, |
459 | FALSE, |
460 | &fault_info); |
461 | switch (kr) { |
462 | case VM_FAULT_SUCCESS: |
463 | break; |
464 | case VM_FAULT_RETRY: |
465 | goto retry_src_fault; |
466 | case VM_FAULT_MEMORY_SHORTAGE: |
467 | if (vm_page_wait(interruptible)) { |
468 | goto retry_src_fault; |
469 | } |
470 | /* fall thru */ |
471 | case VM_FAULT_INTERRUPTED: |
472 | retval = MACH_SEND_INTERRUPTED; |
473 | goto done; |
474 | case VM_FAULT_SUCCESS_NO_VM_PAGE: |
475 | /* success but no VM page: fail */ |
476 | vm_object_paging_end(src_top_object); |
477 | vm_object_unlock(src_top_object); |
478 | /*FALLTHROUGH*/ |
479 | case VM_FAULT_MEMORY_ERROR: |
480 | /* the page is not there ! */ |
481 | if (error_code) { |
482 | retval = error_code; |
483 | } else { |
484 | retval = KERN_MEMORY_ERROR; |
485 | } |
486 | goto done; |
487 | default: |
488 | panic("apple_protect_pager_data_request: " |
489 | "vm_fault_page() unexpected error 0x%x\n" , |
490 | kr); |
491 | } |
492 | assert(src_page != VM_PAGE_NULL); |
493 | assert(src_page->vmp_busy); |
494 | |
495 | if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { |
496 | |
497 | vm_page_lockspin_queues(); |
498 | |
499 | if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { |
500 | vm_page_speculate(src_page, FALSE); |
501 | } |
502 | vm_page_unlock_queues(); |
503 | } |
504 | |
505 | /* |
506 | * Establish pointers to the source |
507 | * and destination physical pages. |
508 | */ |
509 | dst_pnum = (ppnum_t) |
510 | upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); |
511 | assert(dst_pnum != 0); |
512 | #if __x86_64__ |
513 | src_vaddr = (vm_map_offset_t) |
514 | PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) |
515 | << PAGE_SHIFT); |
516 | dst_vaddr = (vm_map_offset_t) |
517 | PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT); |
518 | |
519 | #elif __arm__ || __arm64__ |
520 | src_vaddr = (vm_map_offset_t) |
521 | phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) |
522 | << PAGE_SHIFT); |
523 | dst_vaddr = (vm_map_offset_t) |
524 | phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT); |
525 | #else |
526 | #error "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..." |
527 | src_vaddr = 0; |
528 | dst_vaddr = 0; |
529 | #endif |
530 | src_page_object = VM_PAGE_OBJECT(src_page); |
531 | |
532 | /* |
533 | * Validate the original page... |
534 | */ |
535 | if (src_page_object->code_signed) { |
536 | vm_page_validate_cs_mapped( |
537 | src_page, |
538 | (const void *) src_vaddr); |
539 | } |
540 | /* |
541 | * ... and transfer the results to the destination page. |
542 | */ |
543 | UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, |
544 | src_page->vmp_cs_validated); |
545 | UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, |
546 | src_page->vmp_cs_tainted); |
547 | UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, |
548 | src_page->vmp_cs_nx); |
549 | |
550 | /* |
551 | * page_decrypt() might access a mapped file, so let's release |
552 | * the object lock for the source page to avoid a potential |
553 | * deadlock. The source page is kept busy and we have a |
554 | * "paging_in_progress" reference on its object, so it's safe |
555 | * to unlock the object here. |
556 | */ |
557 | assert(src_page->vmp_busy); |
558 | assert(src_page_object->paging_in_progress > 0); |
559 | vm_object_unlock(src_page_object); |
560 | |
561 | /* |
562 | * Decrypt the encrypted contents of the source page |
563 | * into the destination page. |
564 | */ |
565 | for (offset_in_page = 0; |
566 | offset_in_page < PAGE_SIZE; |
567 | offset_in_page += 4096) { |
568 | if (offset + cur_offset + offset_in_page < |
569 | pager->crypto_start || |
570 | offset + cur_offset + offset_in_page >= |
571 | pager->crypto_end) { |
572 | /* not encrypted: just copy */ |
573 | bcopy((const char *)(src_vaddr + |
574 | offset_in_page), |
575 | (char *)(dst_vaddr + offset_in_page), |
576 | 4096); |
577 | |
578 | if (apple_protect_pager_data_request_debug) { |
579 | printf("apple_protect_data_request" |
580 | "(%p,0x%llx+0x%llx+0x%04llx): " |
581 | "out of crypto range " |
582 | "[0x%llx:0x%llx]: " |
583 | "COPY [0x%016llx 0x%016llx] " |
584 | "code_signed=%d " |
585 | "cs_validated=%d " |
586 | "cs_tainted=%d " |
587 | "cs_nx=%d\n" , |
588 | pager, |
589 | offset, |
590 | (uint64_t) cur_offset, |
591 | (uint64_t) offset_in_page, |
592 | pager->crypto_start, |
593 | pager->crypto_end, |
594 | *(uint64_t *)(dst_vaddr+ |
595 | offset_in_page), |
596 | *(uint64_t *)(dst_vaddr+ |
597 | offset_in_page+8), |
598 | src_page_object->code_signed, |
599 | src_page->vmp_cs_validated, |
600 | src_page->vmp_cs_tainted, |
601 | src_page->vmp_cs_nx); |
602 | } |
603 | ret = 0; |
604 | continue; |
605 | } |
606 | ret = pager->crypt_info->page_decrypt( |
607 | (const void *)(src_vaddr + offset_in_page), |
608 | (void *)(dst_vaddr + offset_in_page), |
609 | ((pager->crypto_backing_offset - |
610 | pager->crypto_start) + /* XXX ? */ |
611 | offset + |
612 | cur_offset + |
613 | offset_in_page), |
614 | pager->crypt_info->crypt_ops); |
615 | |
616 | if (apple_protect_pager_data_request_debug) { |
617 | printf("apple_protect_data_request" |
618 | "(%p,0x%llx+0x%llx+0x%04llx): " |
619 | "in crypto range [0x%llx:0x%llx]: " |
620 | "DECRYPT offset 0x%llx=" |
621 | "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)" |
622 | "[0x%016llx 0x%016llx] " |
623 | "code_signed=%d " |
624 | "cs_validated=%d " |
625 | "cs_tainted=%d " |
626 | "cs_nx=%d " |
627 | "ret=0x%x\n" , |
628 | pager, |
629 | offset, |
630 | (uint64_t) cur_offset, |
631 | (uint64_t) offset_in_page, |
632 | pager->crypto_start, pager->crypto_end, |
633 | ((pager->crypto_backing_offset - |
634 | pager->crypto_start) + |
635 | offset + |
636 | cur_offset + |
637 | offset_in_page), |
638 | pager->crypto_backing_offset, |
639 | pager->crypto_start, |
640 | offset, |
641 | (uint64_t) cur_offset, |
642 | (uint64_t) offset_in_page, |
643 | *(uint64_t *)(dst_vaddr+offset_in_page), |
644 | *(uint64_t *)(dst_vaddr+offset_in_page+8), |
645 | src_page_object->code_signed, |
646 | src_page->vmp_cs_validated, |
647 | src_page->vmp_cs_tainted, |
648 | src_page->vmp_cs_nx, |
649 | ret); |
650 | } |
651 | if (ret) { |
652 | break; |
653 | } |
654 | } |
655 | if (ret) { |
656 | /* |
657 | * Decryption failed. Abort the fault. |
658 | */ |
659 | retval = KERN_ABORTED; |
660 | } |
661 | |
662 | assert(VM_PAGE_OBJECT(src_page) == src_page_object); |
663 | assert(src_page->vmp_busy); |
664 | assert(src_page_object->paging_in_progress > 0); |
665 | vm_object_lock(src_page_object); |
666 | |
667 | /* |
668 | * Cleanup the result of vm_fault_page() of the source page. |
669 | */ |
670 | PAGE_WAKEUP_DONE(src_page); |
671 | src_page = VM_PAGE_NULL; |
672 | vm_object_paging_end(src_page_object); |
673 | vm_object_unlock(src_page_object); |
674 | |
675 | if (top_page != VM_PAGE_NULL) { |
676 | assert(VM_PAGE_OBJECT(top_page) == src_top_object); |
677 | vm_object_lock(src_top_object); |
678 | VM_PAGE_FREE(top_page); |
679 | vm_object_paging_end(src_top_object); |
680 | vm_object_unlock(src_top_object); |
681 | } |
682 | } |
683 | |
684 | done: |
685 | if (upl != NULL) { |
686 | /* clean up the UPL */ |
687 | |
688 | /* |
689 | * The pages are currently dirty because we've just been |
690 | * writing on them, but as far as we're concerned, they're |
691 | * clean since they contain their "original" contents as |
692 | * provided by us, the pager. |
693 | * Tell the UPL to mark them "clean". |
694 | */ |
695 | upl_clear_dirty(upl, TRUE); |
696 | |
697 | /* abort or commit the UPL */ |
698 | if (retval != KERN_SUCCESS) { |
699 | upl_abort(upl, 0); |
700 | if (retval == KERN_ABORTED) { |
701 | wait_result_t wait_result; |
702 | |
703 | /* |
704 | * We aborted the fault and did not provide |
705 | * any contents for the requested pages but |
706 | * the pages themselves are not invalid, so |
707 | * let's return success and let the caller |
708 | * retry the fault, in case it might succeed |
709 | * later (when the decryption code is up and |
710 | * running in the kernel, for example). |
711 | */ |
712 | retval = KERN_SUCCESS; |
713 | /* |
714 | * Wait a little bit first to avoid using |
715 | * too much CPU time retrying and failing |
716 | * the same fault over and over again. |
717 | */ |
718 | wait_result = assert_wait_timeout( |
719 | (event_t) apple_protect_pager_data_request, |
720 | THREAD_UNINT, |
721 | 10000, /* 10ms */ |
722 | NSEC_PER_USEC); |
723 | assert(wait_result == THREAD_WAITING); |
724 | wait_result = thread_block(THREAD_CONTINUE_NULL); |
725 | assert(wait_result == THREAD_TIMED_OUT); |
726 | } |
727 | } else { |
728 | boolean_t empty; |
729 | upl_commit_range(upl, 0, upl->size, |
730 | UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, |
731 | upl_pl, pl_count, &empty); |
732 | } |
733 | |
734 | /* and deallocate the UPL */ |
735 | upl_deallocate(upl); |
736 | upl = NULL; |
737 | } |
738 | if (src_top_object != VM_OBJECT_NULL) { |
739 | vm_object_deallocate(src_top_object); |
740 | } |
741 | return retval; |
742 | } |
743 | |
744 | /* |
745 | * apple_protect_pager_reference() |
746 | * |
747 | * Get a reference on this memory object. |
748 | * For external usage only. Assumes that the initial reference count is not 0, |
749 | * i.e one should not "revive" a dead pager this way. |
750 | */ |
751 | void |
752 | ( |
753 | memory_object_t mem_obj) |
754 | { |
755 | apple_protect_pager_t ; |
756 | |
757 | pager = apple_protect_pager_lookup(mem_obj); |
758 | |
759 | lck_mtx_lock(&apple_protect_pager_lock); |
760 | assert(pager->ref_count > 0); |
761 | pager->ref_count++; |
762 | lck_mtx_unlock(&apple_protect_pager_lock); |
763 | } |
764 | |
765 | |
766 | /* |
767 | * apple_protect_pager_dequeue: |
768 | * |
769 | * Removes a pager from the list of pagers. |
770 | * |
771 | * The caller must hold "apple_protect_pager_lock". |
772 | */ |
773 | void |
774 | ( |
775 | apple_protect_pager_t ) |
776 | { |
777 | assert(!pager->is_mapped); |
778 | |
779 | queue_remove(&apple_protect_pager_queue, |
780 | pager, |
781 | apple_protect_pager_t, |
782 | pager_queue); |
783 | pager->pager_queue.next = NULL; |
784 | pager->pager_queue.prev = NULL; |
785 | |
786 | apple_protect_pager_count--; |
787 | } |
788 | |
789 | /* |
790 | * apple_protect_pager_terminate_internal: |
791 | * |
792 | * Trigger the asynchronous termination of the memory object associated |
793 | * with this pager. |
794 | * When the memory object is terminated, there will be one more call |
795 | * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate()) |
796 | * to finish the clean up. |
797 | * |
798 | * "apple_protect_pager_lock" should not be held by the caller. |
799 | * We don't need the lock because the pager has already been removed from |
800 | * the pagers' list and is now ours exclusively. |
801 | */ |
802 | void |
803 | ( |
804 | apple_protect_pager_t ) |
805 | { |
806 | assert(pager->is_ready); |
807 | assert(!pager->is_mapped); |
808 | |
809 | if (pager->backing_object != VM_OBJECT_NULL) { |
810 | vm_object_deallocate(pager->backing_object); |
811 | pager->backing_object = VM_OBJECT_NULL; |
812 | } |
813 | |
814 | /* one less pager using this "pager_crypt_info" */ |
815 | #if CRYPT_INFO_DEBUG |
816 | printf("CRYPT_INFO %s: deallocate %p ref %d\n" , |
817 | __FUNCTION__, |
818 | pager->crypt_info, |
819 | pager->crypt_info->crypt_refcnt); |
820 | #endif /* CRYPT_INFO_DEBUG */ |
821 | crypt_info_deallocate(pager->crypt_info); |
822 | pager->crypt_info = NULL; |
823 | |
824 | /* trigger the destruction of the memory object */ |
825 | memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0); |
826 | } |
827 | |
828 | /* |
829 | * apple_protect_pager_deallocate_internal() |
830 | * |
831 | * Release a reference on this pager and free it when the last |
832 | * reference goes away. |
833 | * Can be called with apple_protect_pager_lock held or not but always returns |
834 | * with it unlocked. |
835 | */ |
836 | void |
837 | ( |
838 | apple_protect_pager_t , |
839 | boolean_t locked) |
840 | { |
841 | boolean_t needs_trimming; |
842 | int count_unmapped; |
843 | |
844 | if (! locked) { |
845 | lck_mtx_lock(&apple_protect_pager_lock); |
846 | } |
847 | |
848 | count_unmapped = (apple_protect_pager_count - |
849 | apple_protect_pager_count_mapped); |
850 | if (count_unmapped > apple_protect_pager_cache_limit) { |
851 | /* we have too many unmapped pagers: trim some */ |
852 | needs_trimming = TRUE; |
853 | } else { |
854 | needs_trimming = FALSE; |
855 | } |
856 | |
857 | /* drop a reference on this pager */ |
858 | pager->ref_count--; |
859 | |
860 | if (pager->ref_count == 1) { |
861 | /* |
862 | * Only the "named" reference is left, which means that |
863 | * no one is really holding on to this pager anymore. |
864 | * Terminate it. |
865 | */ |
866 | apple_protect_pager_dequeue(pager); |
867 | /* the pager is all ours: no need for the lock now */ |
868 | lck_mtx_unlock(&apple_protect_pager_lock); |
869 | apple_protect_pager_terminate_internal(pager); |
870 | } else if (pager->ref_count == 0) { |
871 | /* |
872 | * Dropped the existence reference; the memory object has |
873 | * been terminated. Do some final cleanup and release the |
874 | * pager structure. |
875 | */ |
876 | lck_mtx_unlock(&apple_protect_pager_lock); |
877 | if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) { |
878 | memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control); |
879 | pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
880 | } |
881 | kfree(pager, sizeof (*pager)); |
882 | pager = APPLE_PROTECT_PAGER_NULL; |
883 | } else { |
884 | /* there are still plenty of references: keep going... */ |
885 | lck_mtx_unlock(&apple_protect_pager_lock); |
886 | } |
887 | |
888 | if (needs_trimming) { |
889 | apple_protect_pager_trim(); |
890 | } |
891 | /* caution: lock is not held on return... */ |
892 | } |
893 | |
894 | /* |
895 | * apple_protect_pager_deallocate() |
896 | * |
897 | * Release a reference on this pager and free it when the last |
898 | * reference goes away. |
899 | */ |
900 | void |
901 | ( |
902 | memory_object_t mem_obj) |
903 | { |
904 | apple_protect_pager_t ; |
905 | |
906 | PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n" , mem_obj)); |
907 | pager = apple_protect_pager_lookup(mem_obj); |
908 | apple_protect_pager_deallocate_internal(pager, FALSE); |
909 | } |
910 | |
911 | /* |
912 | * |
913 | */ |
914 | kern_return_t |
915 | ( |
916 | #if !DEBUG |
917 | __unused |
918 | #endif |
919 | memory_object_t mem_obj) |
920 | { |
921 | PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n" , mem_obj)); |
922 | |
923 | return KERN_SUCCESS; |
924 | } |
925 | |
926 | /* |
927 | * |
928 | */ |
929 | kern_return_t |
930 | ( |
931 | __unused memory_object_t mem_obj, |
932 | __unused memory_object_offset_t offset, |
933 | __unused memory_object_size_t length, |
934 | __unused vm_sync_t sync_flags) |
935 | { |
936 | panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n" ); |
937 | return KERN_FAILURE; |
938 | } |
939 | |
940 | /* |
941 | * apple_protect_pager_map() |
942 | * |
943 | * This allows VM to let us, the EMM, know that this memory object |
944 | * is currently mapped one or more times. This is called by VM each time |
945 | * the memory object gets mapped and we take one extra reference on the |
946 | * memory object to account for all its mappings. |
947 | */ |
948 | kern_return_t |
949 | ( |
950 | memory_object_t mem_obj, |
951 | __unused vm_prot_t prot) |
952 | { |
953 | apple_protect_pager_t ; |
954 | |
955 | PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n" , mem_obj)); |
956 | |
957 | pager = apple_protect_pager_lookup(mem_obj); |
958 | |
959 | lck_mtx_lock(&apple_protect_pager_lock); |
960 | assert(pager->is_ready); |
961 | assert(pager->ref_count > 0); /* pager is alive */ |
962 | if (pager->is_mapped == FALSE) { |
963 | /* |
964 | * First mapping of this pager: take an extra reference |
965 | * that will remain until all the mappings of this pager |
966 | * are removed. |
967 | */ |
968 | pager->is_mapped = TRUE; |
969 | pager->ref_count++; |
970 | apple_protect_pager_count_mapped++; |
971 | } |
972 | lck_mtx_unlock(&apple_protect_pager_lock); |
973 | |
974 | return KERN_SUCCESS; |
975 | } |
976 | |
977 | /* |
978 | * apple_protect_pager_last_unmap() |
979 | * |
980 | * This is called by VM when this memory object is no longer mapped anywhere. |
981 | */ |
982 | kern_return_t |
983 | ( |
984 | memory_object_t mem_obj) |
985 | { |
986 | apple_protect_pager_t ; |
987 | int count_unmapped; |
988 | |
989 | PAGER_DEBUG(PAGER_ALL, |
990 | ("apple_protect_pager_last_unmap: %p\n" , mem_obj)); |
991 | |
992 | pager = apple_protect_pager_lookup(mem_obj); |
993 | |
994 | lck_mtx_lock(&apple_protect_pager_lock); |
995 | if (pager->is_mapped) { |
996 | /* |
997 | * All the mappings are gone, so let go of the one extra |
998 | * reference that represents all the mappings of this pager. |
999 | */ |
1000 | apple_protect_pager_count_mapped--; |
1001 | count_unmapped = (apple_protect_pager_count - |
1002 | apple_protect_pager_count_mapped); |
1003 | if (count_unmapped > apple_protect_pager_count_unmapped_max) { |
1004 | apple_protect_pager_count_unmapped_max = count_unmapped; |
1005 | } |
1006 | pager->is_mapped = FALSE; |
1007 | apple_protect_pager_deallocate_internal(pager, TRUE); |
1008 | /* caution: deallocate_internal() released the lock ! */ |
1009 | } else { |
1010 | lck_mtx_unlock(&apple_protect_pager_lock); |
1011 | } |
1012 | |
1013 | return KERN_SUCCESS; |
1014 | } |
1015 | |
1016 | |
1017 | /* |
1018 | * |
1019 | */ |
1020 | apple_protect_pager_t |
1021 | ( |
1022 | memory_object_t mem_obj) |
1023 | { |
1024 | apple_protect_pager_t ; |
1025 | |
1026 | assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops); |
1027 | pager = (apple_protect_pager_t)(uintptr_t) mem_obj; |
1028 | assert(pager->ref_count > 0); |
1029 | return pager; |
1030 | } |
1031 | |
1032 | apple_protect_pager_t |
1033 | ( |
1034 | vm_object_t backing_object, |
1035 | vm_object_offset_t backing_offset, |
1036 | vm_object_offset_t crypto_backing_offset, |
1037 | struct pager_crypt_info *crypt_info, |
1038 | vm_object_offset_t crypto_start, |
1039 | vm_object_offset_t crypto_end) |
1040 | { |
1041 | apple_protect_pager_t , ; |
1042 | memory_object_control_t control; |
1043 | kern_return_t kr; |
1044 | struct pager_crypt_info *old_crypt_info; |
1045 | |
1046 | pager = (apple_protect_pager_t) kalloc(sizeof (*pager)); |
1047 | if (pager == APPLE_PROTECT_PAGER_NULL) { |
1048 | return APPLE_PROTECT_PAGER_NULL; |
1049 | } |
1050 | |
1051 | /* |
1052 | * The vm_map call takes both named entry ports and raw memory |
1053 | * objects in the same parameter. We need to make sure that |
1054 | * vm_map does not see this object as a named entry port. So, |
1055 | * we reserve the first word in the object for a fake ip_kotype |
1056 | * setting - that will tell vm_map to use it as a memory object. |
1057 | */ |
1058 | pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT; |
1059 | pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops; |
1060 | pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
1061 | |
1062 | pager->is_ready = FALSE;/* not ready until it has a "name" */ |
1063 | pager->ref_count = 1; /* existence reference (for the cache) */ |
1064 | pager->ref_count++; /* for the caller */ |
1065 | pager->is_mapped = FALSE; |
1066 | pager->backing_object = backing_object; |
1067 | pager->backing_offset = backing_offset; |
1068 | pager->crypto_backing_offset = crypto_backing_offset; |
1069 | pager->crypto_start = crypto_start; |
1070 | pager->crypto_end = crypto_end; |
1071 | pager->crypt_info = crypt_info; /* allocated by caller */ |
1072 | |
1073 | #if CRYPT_INFO_DEBUG |
1074 | printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n" , |
1075 | __FUNCTION__, |
1076 | crypt_info, |
1077 | crypt_info->page_decrypt, |
1078 | crypt_info->crypt_end, |
1079 | crypt_info->crypt_ops, |
1080 | crypt_info->crypt_refcnt); |
1081 | #endif /* CRYPT_INFO_DEBUG */ |
1082 | |
1083 | vm_object_reference(backing_object); |
1084 | |
1085 | old_crypt_info = NULL; |
1086 | |
1087 | lck_mtx_lock(&apple_protect_pager_lock); |
1088 | /* see if anyone raced us to create a pager for the same object */ |
1089 | queue_iterate(&apple_protect_pager_queue, |
1090 | pager2, |
1091 | apple_protect_pager_t, |
1092 | pager_queue) { |
1093 | if ((pager2->crypt_info->page_decrypt != |
1094 | crypt_info->page_decrypt) || |
1095 | (pager2->crypt_info->crypt_end != |
1096 | crypt_info->crypt_end) || |
1097 | (pager2->crypt_info->crypt_ops != |
1098 | crypt_info->crypt_ops)) { |
1099 | /* crypt_info contents do not match: next pager */ |
1100 | continue; |
1101 | } |
1102 | |
1103 | /* found a match for crypt_info ... */ |
1104 | if (old_crypt_info) { |
1105 | /* ... already switched to that crypt_info */ |
1106 | assert(old_crypt_info == pager2->crypt_info); |
1107 | } else if (pager2->crypt_info != crypt_info) { |
1108 | /* ... switch to that pager's crypt_info */ |
1109 | #if CRYPT_INFO_DEBUG |
1110 | printf("CRYPT_INFO %s: reference %p ref %d " |
1111 | "(create match)\n" , |
1112 | __FUNCTION__, |
1113 | pager2->crypt_info, |
1114 | pager2->crypt_info->crypt_refcnt); |
1115 | #endif /* CRYPT_INFO_DEBUG */ |
1116 | old_crypt_info = pager2->crypt_info; |
1117 | crypt_info_reference(old_crypt_info); |
1118 | pager->crypt_info = old_crypt_info; |
1119 | } |
1120 | |
1121 | if (pager2->backing_object == backing_object && |
1122 | pager2->backing_offset == backing_offset && |
1123 | pager2->crypto_backing_offset == crypto_backing_offset && |
1124 | pager2->crypto_start == crypto_start && |
1125 | pager2->crypto_end == crypto_end) { |
1126 | /* full match: use that pager */ |
1127 | break; |
1128 | } |
1129 | } |
1130 | if (! queue_end(&apple_protect_pager_queue, |
1131 | (queue_entry_t) pager2)) { |
1132 | /* we lost the race, down with the loser... */ |
1133 | lck_mtx_unlock(&apple_protect_pager_lock); |
1134 | vm_object_deallocate(pager->backing_object); |
1135 | pager->backing_object = VM_OBJECT_NULL; |
1136 | #if CRYPT_INFO_DEBUG |
1137 | printf("CRYPT_INFO %s: %p ref %d (create pager match)\n" , |
1138 | __FUNCTION__, |
1139 | pager->crypt_info, |
1140 | pager->crypt_info->crypt_refcnt); |
1141 | #endif /* CRYPT_INFO_DEBUG */ |
1142 | crypt_info_deallocate(pager->crypt_info); |
1143 | pager->crypt_info = NULL; |
1144 | kfree(pager, sizeof (*pager)); |
1145 | /* ... and go with the winner */ |
1146 | pager = pager2; |
1147 | /* let the winner make sure the pager gets ready */ |
1148 | return pager; |
1149 | } |
1150 | |
1151 | /* enter new pager at the head of our list of pagers */ |
1152 | queue_enter_first(&apple_protect_pager_queue, |
1153 | pager, |
1154 | apple_protect_pager_t, |
1155 | pager_queue); |
1156 | apple_protect_pager_count++; |
1157 | if (apple_protect_pager_count > apple_protect_pager_count_max) { |
1158 | apple_protect_pager_count_max = apple_protect_pager_count; |
1159 | } |
1160 | lck_mtx_unlock(&apple_protect_pager_lock); |
1161 | |
1162 | kr = memory_object_create_named((memory_object_t) pager, |
1163 | 0, |
1164 | &control); |
1165 | assert(kr == KERN_SUCCESS); |
1166 | |
1167 | lck_mtx_lock(&apple_protect_pager_lock); |
1168 | /* the new pager is now ready to be used */ |
1169 | pager->is_ready = TRUE; |
1170 | lck_mtx_unlock(&apple_protect_pager_lock); |
1171 | |
1172 | /* wakeup anyone waiting for this pager to be ready */ |
1173 | thread_wakeup(&pager->is_ready); |
1174 | |
1175 | if (old_crypt_info != NULL && |
1176 | old_crypt_info != crypt_info) { |
1177 | /* we re-used an old crypt_info instead of using our new one */ |
1178 | #if CRYPT_INFO_DEBUG |
1179 | printf("CRYPT_INFO %s: deallocate %p ref %d " |
1180 | "(create used old)\n" , |
1181 | __FUNCTION__, |
1182 | crypt_info, |
1183 | crypt_info->crypt_refcnt); |
1184 | #endif /* CRYPT_INFO_DEBUG */ |
1185 | crypt_info_deallocate(crypt_info); |
1186 | crypt_info = NULL; |
1187 | } |
1188 | |
1189 | return pager; |
1190 | } |
1191 | |
1192 | /* |
1193 | * apple_protect_pager_setup() |
1194 | * |
1195 | * Provide the caller with a memory object backed by the provided |
1196 | * "backing_object" VM object. If such a memory object already exists, |
1197 | * re-use it, otherwise create a new memory object. |
1198 | */ |
1199 | memory_object_t |
1200 | ( |
1201 | vm_object_t backing_object, |
1202 | vm_object_offset_t backing_offset, |
1203 | vm_object_offset_t crypto_backing_offset, |
1204 | struct pager_crypt_info *crypt_info, |
1205 | vm_object_offset_t crypto_start, |
1206 | vm_object_offset_t crypto_end) |
1207 | { |
1208 | apple_protect_pager_t ; |
1209 | struct pager_crypt_info *old_crypt_info, *new_crypt_info; |
1210 | |
1211 | #if CRYPT_INFO_DEBUG |
1212 | printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n" , |
1213 | __FUNCTION__, |
1214 | crypt_info, |
1215 | crypt_info->page_decrypt, |
1216 | crypt_info->crypt_end, |
1217 | crypt_info->crypt_ops, |
1218 | crypt_info->crypt_refcnt); |
1219 | #endif /* CRYPT_INFO_DEBUG */ |
1220 | |
1221 | old_crypt_info = NULL; |
1222 | |
1223 | lck_mtx_lock(&apple_protect_pager_lock); |
1224 | |
1225 | queue_iterate(&apple_protect_pager_queue, |
1226 | pager, |
1227 | apple_protect_pager_t, |
1228 | pager_queue) { |
1229 | if ((pager->crypt_info->page_decrypt != |
1230 | crypt_info->page_decrypt) || |
1231 | (pager->crypt_info->crypt_end != |
1232 | crypt_info->crypt_end) || |
1233 | (pager->crypt_info->crypt_ops != |
1234 | crypt_info->crypt_ops)) { |
1235 | /* no match for "crypt_info": next pager */ |
1236 | continue; |
1237 | } |
1238 | /* found a match for crypt_info ... */ |
1239 | if (old_crypt_info) { |
1240 | /* ... already switched to that crypt_info */ |
1241 | assert(old_crypt_info == pager->crypt_info); |
1242 | } else { |
1243 | /* ... switch to that pager's crypt_info */ |
1244 | old_crypt_info = pager->crypt_info; |
1245 | #if CRYPT_INFO_DEBUG |
1246 | printf("CRYPT_INFO %s: " |
1247 | "switching crypt_info from %p [%p,%p,%p,%d] " |
1248 | "to %p [%p,%p,%p,%d] from pager %p\n" , |
1249 | __FUNCTION__, |
1250 | crypt_info, |
1251 | crypt_info->page_decrypt, |
1252 | crypt_info->crypt_end, |
1253 | crypt_info->crypt_ops, |
1254 | crypt_info->crypt_refcnt, |
1255 | old_crypt_info, |
1256 | old_crypt_info->page_decrypt, |
1257 | old_crypt_info->crypt_end, |
1258 | old_crypt_info->crypt_ops, |
1259 | old_crypt_info->crypt_refcnt, |
1260 | pager); |
1261 | printf("CRYPT_INFO %s: %p ref %d (setup match)\n" , |
1262 | __FUNCTION__, |
1263 | pager->crypt_info, |
1264 | pager->crypt_info->crypt_refcnt); |
1265 | #endif /* CRYPT_INFO_DEBUG */ |
1266 | crypt_info_reference(pager->crypt_info); |
1267 | } |
1268 | |
1269 | if (pager->backing_object == backing_object && |
1270 | pager->backing_offset == backing_offset && |
1271 | pager->crypto_backing_offset == crypto_backing_offset && |
1272 | pager->crypto_start == crypto_start && |
1273 | pager->crypto_end == crypto_end) { |
1274 | /* full match: use that pager! */ |
1275 | assert(old_crypt_info == pager->crypt_info); |
1276 | assert(old_crypt_info->crypt_refcnt > 1); |
1277 | #if CRYPT_INFO_DEBUG |
1278 | printf("CRYPT_INFO %s: " |
1279 | "pager match with %p crypt_info %p\n" , |
1280 | __FUNCTION__, |
1281 | pager, |
1282 | pager->crypt_info); |
1283 | printf("CRYPT_INFO %s: deallocate %p ref %d " |
1284 | "(pager match)\n" , |
1285 | __FUNCTION__, |
1286 | old_crypt_info, |
1287 | old_crypt_info->crypt_refcnt); |
1288 | #endif /* CRYPT_INFO_DEBUG */ |
1289 | /* release the extra ref on crypt_info we got above */ |
1290 | crypt_info_deallocate(old_crypt_info); |
1291 | assert(old_crypt_info->crypt_refcnt > 0); |
1292 | /* give extra reference on pager to the caller */ |
1293 | assert(pager->ref_count > 0); |
1294 | pager->ref_count++; |
1295 | break; |
1296 | } |
1297 | } |
1298 | if (queue_end(&apple_protect_pager_queue, |
1299 | (queue_entry_t) pager)) { |
1300 | lck_mtx_unlock(&apple_protect_pager_lock); |
1301 | /* no existing pager for this backing object */ |
1302 | pager = APPLE_PROTECT_PAGER_NULL; |
1303 | if (old_crypt_info) { |
1304 | /* use this old crypt_info for new pager */ |
1305 | new_crypt_info = old_crypt_info; |
1306 | #if CRYPT_INFO_DEBUG |
1307 | printf("CRYPT_INFO %s: " |
1308 | "will use old_crypt_info %p for new pager\n" , |
1309 | __FUNCTION__, |
1310 | old_crypt_info); |
1311 | #endif /* CRYPT_INFO_DEBUG */ |
1312 | } else { |
1313 | /* allocate a new crypt_info for new pager */ |
1314 | new_crypt_info = kalloc(sizeof (*new_crypt_info)); |
1315 | *new_crypt_info = *crypt_info; |
1316 | new_crypt_info->crypt_refcnt = 1; |
1317 | #if CRYPT_INFO_DEBUG |
1318 | printf("CRYPT_INFO %s: " |
1319 | "will use new_crypt_info %p for new pager\n" , |
1320 | __FUNCTION__, |
1321 | new_crypt_info); |
1322 | #endif /* CRYPT_INFO_DEBUG */ |
1323 | } |
1324 | if (new_crypt_info == NULL) { |
1325 | /* can't create new pager without a crypt_info */ |
1326 | } else { |
1327 | /* create new pager */ |
1328 | pager = apple_protect_pager_create( |
1329 | backing_object, |
1330 | backing_offset, |
1331 | crypto_backing_offset, |
1332 | new_crypt_info, |
1333 | crypto_start, |
1334 | crypto_end); |
1335 | } |
1336 | if (pager == APPLE_PROTECT_PAGER_NULL) { |
1337 | /* could not create a new pager */ |
1338 | if (new_crypt_info == old_crypt_info) { |
1339 | /* release extra reference on old_crypt_info */ |
1340 | #if CRYPT_INFO_DEBUG |
1341 | printf("CRYPT_INFO %s: deallocate %p ref %d " |
1342 | "(create fail old_crypt_info)\n" , |
1343 | __FUNCTION__, |
1344 | old_crypt_info, |
1345 | old_crypt_info->crypt_refcnt); |
1346 | #endif /* CRYPT_INFO_DEBUG */ |
1347 | crypt_info_deallocate(old_crypt_info); |
1348 | old_crypt_info = NULL; |
1349 | } else { |
1350 | /* release unused new_crypt_info */ |
1351 | assert(new_crypt_info->crypt_refcnt == 1); |
1352 | #if CRYPT_INFO_DEBUG |
1353 | printf("CRYPT_INFO %s: deallocate %p ref %d " |
1354 | "(create fail new_crypt_info)\n" , |
1355 | __FUNCTION__, |
1356 | new_crypt_info, |
1357 | new_crypt_info->crypt_refcnt); |
1358 | #endif /* CRYPT_INFO_DEBUG */ |
1359 | crypt_info_deallocate(new_crypt_info); |
1360 | new_crypt_info = NULL; |
1361 | } |
1362 | return MEMORY_OBJECT_NULL; |
1363 | } |
1364 | lck_mtx_lock(&apple_protect_pager_lock); |
1365 | } else { |
1366 | assert(old_crypt_info == pager->crypt_info); |
1367 | } |
1368 | |
1369 | while (!pager->is_ready) { |
1370 | lck_mtx_sleep(&apple_protect_pager_lock, |
1371 | LCK_SLEEP_DEFAULT, |
1372 | &pager->is_ready, |
1373 | THREAD_UNINT); |
1374 | } |
1375 | lck_mtx_unlock(&apple_protect_pager_lock); |
1376 | |
1377 | return (memory_object_t) pager; |
1378 | } |
1379 | |
1380 | void |
1381 | (void) |
1382 | { |
1383 | apple_protect_pager_t , ; |
1384 | queue_head_t trim_queue; |
1385 | int num_trim; |
1386 | int count_unmapped; |
1387 | |
1388 | lck_mtx_lock(&apple_protect_pager_lock); |
1389 | |
1390 | /* |
1391 | * We have too many pagers, try and trim some unused ones, |
1392 | * starting with the oldest pager at the end of the queue. |
1393 | */ |
1394 | queue_init(&trim_queue); |
1395 | num_trim = 0; |
1396 | |
1397 | for (pager = (apple_protect_pager_t) |
1398 | queue_last(&apple_protect_pager_queue); |
1399 | !queue_end(&apple_protect_pager_queue, |
1400 | (queue_entry_t) pager); |
1401 | pager = prev_pager) { |
1402 | /* get prev elt before we dequeue */ |
1403 | prev_pager = (apple_protect_pager_t) |
1404 | queue_prev(&pager->pager_queue); |
1405 | |
1406 | if (pager->ref_count == 2 && |
1407 | pager->is_ready && |
1408 | !pager->is_mapped) { |
1409 | /* this pager can be trimmed */ |
1410 | num_trim++; |
1411 | /* remove this pager from the main list ... */ |
1412 | apple_protect_pager_dequeue(pager); |
1413 | /* ... and add it to our trim queue */ |
1414 | queue_enter_first(&trim_queue, |
1415 | pager, |
1416 | apple_protect_pager_t, |
1417 | pager_queue); |
1418 | |
1419 | count_unmapped = (apple_protect_pager_count - |
1420 | apple_protect_pager_count_mapped); |
1421 | if (count_unmapped <= apple_protect_pager_cache_limit) { |
1422 | /* we have enough pagers to trim */ |
1423 | break; |
1424 | } |
1425 | } |
1426 | } |
1427 | if (num_trim > apple_protect_pager_num_trim_max) { |
1428 | apple_protect_pager_num_trim_max = num_trim; |
1429 | } |
1430 | apple_protect_pager_num_trim_total += num_trim; |
1431 | |
1432 | lck_mtx_unlock(&apple_protect_pager_lock); |
1433 | |
1434 | /* terminate the trimmed pagers */ |
1435 | while (!queue_empty(&trim_queue)) { |
1436 | queue_remove_first(&trim_queue, |
1437 | pager, |
1438 | apple_protect_pager_t, |
1439 | pager_queue); |
1440 | pager->pager_queue.next = NULL; |
1441 | pager->pager_queue.prev = NULL; |
1442 | assert(pager->ref_count == 2); |
1443 | /* |
1444 | * We can't call deallocate_internal() because the pager |
1445 | * has already been dequeued, but we still need to remove |
1446 | * a reference. |
1447 | */ |
1448 | pager->ref_count--; |
1449 | apple_protect_pager_terminate_internal(pager); |
1450 | } |
1451 | } |
1452 | |
1453 | |
1454 | void |
1455 | crypt_info_reference( |
1456 | struct pager_crypt_info *crypt_info) |
1457 | { |
1458 | assert(crypt_info->crypt_refcnt != 0); |
1459 | #if CRYPT_INFO_DEBUG |
1460 | printf("CRYPT_INFO %s: %p ref %d -> %d\n" , |
1461 | __FUNCTION__, |
1462 | crypt_info, |
1463 | crypt_info->crypt_refcnt, |
1464 | crypt_info->crypt_refcnt + 1); |
1465 | #endif /* CRYPT_INFO_DEBUG */ |
1466 | OSAddAtomic(+1, &crypt_info->crypt_refcnt); |
1467 | } |
1468 | |
1469 | void |
1470 | crypt_info_deallocate( |
1471 | struct pager_crypt_info *crypt_info) |
1472 | { |
1473 | #if CRYPT_INFO_DEBUG |
1474 | printf("CRYPT_INFO %s: %p ref %d -> %d\n" , |
1475 | __FUNCTION__, |
1476 | crypt_info, |
1477 | crypt_info->crypt_refcnt, |
1478 | crypt_info->crypt_refcnt - 1); |
1479 | #endif /* CRYPT_INFO_DEBUG */ |
1480 | OSAddAtomic(-1, &crypt_info->crypt_refcnt); |
1481 | if (crypt_info->crypt_refcnt == 0) { |
1482 | /* deallocate any crypt module data */ |
1483 | if (crypt_info->crypt_end) { |
1484 | crypt_info->crypt_end(crypt_info->crypt_ops); |
1485 | crypt_info->crypt_end = NULL; |
1486 | } |
1487 | #if CRYPT_INFO_DEBUG |
1488 | printf("CRYPT_INFO %s: freeing %p\n" , |
1489 | __FUNCTION__, |
1490 | crypt_info); |
1491 | #endif /* CRYPT_INFO_DEBUG */ |
1492 | kfree(crypt_info, sizeof (*crypt_info)); |
1493 | crypt_info = NULL; |
1494 | } |
1495 | } |
1496 | |