1 | /* |
2 | * Copyright (c) 2018-2020 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <sys/errno.h> |
30 | |
31 | #include <mach/mach_types.h> |
32 | #include <mach/mach_traps.h> |
33 | #include <mach/host_priv.h> |
34 | #include <mach/kern_return.h> |
35 | #include <mach/memory_object_control.h> |
36 | #include <mach/memory_object_types.h> |
37 | #include <mach/port.h> |
38 | #include <mach/policy.h> |
39 | #include <mach/upl.h> |
40 | #include <mach/thread_act.h> |
41 | #include <mach/mach_vm.h> |
42 | |
43 | #include <kern/host.h> |
44 | #include <kern/kalloc.h> |
45 | #include <kern/queue.h> |
46 | #include <kern/thread.h> |
47 | #include <kern/ipc_kobject.h> |
48 | |
49 | #include <ipc/ipc_port.h> |
50 | #include <ipc/ipc_space.h> |
51 | |
52 | #include <vm/memory_object.h> |
53 | #include <vm/vm_kern.h> |
54 | #include <vm/vm_fault.h> |
55 | #include <vm/vm_map.h> |
56 | #include <vm/vm_pageout.h> |
57 | #include <vm/vm_protos.h> |
58 | #include <vm/vm_shared_region.h> |
59 | |
60 | #include <sys/kdebug_triage.h> |
61 | |
62 | #if __has_feature(ptrauth_calls) |
63 | #include <ptrauth.h> |
64 | extern boolean_t diversify_user_jop; |
65 | #endif /* __has_feature(ptrauth_calls) */ |
66 | |
67 | /* |
68 | * SHARED REGION MEMORY PAGER |
69 | * |
70 | * This external memory manager (EMM) handles mappings of a dyld shared cache |
71 | * in shared regions, applying any necessary modifications (sliding, |
72 | * pointer signing, ...). |
73 | * |
74 | * It mostly handles page-in requests (from memory_object_data_request()) by |
75 | * getting the original data from its backing VM object, itself backed by |
76 | * the dyld shared cache file, modifying it if needed and providing it to VM. |
77 | * |
78 | * The modified pages will never be dirtied, so the memory manager doesn't |
79 | * need to handle page-out requests (from memory_object_data_return()). The |
80 | * pages need to be mapped copy-on-write, so that the originals stay clean. |
81 | * |
82 | * We don't expect to have to handle a large number of shared cache files, |
83 | * so the data structures are very simple (simple linked list) for now. |
84 | */ |
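/*
 * Illustrative sketch only (kept out of the build with #if 0, not part of
 * this pager): the call pattern a client such as vm_shared_region.c is
 * expected to follow, assuming it already holds a backing VM object for the
 * shared cache file and a populated slide info structure. The helper name
 * and locals below are hypothetical.
 */
#if 0
static void
shared_region_pager_usage_sketch(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *si,
    uint64_t jop_key)
{
    memory_object_t sr_handle;

    /* create a pager for this range of the shared cache */
    sr_handle = shared_region_pager_setup(backing_object,
        backing_offset, si, jop_key);
    if (sr_handle == MEMORY_OBJECT_NULL) {
        return;
    }

    /*
     * The handle would then be handed to the VM map code (e.g.
     * vm_map_enter_mem_object()) with "needs_copy" set, so the slid and
     * signed pages are mapped copy-on-write and the pager's copies stay
     * clean.
     */

    /* drop the caller's reference once the mapping holds its own */
    memory_object_deallocate(sr_handle);
}
#endif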
85 | |
86 | /* forward declarations */ |
87 | void shared_region_pager_reference(memory_object_t mem_obj); |
88 | void shared_region_pager_deallocate(memory_object_t mem_obj); |
89 | kern_return_t shared_region_pager_init(memory_object_t mem_obj, |
90 | memory_object_control_t control, |
91 | memory_object_cluster_size_t pg_size); |
92 | kern_return_t shared_region_pager_terminate(memory_object_t mem_obj); |
93 | kern_return_t shared_region_pager_data_request(memory_object_t mem_obj, |
94 | memory_object_offset_t offset, |
95 | memory_object_cluster_size_t length, |
96 | vm_prot_t protection_required, |
97 | memory_object_fault_info_t fault_info); |
98 | kern_return_t shared_region_pager_data_return(memory_object_t mem_obj, |
99 | memory_object_offset_t offset, |
100 | memory_object_cluster_size_t data_cnt, |
101 | memory_object_offset_t *resid_offset, |
102 | int *io_error, |
103 | boolean_t dirty, |
104 | boolean_t kernel_copy, |
105 | int upl_flags); |
106 | kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj, |
107 | memory_object_offset_t offset, |
108 | memory_object_cluster_size_t data_cnt); |
109 | kern_return_t shared_region_pager_map(memory_object_t mem_obj, |
110 | vm_prot_t prot); |
111 | kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj); |
112 | boolean_t shared_region_pager_backing_object( |
113 | memory_object_t mem_obj, |
114 | memory_object_offset_t mem_obj_offset, |
115 | vm_object_t *backing_object, |
116 | vm_object_offset_t *backing_offset); |
117 | |
118 | /* |
119 | * Vector of VM operations for this EMM. |
120 | * These routines are invoked by VM via the memory_object_*() interfaces. |
121 | */ |
const struct memory_object_pager_ops shared_region_pager_ops = {
    .memory_object_reference = shared_region_pager_reference,
    .memory_object_deallocate = shared_region_pager_deallocate,
    .memory_object_init = shared_region_pager_init,
    .memory_object_terminate = shared_region_pager_terminate,
    .memory_object_data_request = shared_region_pager_data_request,
    .memory_object_data_return = shared_region_pager_data_return,
    .memory_object_data_initialize = shared_region_pager_data_initialize,
    .memory_object_map = shared_region_pager_map,
    .memory_object_last_unmap = shared_region_pager_last_unmap,
    .memory_object_backing_object = shared_region_pager_backing_object,
    .memory_object_pager_name = "shared_region"
};
135 | |
136 | #if __has_feature(ptrauth_calls) |
137 | /* |
138 | * Track mappings between shared_region_id and the key used to sign |
139 | * authenticated pointers. |
140 | */ |
141 | typedef struct shared_region_jop_key_map { |
142 | queue_chain_t srk_queue; |
143 | char *srk_shared_region_id; |
144 | uint64_t srk_jop_key; |
145 | os_refcnt_t srk_ref_count; /* count of tasks active with this shared_region_id */ |
146 | } *shared_region_jop_key_map_t; |
147 | |
148 | os_refgrp_decl(static, srk_refgrp, "shared region key ref cnts" , NULL); |
149 | |
150 | /* |
151 | * The list is protected by the "shared_region_key_map" lock. |
152 | */ |
153 | int shared_region_key_count = 0; /* number of active shared_region_id keys */ |
154 | queue_head_t shared_region_jop_key_queue = QUEUE_HEAD_INITIALIZER(shared_region_jop_key_queue); |
155 | LCK_GRP_DECLARE(shared_region_jop_key_lck_grp, "shared_region_jop_key" ); |
156 | LCK_MTX_DECLARE(shared_region_jop_key_lock, &shared_region_jop_key_lck_grp); |
157 | |
/*
 * Find the pointer signing key for the given shared_region_id.
 */
161 | uint64_t |
162 | shared_region_find_key(char *shared_region_id) |
163 | { |
164 | shared_region_jop_key_map_t region; |
165 | uint64_t key; |
166 | |
167 | lck_mtx_lock(&shared_region_jop_key_lock); |
168 | queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) { |
169 | if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) { |
170 | goto found; |
171 | } |
172 | } |
173 | panic("shared_region_find_key() no key for region '%s'" , shared_region_id); |
174 | |
175 | found: |
176 | key = region->srk_jop_key; |
177 | lck_mtx_unlock(&shared_region_jop_key_lock); |
178 | return key; |
179 | } |
180 | |
/*
 * Return an authentication key to use for the given shared_region_id.
 * If inherit is TRUE, then the key must match inherited_key.
 * Creates an additional reference when successful.
 */
186 | void |
187 | shared_region_key_alloc(char *shared_region_id, bool inherit, uint64_t inherited_key) |
188 | { |
189 | shared_region_jop_key_map_t region; |
190 | shared_region_jop_key_map_t new = NULL; |
191 | |
192 | assert(shared_region_id != NULL); |
193 | again: |
194 | lck_mtx_lock(&shared_region_jop_key_lock); |
195 | queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) { |
196 | if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) { |
197 | os_ref_retain_locked(®ion->srk_ref_count); |
198 | goto done; |
199 | } |
200 | } |
201 | |
202 | /* |
203 | * ID was not found, if first time, allocate a new one and redo the lookup. |
204 | */ |
205 | if (new == NULL) { |
206 | lck_mtx_unlock(&shared_region_jop_key_lock); |
207 | new = kalloc_type(struct shared_region_jop_key_map, Z_WAITOK); |
208 | uint_t len = strlen(shared_region_id) + 1; |
209 | new->srk_shared_region_id = kalloc_data(len, Z_WAITOK); |
210 | strlcpy(new->srk_shared_region_id, shared_region_id, len); |
211 | os_ref_init(&new->srk_ref_count, &srk_refgrp); |
212 | |
213 | if (diversify_user_jop && inherit) { |
214 | new->srk_jop_key = inherited_key; |
215 | } else if (diversify_user_jop && strlen(shared_region_id) > 0) { |
216 | new->srk_jop_key = generate_jop_key(); |
217 | } else { |
218 | new->srk_jop_key = ml_default_jop_pid(); |
219 | } |
220 | |
221 | goto again; |
222 | } |
223 | |
224 | /* |
225 | * Use the newly allocated entry |
226 | */ |
227 | ++shared_region_key_count; |
228 | queue_enter_first(&shared_region_jop_key_queue, new, shared_region_jop_key_map_t, srk_queue); |
229 | region = new; |
230 | new = NULL; |
231 | |
232 | done: |
233 | if (inherit && inherited_key != region->srk_jop_key) { |
234 | panic("shared_region_key_alloc() inherited key mismatch" ); |
235 | } |
236 | lck_mtx_unlock(&shared_region_jop_key_lock); |
237 | |
238 | /* |
239 | * free any unused new entry |
240 | */ |
241 | if (new != NULL) { |
242 | kfree_data(new->srk_shared_region_id, |
243 | strlen(new->srk_shared_region_id) + 1); |
244 | kfree_type(struct shared_region_jop_key_map, new); |
245 | } |
246 | } |
247 | |
248 | /* |
249 | * Mark the end of using a shared_region_id's key |
250 | */ |
251 | extern void |
252 | shared_region_key_dealloc(char *shared_region_id) |
253 | { |
254 | shared_region_jop_key_map_t region; |
255 | |
256 | assert(shared_region_id != NULL); |
257 | lck_mtx_lock(&shared_region_jop_key_lock); |
258 | queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) { |
259 | if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) { |
260 | goto done; |
261 | } |
262 | } |
263 | panic("shared_region_key_dealloc() Shared region ID '%s' not found" , shared_region_id); |
264 | |
265 | done: |
266 | if (os_ref_release_locked(®ion->srk_ref_count) == 0) { |
267 | queue_remove(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue); |
268 | --shared_region_key_count; |
269 | } else { |
270 | region = NULL; |
271 | } |
272 | lck_mtx_unlock(&shared_region_jop_key_lock); |
273 | |
274 | if (region != NULL) { |
275 | kfree_data(region->srk_shared_region_id, |
276 | strlen(region->srk_shared_region_id) + 1); |
277 | kfree_type(struct shared_region_jop_key_map, region); |
278 | } |
279 | } |
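
/*
 * Illustrative sketch only (kept out of the build with #if 0): the expected
 * lifecycle of a shared_region_id's signing key, assuming the caller owns
 * the id string and pairs each shared_region_key_alloc() with a later
 * shared_region_key_dealloc(). The function name is hypothetical.
 */
#if 0
static void
shared_region_jop_key_lifecycle_sketch(char *shared_region_id)
{
    uint64_t key;

    /* first use of this id: creates the map entry and takes a reference */
    shared_region_key_alloc(shared_region_id, false, 0);

    /* look up the key used to sign pointers for this shared region */
    key = shared_region_find_key(shared_region_id);
    (void)key;

    /* last use: drops the reference and frees the entry */
    shared_region_key_dealloc(shared_region_id);
}
#endif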
280 | #endif /* __has_feature(ptrauth_calls) */ |
281 | |
282 | /* |
283 | * The "shared_region_pager" describes a memory object backed by |
284 | * the "shared_region" EMM. |
285 | */ |
typedef struct shared_region_pager {
    struct memory_object       srp_header;         /* mandatory generic header */

    /* pager-specific data */
    queue_chain_t              srp_queue;          /* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define srp_ref_count          srp_header.mo_ref
#else
    os_ref_atomic_t            srp_ref_count;      /* active uses */
#endif
    bool                       srp_is_mapped;      /* has active mappings */
    bool                       srp_is_ready;       /* is this pager ready? */
    vm_object_t                srp_backing_object; /* VM object for shared cache */
    vm_object_offset_t         srp_backing_offset;
    vm_shared_region_slide_info_t srp_slide_info;
#if __has_feature(ptrauth_calls)
    uint64_t                   srp_jop_key;        /* zero if used for arm64 */
#endif /* __has_feature(ptrauth_calls) */
} *shared_region_pager_t;
#define SHARED_REGION_PAGER_NULL ((shared_region_pager_t) NULL)
306 | |
307 | /* |
308 | * List of memory objects managed by this EMM. |
309 | * The list is protected by the "shared_region_pager_lock" lock. |
310 | */ |
int shared_region_pager_count = 0;        /* number of pagers */
int shared_region_pager_count_mapped = 0; /* number of mapped pagers */
queue_head_t shared_region_pager_queue = QUEUE_HEAD_INITIALIZER(shared_region_pager_queue);
LCK_GRP_DECLARE(shared_region_pager_lck_grp, "shared_region_pager");
LCK_MTX_DECLARE(shared_region_pager_lock, &shared_region_pager_lck_grp);
316 | |
317 | /* |
318 | * Maximum number of unmapped pagers we're willing to keep around. |
319 | */ |
int shared_region_pager_cache_limit = 0;
321 | |
322 | /* |
323 | * Statistics & counters. |
324 | */ |
int shared_region_pager_count_max = 0;
int shared_region_pager_count_unmapped_max = 0;
int shared_region_pager_num_trim_max = 0;
int shared_region_pager_num_trim_total = 0;

uint64_t shared_region_pager_copied = 0;
uint64_t shared_region_pager_slid = 0;
uint64_t shared_region_pager_slid_error = 0;
uint64_t shared_region_pager_reclaimed = 0;
334 | |
335 | /* internal prototypes */ |
336 | shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj); |
void shared_region_pager_dequeue(shared_region_pager_t pager);
void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
    boolean_t locked);
void shared_region_pager_terminate_internal(shared_region_pager_t pager);
341 | void shared_region_pager_trim(void); |
342 | |
343 | |
344 | #if DEBUG |
345 | int shared_region_pagerdebug = 0; |
346 | #define PAGER_ALL 0xffffffff |
347 | #define PAGER_INIT 0x00000001 |
348 | #define PAGER_PAGEIN 0x00000002 |
349 | |
350 | #define PAGER_DEBUG(LEVEL, A) \ |
351 | MACRO_BEGIN \ |
352 | if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) { \ |
353 | printf A; \ |
354 | } \ |
355 | MACRO_END |
356 | #else |
#define PAGER_DEBUG(LEVEL, A)
358 | #endif |
359 | |
360 | /* |
361 | * shared_region_pager_init() |
362 | * |
363 | * Initialize the memory object and makes it ready to be used and mapped. |
364 | */ |
365 | kern_return_t |
shared_region_pager_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
    shared_region_pager_t pager;
375 | kern_return_t kr; |
376 | memory_object_attr_info_data_t attributes; |
377 | |
378 | PAGER_DEBUG(PAGER_ALL, |
379 | ("shared_region_pager_init: %p, %p, %x\n" , |
380 | mem_obj, control, pg_size)); |
381 | |
382 | if (control == MEMORY_OBJECT_CONTROL_NULL) { |
383 | return KERN_INVALID_ARGUMENT; |
384 | } |
385 | |
386 | pager = shared_region_pager_lookup(mem_obj); |
387 | |
388 | memory_object_control_reference(control); |
389 | |
390 | pager->srp_header.mo_control = control; |
391 | |
392 | attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY; |
393 | /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/ |
394 | attributes.cluster_size = (1 << (PAGE_SHIFT)); |
395 | attributes.may_cache_object = FALSE; |
396 | attributes.temporary = TRUE; |
397 | |
    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
403 | if (kr != KERN_SUCCESS) { |
404 | panic("shared_region_pager_init: " |
405 | "memory_object_change_attributes() failed" ); |
406 | } |
407 | |
408 | #if CONFIG_SECLUDED_MEMORY |
409 | if (secluded_for_filecache) { |
410 | #if 00 |
411 | /* |
412 | * XXX FBDP do we want this in the secluded pool? |
413 | * Ideally, we'd want the shared region used by Camera to |
414 | * NOT be in the secluded pool, but all other shared regions |
415 | * in the secluded pool... |
416 | */ |
417 | memory_object_mark_eligible_for_secluded(control, TRUE); |
418 | #endif /* 00 */ |
419 | } |
420 | #endif /* CONFIG_SECLUDED_MEMORY */ |
421 | |
422 | return KERN_SUCCESS; |
423 | } |
424 | |
425 | /* |
 * shared_region_pager_data_return()
427 | * |
428 | * Handles page-out requests from VM. This should never happen since |
429 | * the pages provided by this EMM are not supposed to be dirty or dirtied |
430 | * and VM should simply discard the contents and reclaim the pages if it |
431 | * needs to. |
432 | */ |
433 | kern_return_t |
shared_region_pager_data_return(
435 | __unused memory_object_t mem_obj, |
436 | __unused memory_object_offset_t offset, |
437 | __unused memory_object_cluster_size_t data_cnt, |
438 | __unused memory_object_offset_t *resid_offset, |
439 | __unused int *io_error, |
440 | __unused boolean_t dirty, |
441 | __unused boolean_t kernel_copy, |
442 | __unused int upl_flags) |
443 | { |
444 | panic("shared_region_pager_data_return: should never get called" ); |
445 | return KERN_FAILURE; |
446 | } |
447 | |
448 | kern_return_t |
shared_region_pager_data_initialize(
450 | __unused memory_object_t mem_obj, |
451 | __unused memory_object_offset_t offset, |
452 | __unused memory_object_cluster_size_t data_cnt) |
453 | { |
454 | panic("shared_region_pager_data_initialize: should never get called" ); |
455 | return KERN_FAILURE; |
456 | } |
457 | |
458 | /* |
459 | * shared_region_pager_data_request() |
460 | * |
461 | * Handles page-in requests from VM. |
462 | */ |
int shared_region_pager_data_request_debug = 0;
kern_return_t
shared_region_pager_data_request(
466 | memory_object_t mem_obj, |
467 | memory_object_offset_t offset, |
468 | memory_object_cluster_size_t length, |
469 | #if !DEBUG |
470 | __unused |
471 | #endif |
472 | vm_prot_t protection_required, |
473 | memory_object_fault_info_t mo_fault_info) |
474 | { |
    shared_region_pager_t pager;
476 | memory_object_control_t mo_control; |
477 | upl_t upl; |
478 | int upl_flags; |
479 | upl_size_t upl_size; |
480 | upl_page_info_t *upl_pl; |
481 | unsigned int pl_count; |
482 | vm_object_t src_top_object, src_page_object, dst_object; |
483 | kern_return_t kr, retval; |
484 | vm_offset_t src_vaddr, dst_vaddr; |
485 | vm_offset_t cur_offset; |
486 | vm_offset_t offset_in_page; |
487 | kern_return_t error_code; |
488 | vm_prot_t prot; |
489 | vm_page_t src_page, top_page; |
490 | int interruptible; |
491 | struct vm_object_fault_info fault_info; |
492 | mach_vm_offset_t slide_start_address; |
493 | u_int32_t slide_info_page_size; |
494 | |
495 | PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n" , mem_obj, offset, length, protection_required)); |
496 | |
497 | retval = KERN_SUCCESS; |
498 | src_top_object = VM_OBJECT_NULL; |
499 | src_page_object = VM_OBJECT_NULL; |
500 | upl = NULL; |
501 | upl_pl = NULL; |
502 | fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info); |
503 | fault_info.stealth = TRUE; |
504 | fault_info.io_sync = FALSE; |
505 | fault_info.mark_zf_absent = FALSE; |
506 | fault_info.batch_pmap_op = FALSE; |
507 | interruptible = fault_info.interruptible; |
508 | |
509 | pager = shared_region_pager_lookup(mem_obj); |
510 | assert(pager->srp_is_ready); |
511 | assert(os_ref_get_count_raw(&pager->srp_ref_count) > 1); /* pager is alive */ |
512 | assert(pager->srp_is_mapped); /* pager is mapped */ |
513 | |
514 | PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n" , mem_obj, offset, length, protection_required, pager)); |
515 | |
516 | /* |
517 | * Gather in a UPL all the VM pages requested by VM. |
518 | */ |
519 | mo_control = pager->srp_header.mo_control; |
520 | |
521 | upl_size = length; |
522 | upl_flags = |
523 | UPL_RET_ONLY_ABSENT | |
524 | UPL_SET_LITE | |
525 | UPL_NO_SYNC | |
526 | UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ |
527 | UPL_SET_INTERNAL; |
528 | pl_count = 0; |
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
    if (kr != KERN_SUCCESS) {
        ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_NO_UPL), 0 /* arg */);
        retval = kr;
        goto done;
    }
    dst_object = memory_object_control_to_vm_object(mo_control);
538 | assert(dst_object != VM_OBJECT_NULL); |
539 | |
540 | /* |
541 | * We'll map the original data in the kernel address space from the |
542 | * backing VM object (itself backed by the shared cache file via |
543 | * the vnode pager). |
544 | */ |
545 | src_top_object = pager->srp_backing_object; |
546 | assert(src_top_object != VM_OBJECT_NULL); |
547 | vm_object_reference(src_top_object); /* keep the source object alive */ |
548 | |
549 | slide_start_address = pager->srp_slide_info->si_slid_address; |
550 | slide_info_page_size = pager->srp_slide_info->si_slide_info_entry->version == 1 ? PAGE_SIZE_FOR_SR_SLIDE : pager->srp_slide_info->si_slide_info_entry->page_size; |
551 | |
552 | fault_info.lo_offset += pager->srp_backing_offset; |
553 | fault_info.hi_offset += pager->srp_backing_offset; |
554 | |
555 | /* |
556 | * Fill in the contents of the pages requested by VM. |
557 | */ |
558 | upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl); |
559 | pl_count = length / PAGE_SIZE; |
560 | for (cur_offset = 0; |
561 | retval == KERN_SUCCESS && cur_offset < length; |
562 | cur_offset += PAGE_SIZE) { |
563 | ppnum_t dst_pnum; |
564 | |
        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
566 | /* this page is not in the UPL: skip it */ |
567 | continue; |
568 | } |
569 | |
570 | /* |
571 | * Map the source (dyld shared cache) page in the kernel's |
572 | * virtual address space. |
573 | * We already hold a reference on the src_top_object. |
574 | */ |
575 | retry_src_fault: |
576 | vm_object_lock(src_top_object); |
577 | vm_object_paging_begin(src_top_object); |
578 | error_code = 0; |
579 | prot = VM_PROT_READ; |
580 | src_page = VM_PAGE_NULL; |
        kr = vm_fault_page(src_top_object,
            pager->srp_backing_offset + offset + cur_offset,
            VM_PROT_READ,
            FALSE,
            FALSE, /* src_page not looked up */
            &prot,
            &src_page,
            &top_page,
            NULL,
            &error_code,
            FALSE,
            &fault_info);
593 | switch (kr) { |
594 | case VM_FAULT_SUCCESS: |
595 | break; |
596 | case VM_FAULT_RETRY: |
597 | goto retry_src_fault; |
598 | case VM_FAULT_MEMORY_SHORTAGE: |
599 | if (vm_page_wait(interruptible)) { |
600 | goto retry_src_fault; |
601 | } |
            ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
603 | OS_FALLTHROUGH; |
604 | case VM_FAULT_INTERRUPTED: |
605 | retval = MACH_SEND_INTERRUPTED; |
606 | goto done; |
607 | case VM_FAULT_SUCCESS_NO_VM_PAGE: |
608 | /* success but no VM page: fail */ |
609 | vm_object_paging_end(src_top_object); |
610 | vm_object_unlock(src_top_object); |
611 | OS_FALLTHROUGH; |
612 | case VM_FAULT_MEMORY_ERROR: |
613 | /* the page is not there ! */ |
614 | if (error_code) { |
615 | retval = error_code; |
616 | } else { |
617 | retval = KERN_MEMORY_ERROR; |
618 | } |
619 | goto done; |
620 | default: |
621 | panic("shared_region_pager_data_request: " |
622 | "vm_fault_page() unexpected error 0x%x\n" , |
623 | kr); |
624 | } |
625 | assert(src_page != VM_PAGE_NULL); |
626 | assert(src_page->vmp_busy); |
627 | |
628 | if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { |
629 | vm_page_lockspin_queues(); |
630 | if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { |
                vm_page_speculate(src_page, FALSE);
632 | } |
633 | vm_page_unlock_queues(); |
634 | } |
635 | |
636 | /* |
637 | * Establish pointers to the source |
638 | * and destination physical pages. |
639 | */ |
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);

        src_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
            << PAGE_SHIFT);
        dst_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
649 | src_page_object = VM_PAGE_OBJECT(src_page); |
650 | |
651 | /* |
652 | * Validate the original page... |
653 | */ |
654 | if (src_page_object->code_signed) { |
            vm_page_validate_cs_mapped(
                src_page, PAGE_SIZE, 0,
                (const void *) src_vaddr);
658 | } |
659 | /* |
660 | * ... and transfer the results to the destination page. |
661 | */ |
662 | UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, |
663 | src_page->vmp_cs_validated); |
664 | UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, |
665 | src_page->vmp_cs_tainted); |
666 | UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, |
667 | src_page->vmp_cs_nx); |
668 | |
669 | /* |
670 | * The page provider might access a mapped file, so let's |
671 | * release the object lock for the source page to avoid a |
672 | * potential deadlock. |
673 | * The source page is kept busy and we have a |
674 | * "paging_in_progress" reference on its object, so it's safe |
675 | * to unlock the object here. |
676 | */ |
677 | assert(src_page->vmp_busy); |
678 | assert(src_page_object->paging_in_progress > 0); |
679 | vm_object_unlock(src_page_object); |
680 | |
681 | /* |
682 | * Process the original contents of the source page |
683 | * into the destination page. |
684 | */ |
685 | for (offset_in_page = 0; |
686 | offset_in_page < PAGE_SIZE; |
687 | offset_in_page += slide_info_page_size) { |
688 | vm_object_offset_t chunk_offset; |
689 | vm_object_offset_t offset_in_backing_object; |
690 | vm_object_offset_t offset_in_sliding_range; |
691 | |
692 | chunk_offset = offset + cur_offset + offset_in_page; |
693 | |
            bcopy((const char *)(src_vaddr + offset_in_page),
                (char *)(dst_vaddr + offset_in_page),
                slide_info_page_size);
698 | |
699 | offset_in_backing_object = (chunk_offset + |
700 | pager->srp_backing_offset); |
701 | if ((offset_in_backing_object < pager->srp_slide_info->si_start) || |
702 | (offset_in_backing_object >= pager->srp_slide_info->si_end)) { |
703 | /* chunk is outside of sliding range: done */ |
704 | shared_region_pager_copied++; |
705 | continue; |
706 | } |
707 | |
708 | offset_in_sliding_range = offset_in_backing_object - pager->srp_slide_info->si_start; |
            kr = vm_shared_region_slide_page(pager->srp_slide_info,
                dst_vaddr + offset_in_page,
                (mach_vm_offset_t) (offset_in_sliding_range + slide_start_address),
                (uint32_t) (offset_in_sliding_range / slide_info_page_size),
#if __has_feature(ptrauth_calls)
                pager->srp_slide_info->si_ptrauth ? pager->srp_jop_key : 0
#else /* __has_feature(ptrauth_calls) */
                0
#endif /* __has_feature(ptrauth_calls) */
                );
            if (shared_region_pager_data_request_debug) {
                printf("shared_region_data_request"
721 | "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx " |
722 | "in sliding range [0x%llx:0x%llx]: " |
723 | "SLIDE offset 0x%llx=" |
724 | "(0x%llx+0x%llx+0x%llx+0x%04llx)" |
725 | "[0x%016llx 0x%016llx] " |
726 | "code_signed=%d " |
727 | "cs_validated=%d " |
728 | "cs_tainted=%d " |
729 | "cs_nx=%d " |
730 | "kr=0x%x\n" , |
731 | pager, |
732 | offset, |
733 | (uint64_t) cur_offset, |
734 | (uint64_t) offset_in_page, |
735 | chunk_offset, |
736 | pager->srp_slide_info->si_start, |
737 | pager->srp_slide_info->si_end, |
738 | (pager->srp_backing_offset + |
739 | offset + |
740 | cur_offset + |
741 | offset_in_page), |
742 | pager->srp_backing_offset, |
743 | offset, |
744 | (uint64_t) cur_offset, |
745 | (uint64_t) offset_in_page, |
746 | *(uint64_t *)(dst_vaddr + offset_in_page), |
747 | *(uint64_t *)(dst_vaddr + offset_in_page + 8), |
748 | src_page_object->code_signed, |
749 | src_page->vmp_cs_validated, |
750 | src_page->vmp_cs_tainted, |
751 | src_page->vmp_cs_nx, |
752 | kr); |
753 | } |
754 | if (kr != KERN_SUCCESS) { |
                ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_SLIDE_ERROR), 0 /* arg */);
756 | shared_region_pager_slid_error++; |
757 | retval = KERN_MEMORY_ERROR; |
758 | break; |
759 | } |
760 | shared_region_pager_slid++; |
761 | } |
762 | |
763 | assert(VM_PAGE_OBJECT(src_page) == src_page_object); |
764 | assert(src_page->vmp_busy); |
765 | assert(src_page_object->paging_in_progress > 0); |
766 | vm_object_lock(src_page_object); |
767 | |
768 | /* |
769 | * Cleanup the result of vm_fault_page() of the source page. |
770 | */ |
771 | PAGE_WAKEUP_DONE(src_page); |
772 | src_page = VM_PAGE_NULL; |
773 | vm_object_paging_end(src_page_object); |
774 | vm_object_unlock(src_page_object); |
775 | |
776 | if (top_page != VM_PAGE_NULL) { |
777 | assert(VM_PAGE_OBJECT(top_page) == src_top_object); |
778 | vm_object_lock(src_top_object); |
779 | VM_PAGE_FREE(top_page); |
780 | vm_object_paging_end(src_top_object); |
781 | vm_object_unlock(src_top_object); |
782 | } |
783 | } |
784 | |
785 | done: |
786 | if (upl != NULL) { |
787 | /* clean up the UPL */ |
788 | |
789 | /* |
790 | * The pages are currently dirty because we've just been |
791 | * writing on them, but as far as we're concerned, they're |
792 | * clean since they contain their "original" contents as |
793 | * provided by us, the pager. |
794 | * Tell the UPL to mark them "clean". |
795 | */ |
796 | upl_clear_dirty(upl, TRUE); |
797 | |
798 | /* abort or commit the UPL */ |
799 | if (retval != KERN_SUCCESS) { |
            upl_abort(upl, 0);
        } else {
            boolean_t empty;
            assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
                "upl %p offset 0x%llx size 0x%x\n",
                upl, upl->u_offset, upl->u_size);
            upl_commit_range(upl, 0, upl->u_size,
                UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
                upl_pl, pl_count, &empty);
809 | } |
810 | |
811 | /* and deallocate the UPL */ |
812 | upl_deallocate(upl); |
813 | upl = NULL; |
814 | } |
815 | if (src_top_object != VM_OBJECT_NULL) { |
        vm_object_deallocate(src_top_object);
817 | } |
818 | return retval; |
819 | } |
820 | |
821 | /* |
822 | * shared_region_pager_reference() |
823 | * |
824 | * Get a reference on this memory object. |
825 | * For external usage only. Assumes that the initial reference count is not 0, |
826 | * i.e one should not "revive" a dead pager this way. |
827 | */ |
828 | void |
shared_region_pager_reference(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
    lck_mtx_unlock(&shared_region_pager_lock);
839 | } |
840 | |
841 | |
842 | /* |
843 | * shared_region_pager_dequeue: |
844 | * |
845 | * Removes a pager from the list of pagers. |
846 | * |
847 | * The caller must hold "shared_region_pager_lock". |
848 | */ |
849 | void |
shared_region_pager_dequeue(
    shared_region_pager_t pager)
852 | { |
853 | assert(!pager->srp_is_mapped); |
854 | |
855 | queue_remove(&shared_region_pager_queue, |
856 | pager, |
857 | shared_region_pager_t, |
858 | srp_queue); |
859 | pager->srp_queue.next = NULL; |
860 | pager->srp_queue.prev = NULL; |
861 | |
862 | shared_region_pager_count--; |
863 | } |
864 | |
865 | /* |
866 | * shared_region_pager_terminate_internal: |
867 | * |
868 | * Trigger the asynchronous termination of the memory object associated |
869 | * with this pager. |
870 | * When the memory object is terminated, there will be one more call |
871 | * to memory_object_deallocate() (i.e. shared_region_pager_deallocate()) |
872 | * to finish the clean up. |
873 | * |
874 | * "shared_region_pager_lock" should not be held by the caller. |
875 | * We don't need the lock because the pager has already been removed from |
876 | * the pagers' list and is now ours exclusively. |
877 | */ |
878 | void |
shared_region_pager_terminate_internal(
    shared_region_pager_t pager)
{
    assert(pager->srp_is_ready);
    assert(!pager->srp_is_mapped);
    assert(os_ref_get_count_raw(&pager->srp_ref_count) == 1);

    if (pager->srp_backing_object != VM_OBJECT_NULL) {
        vm_object_deallocate(pager->srp_backing_object);
        pager->srp_backing_object = VM_OBJECT_NULL;
    }
    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->srp_header.mo_control, VM_OBJECT_DESTROY_UNKNOWN_REASON);
892 | } |
893 | |
894 | /* |
895 | * shared_region_pager_deallocate_internal() |
896 | * |
897 | * Release a reference on this pager and free it when the last reference goes away. |
898 | * Can be called with shared_region_pager_lock held or not, but always returns |
899 | * with it unlocked. |
900 | */ |
901 | void |
shared_region_pager_deallocate_internal(
    shared_region_pager_t pager,
    boolean_t locked)
{
    boolean_t needs_trimming;
    int count_unmapped;
    os_ref_count_t ref_count;

    if (!locked) {
        lck_mtx_lock(&shared_region_pager_lock);
912 | } |
913 | |
914 | /* if we have too many unmapped pagers, trim some */ |
915 | count_unmapped = shared_region_pager_count - shared_region_pager_count_mapped; |
916 | needs_trimming = (count_unmapped > shared_region_pager_cache_limit); |
917 | |
918 | /* drop a reference on this pager */ |
919 | ref_count = os_ref_release_locked_raw(&pager->srp_ref_count, NULL); |
920 | |
921 | if (ref_count == 1) { |
922 | /* |
923 | * Only the "named" reference is left, which means that |
924 | * no one is really holding on to this pager anymore. |
925 | * Terminate it. |
926 | */ |
927 | shared_region_pager_dequeue(pager); |
928 | /* the pager is all ours: no need for the lock now */ |
        lck_mtx_unlock(&shared_region_pager_lock);
930 | shared_region_pager_terminate_internal(pager); |
931 | } else if (ref_count == 0) { |
932 | /* |
933 | * Dropped the existence reference; the memory object has |
934 | * been terminated. Do some final cleanup and release the |
935 | * pager structure. |
936 | */ |
        lck_mtx_unlock(&shared_region_pager_lock);
938 | |
939 | vm_shared_region_slide_info_t si = pager->srp_slide_info; |
940 | #if __has_feature(ptrauth_calls) |
941 | /* |
942 | * The slide_info for auth sections lives in the shared region. |
943 | * Just deallocate() on the shared region and clear the field. |
944 | */ |
945 | if (si != NULL) { |
946 | if (si->si_shared_region != NULL) { |
947 | assert(si->si_ptrauth); |
948 | vm_shared_region_deallocate(si->si_shared_region); |
949 | pager->srp_slide_info = NULL; |
950 | si = NULL; |
951 | } |
952 | } |
953 | #endif /* __has_feature(ptrauth_calls) */ |
954 | if (si != NULL) { |
            vm_object_deallocate(si->si_slide_object);
            /* free the slide_info_entry */
            kfree_data(si->si_slide_info_entry,
                si->si_slide_info_size);
            kfree_type(struct vm_shared_region_slide_info, si);
            pager->srp_slide_info = NULL;
        }

        if (pager->srp_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->srp_header.mo_control);
965 | pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
966 | } |
967 | kfree_type(struct shared_region_pager, pager); |
968 | pager = SHARED_REGION_PAGER_NULL; |
969 | } else { |
970 | /* there are still plenty of references: keep going... */ |
        lck_mtx_unlock(&shared_region_pager_lock);
972 | } |
973 | |
974 | if (needs_trimming) { |
975 | shared_region_pager_trim(); |
976 | } |
977 | /* caution: lock is not held on return... */ |
978 | } |
979 | |
980 | /* |
981 | * shared_region_pager_deallocate() |
982 | * |
983 | * Release a reference on this pager and free it when the last |
984 | * reference goes away. |
985 | */ |
986 | void |
shared_region_pager_deallocate(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
993 | pager = shared_region_pager_lookup(mem_obj); |
994 | shared_region_pager_deallocate_internal(pager, FALSE); |
995 | } |
996 | |
/*
 * shared_region_pager_terminate()
 */
kern_return_t
shared_region_pager_terminate(
1002 | #if !DEBUG |
1003 | __unused |
1004 | #endif |
1005 | memory_object_t mem_obj) |
1006 | { |
1007 | PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n" , mem_obj)); |
1008 | |
1009 | return KERN_SUCCESS; |
1010 | } |
1011 | |
1012 | /* |
1013 | * shared_region_pager_map() |
1014 | * |
1015 | * This allows VM to let us, the EMM, know that this memory object |
1016 | * is currently mapped one or more times. This is called by VM each time |
1017 | * the memory object gets mapped, but we only take one extra reference the |
1018 | * first time it is called. |
1019 | */ |
1020 | kern_return_t |
shared_region_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
1032 | assert(pager->srp_is_ready); |
1033 | assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0); /* pager is alive */ |
1034 | if (!pager->srp_is_mapped) { |
1035 | pager->srp_is_mapped = TRUE; |
1036 | os_ref_retain_locked_raw(&pager->srp_ref_count, NULL); |
1037 | shared_region_pager_count_mapped++; |
1038 | } |
    lck_mtx_unlock(&shared_region_pager_lock);
1040 | |
1041 | return KERN_SUCCESS; |
1042 | } |
1043 | |
1044 | /* |
1045 | * shared_region_pager_last_unmap() |
1046 | * |
1047 | * This is called by VM when this memory object is no longer mapped anywhere. |
1048 | */ |
1049 | kern_return_t |
shared_region_pager_last_unmap(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;
    int count_unmapped;

    PAGER_DEBUG(PAGER_ALL,
        ("shared_region_pager_last_unmap: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
1062 | if (pager->srp_is_mapped) { |
1063 | /* |
1064 | * All the mappings are gone, so let go of the one extra |
1065 | * reference that represents all the mappings of this pager. |
1066 | */ |
1067 | shared_region_pager_count_mapped--; |
1068 | count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped); |
1069 | if (count_unmapped > shared_region_pager_count_unmapped_max) { |
1070 | shared_region_pager_count_unmapped_max = count_unmapped; |
1071 | } |
1072 | pager->srp_is_mapped = FALSE; |
1073 | shared_region_pager_deallocate_internal(pager, TRUE); |
1074 | /* caution: deallocate_internal() released the lock ! */ |
1075 | } else { |
        lck_mtx_unlock(&shared_region_pager_lock);
1077 | } |
1078 | |
1079 | return KERN_SUCCESS; |
1080 | } |
1081 | |
1082 | boolean_t |
shared_region_pager_backing_object(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    vm_object_t *backing_object,
    vm_object_offset_t *backing_offset)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL,
        ("shared_region_pager_backing_object: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);
1095 | |
1096 | *backing_object = pager->srp_backing_object; |
1097 | *backing_offset = pager->srp_backing_offset + offset; |
1098 | |
1099 | return TRUE; |
1100 | } |
1101 | |
1102 | |
/*
 * shared_region_pager_lookup()
 */
shared_region_pager_t
shared_region_pager_lookup(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;
1111 | |
1112 | assert(mem_obj->mo_pager_ops == &shared_region_pager_ops); |
1113 | pager = (shared_region_pager_t)(uintptr_t) mem_obj; |
1114 | assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0); |
1115 | return pager; |
1116 | } |
1117 | |
1118 | /* |
1119 | * Create and return a pager for the given object with the |
1120 | * given slide information. |
1121 | */ |
1122 | static shared_region_pager_t |
shared_region_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info,
#if !__has_feature(ptrauth_calls)
    __unused
#endif /* !__has_feature(ptrauth_calls) */
    uint64_t jop_key)
{
    shared_region_pager_t pager;
1133 | memory_object_control_t control; |
1134 | kern_return_t kr; |
1135 | vm_object_t object; |
1136 | |
1137 | pager = kalloc_type(struct shared_region_pager, Z_WAITOK); |
1138 | if (pager == SHARED_REGION_PAGER_NULL) { |
1139 | return SHARED_REGION_PAGER_NULL; |
1140 | } |
1141 | |
1142 | /* |
1143 | * The vm_map call takes both named entry ports and raw memory |
1144 | * objects in the same parameter. We need to make sure that |
1145 | * vm_map does not see this object as a named entry port. So, |
1146 | * we reserve the first word in the object for a fake ip_kotype |
1147 | * setting - that will tell vm_map to use it as a memory object. |
1148 | */ |
1149 | pager->srp_header.mo_ikot = IKOT_MEMORY_OBJECT; |
1150 | pager->srp_header.mo_pager_ops = &shared_region_pager_ops; |
1151 | pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
1152 | |
1153 | pager->srp_is_ready = FALSE;/* not ready until it has a "name" */ |
1154 | /* existence reference (for the cache) + 1 for the caller */ |
1155 | os_ref_init_count_raw(&pager->srp_ref_count, NULL, 2); |
1156 | pager->srp_is_mapped = FALSE; |
1157 | pager->srp_backing_object = backing_object; |
1158 | pager->srp_backing_offset = backing_offset; |
1159 | pager->srp_slide_info = slide_info; |
1160 | #if __has_feature(ptrauth_calls) |
1161 | pager->srp_jop_key = jop_key; |
1162 | /* |
1163 | * If we're getting slide_info from the shared_region, |
1164 | * take a reference, so it can't disappear from under us. |
1165 | */ |
1166 | if (slide_info->si_shared_region) { |
1167 | assert(slide_info->si_ptrauth); |
1168 | vm_shared_region_reference(slide_info->si_shared_region); |
1169 | } |
1170 | #endif /* __has_feature(ptrauth_calls) */ |
1171 | |
1172 | vm_object_reference(backing_object); |
1173 | |
    lck_mtx_lock(&shared_region_pager_lock);
1175 | /* enter new pager at the head of our list of pagers */ |
1176 | queue_enter_first(&shared_region_pager_queue, |
1177 | pager, |
1178 | shared_region_pager_t, |
1179 | srp_queue); |
1180 | shared_region_pager_count++; |
1181 | if (shared_region_pager_count > shared_region_pager_count_max) { |
1182 | shared_region_pager_count_max = shared_region_pager_count; |
1183 | } |
    lck_mtx_unlock(&shared_region_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
        0,
        &control);
    assert(kr == KERN_SUCCESS);

    memory_object_mark_trusted(control);

    lck_mtx_lock(&shared_region_pager_lock);
    /* the new pager is now ready to be used */
    pager->srp_is_ready = TRUE;
    object = memory_object_to_vm_object((memory_object_t) pager);
1197 | assert(object); |
1198 | /* |
1199 | * No one knows about this object and so we get away without the object lock. |
1200 | * This object is _eventually_ backed by the dyld shared cache and so we want |
1201 | * to benefit from the lock priority boosting. |
1202 | */ |
1203 | object->object_is_shared_cache = TRUE; |
    lck_mtx_unlock(&shared_region_pager_lock);
1205 | |
1206 | /* wakeup anyone waiting for this pager to be ready */ |
1207 | thread_wakeup(&pager->srp_is_ready); |
1208 | |
1209 | return pager; |
1210 | } |
1211 | |
1212 | /* |
1213 | * shared_region_pager_setup() |
1214 | * |
1215 | * Provide the caller with a memory object backed by the provided |
1216 | * "backing_object" VM object. |
1217 | */ |
1218 | memory_object_t |
shared_region_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info,
    uint64_t jop_key)
{
    shared_region_pager_t pager;
1226 | |
1227 | /* create new pager */ |
1228 | pager = shared_region_pager_create(backing_object, |
1229 | backing_offset, slide_info, jop_key); |
1230 | if (pager == SHARED_REGION_PAGER_NULL) { |
1231 | /* could not create a new pager */ |
1232 | return MEMORY_OBJECT_NULL; |
1233 | } |
1234 | |
    lck_mtx_lock(&shared_region_pager_lock);
    while (!pager->srp_is_ready) {
        lck_mtx_sleep(&shared_region_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->srp_is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&shared_region_pager_lock);
1243 | |
1244 | return (memory_object_t) pager; |
1245 | } |
1246 | |
1247 | #if __has_feature(ptrauth_calls) |
/*
 * shared_region_pager_match()
 *
 * Look for an existing pager that matches the given backing object, offset,
 * slide info and JOP key, and return it with an extra reference. If no
 * pager matches, create a new one via shared_region_pager_setup().
 */
1254 | memory_object_t |
1255 | shared_region_pager_match( |
1256 | vm_object_t backing_object, |
1257 | vm_object_offset_t backing_offset, |
1258 | vm_shared_region_slide_info_t slide_info, |
1259 | uint64_t jop_key) |
1260 | { |
1261 | shared_region_pager_t pager; |
1262 | vm_shared_region_slide_info_t si; |
1263 | |
1264 | lck_mtx_lock(&shared_region_pager_lock); |
1265 | queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) { |
1266 | if (pager->srp_backing_object != backing_object->vo_copy) { |
1267 | continue; |
1268 | } |
1269 | if (pager->srp_backing_offset != backing_offset) { |
1270 | continue; |
1271 | } |
1272 | si = pager->srp_slide_info; |
1273 | |
1274 | /* If there's no AUTH section then it can't match (slide_info is always !NULL) */ |
1275 | if (!si->si_ptrauth) { |
1276 | continue; |
1277 | } |
1278 | if (pager->srp_jop_key != jop_key) { |
1279 | continue; |
1280 | } |
1281 | if (si->si_slide != slide_info->si_slide) { |
1282 | continue; |
1283 | } |
1284 | if (si->si_start != slide_info->si_start) { |
1285 | continue; |
1286 | } |
1287 | if (si->si_end != slide_info->si_end) { |
1288 | continue; |
1289 | } |
1290 | if (si->si_slide_object != slide_info->si_slide_object) { |
1291 | continue; |
1292 | } |
1293 | if (si->si_slide_info_size != slide_info->si_slide_info_size) { |
1294 | continue; |
1295 | } |
1296 | if (memcmp(si->si_slide_info_entry, slide_info->si_slide_info_entry, si->si_slide_info_size) != 0) { |
1297 | continue; |
1298 | } |
1299 | /* the caller expects a reference on this */ |
1300 | os_ref_retain_locked_raw(&pager->srp_ref_count, NULL); |
1301 | lck_mtx_unlock(&shared_region_pager_lock); |
1302 | return (memory_object_t)pager; |
1303 | } |
1304 | |
1305 | /* |
1306 | * We didn't find a pre-existing pager, so create one. |
1307 | * |
1308 | * Note slight race condition here since we drop the lock. This could lead to more than one |
1309 | * thread calling setup with the same arguments here. That shouldn't break anything, just |
1310 | * waste a little memory. |
1311 | */ |
1312 | lck_mtx_unlock(&shared_region_pager_lock); |
1313 | return shared_region_pager_setup(backing_object->vo_copy, backing_offset, slide_info, jop_key); |
1314 | } |
1315 | |
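/*
 * shared_region_pager_match_task_key()
 *
 * Debug check that the pager backing this memory object was created with
 * the same pointer-signing (JOP) key as the given task.
 */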
1316 | void |
1317 | shared_region_pager_match_task_key(memory_object_t memobj, __unused task_t task) |
1318 | { |
1319 | __unused shared_region_pager_t pager = (shared_region_pager_t)memobj; |
1320 | |
1321 | assert(pager->srp_jop_key == task->jop_pid); |
1322 | } |
1323 | #endif /* __has_feature(ptrauth_calls) */ |
1324 | |
1325 | void |
shared_region_pager_trim(void)
{
    shared_region_pager_t pager, prev_pager;
    queue_head_t trim_queue;
    int num_trim;
    int count_unmapped;

    lck_mtx_lock(&shared_region_pager_lock);
1334 | |
1335 | /* |
1336 | * We have too many pagers, try and trim some unused ones, |
1337 | * starting with the oldest pager at the end of the queue. |
1338 | */ |
1339 | queue_init(&trim_queue); |
1340 | num_trim = 0; |
1341 | |
1342 | for (pager = (shared_region_pager_t)queue_last(&shared_region_pager_queue); |
1343 | !queue_end(&shared_region_pager_queue, (queue_entry_t) pager); |
1344 | pager = prev_pager) { |
1345 | /* get prev elt before we dequeue */ |
1346 | prev_pager = (shared_region_pager_t)queue_prev(&pager->srp_queue); |
1347 | |
        if (os_ref_get_count_raw(&pager->srp_ref_count) == 2 &&
1349 | pager->srp_is_ready && |
1350 | !pager->srp_is_mapped) { |
1351 | /* this pager can be trimmed */ |
1352 | num_trim++; |
1353 | /* remove this pager from the main list ... */ |
1354 | shared_region_pager_dequeue(pager); |
1355 | /* ... and add it to our trim queue */ |
1356 | queue_enter_first(&trim_queue, |
1357 | pager, |
1358 | shared_region_pager_t, |
1359 | srp_queue); |
1360 | |
1361 | /* do we have enough pagers to trim? */ |
1362 | count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped); |
1363 | if (count_unmapped <= shared_region_pager_cache_limit) { |
1364 | break; |
1365 | } |
1366 | } |
1367 | } |
1368 | if (num_trim > shared_region_pager_num_trim_max) { |
1369 | shared_region_pager_num_trim_max = num_trim; |
1370 | } |
1371 | shared_region_pager_num_trim_total += num_trim; |
1372 | |
    lck_mtx_unlock(&shared_region_pager_lock);
1374 | |
1375 | /* terminate the trimmed pagers */ |
1376 | while (!queue_empty(&trim_queue)) { |
1377 | queue_remove_first(&trim_queue, |
1378 | pager, |
1379 | shared_region_pager_t, |
1380 | srp_queue); |
1381 | pager->srp_queue.next = NULL; |
1382 | pager->srp_queue.prev = NULL; |
1383 | assert(os_ref_get_count_raw(&pager->srp_ref_count) == 2); |
1384 | /* |
1385 | * We can't call deallocate_internal() because the pager |
1386 | * has already been dequeued, but we still need to remove |
1387 | * a reference. |
1388 | */ |
1389 | (void)os_ref_release_locked_raw(&pager->srp_ref_count, NULL); |
1390 | shared_region_pager_terminate_internal(pager); |
1391 | } |
1392 | } |
1393 | |
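/*
 * shared_region_pager_purge()
 *
 * Discard the resident pages of the VM object backed by this pager and
 * return how many were reclaimed. The pages can be regenerated on demand
 * from the backing shared cache file.
 */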
1394 | static uint64_t |
shared_region_pager_purge(
    shared_region_pager_t pager)
{
    uint64_t pages_purged;
    vm_object_t object;

    pages_purged = 0;
    object = memory_object_to_vm_object((memory_object_t) pager);
1403 | assert(object != VM_OBJECT_NULL); |
1404 | vm_object_lock(object); |
1405 | pages_purged = object->resident_page_count; |
1406 | vm_object_reap_pages(object, REAP_DATA_FLUSH); |
1407 | pages_purged -= object->resident_page_count; |
1408 | // printf(" %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count); |
1409 | vm_object_unlock(object); |
1410 | return pages_purged; |
1411 | } |
1412 | |
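/*
 * shared_region_pager_purge_all()
 *
 * Purge the resident pages of every shared region pager and return the
 * total number of pages reclaimed.
 */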
1413 | uint64_t |
shared_region_pager_purge_all(void)
{
    uint64_t pages_purged;
    shared_region_pager_t pager;

    pages_purged = 0;
    lck_mtx_lock(&shared_region_pager_lock);
1421 | queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) { |
1422 | pages_purged += shared_region_pager_purge(pager); |
1423 | } |
    lck_mtx_unlock(&shared_region_pager_lock);
1425 | #if DEVELOPMENT || DEBUG |
1426 | printf(" %s:%d pages purged: %llu\n" , __FUNCTION__, __LINE__, pages_purged); |
1427 | #endif /* DEVELOPMENT || DEBUG */ |
1428 | return pages_purged; |
1429 | } |
1430 | |