| 1 | /* |
| 2 | * Copyright (c) 2021 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
| 5 | * |
| 6 | * This file contains Original Code and/or Modifications of Original Code |
| 7 | * as defined in and that are subject to the Apple Public Source License |
| 8 | * Version 2.0 (the 'License'). You may not use this file except in |
| 9 | * compliance with the License. The rights granted to you under the License |
| 10 | * may not be used to create, or enable the creation or redistribution of, |
| 11 | * unlawful or unlicensed copies of an Apple operating system, or to |
| 12 | * circumvent, violate, or enable the circumvention or violation of, any |
| 13 | * terms of an Apple operating system software license agreement. |
| 14 | * |
| 15 | * Please obtain a copy of the License at |
| 16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
| 17 | * |
| 18 | * The Original Code and all software distributed under the License are |
| 19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
| 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
| 21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
| 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
| 23 | * Please see the License for the specific language governing rights and |
| 24 | * limitations under the License. |
| 25 | * |
| 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
| 27 | */ |
| 28 | |
| 29 | #include <sys/errno.h> |
| 30 | |
| 31 | #include <mach/mach_types.h> |
| 32 | #include <mach/mach_traps.h> |
| 33 | #include <mach/host_priv.h> |
| 34 | #include <mach/kern_return.h> |
| 35 | #include <mach/memory_object_control.h> |
| 36 | #include <mach/memory_object_types.h> |
| 37 | #include <mach/port.h> |
| 38 | #include <mach/policy.h> |
| 39 | #include <mach/upl.h> |
| 40 | #include <mach/thread_act.h> |
| 41 | #include <mach/mach_vm.h> |
| 42 | |
| 43 | #include <kern/host.h> |
| 44 | #include <kern/kalloc.h> |
| 45 | #include <kern/thread.h> |
| 46 | #include <kern/ipc_kobject.h> |
| 47 | |
| 48 | #include <ipc/ipc_port.h> |
| 49 | #include <ipc/ipc_space.h> |
| 50 | |
| 51 | #include <vm/memory_object.h> |
| 52 | #include <vm/vm_kern.h> |
| 53 | #include <vm/vm_fault.h> |
| 54 | #include <vm/vm_map.h> |
| 55 | #include <vm/vm_pageout.h> |
| 56 | #include <vm/vm_protos.h> |
| 57 | #include <vm/vm_dyld_pager.h> |
| 58 | |
| 59 | #include <sys/kdebug_triage.h> |
| 60 | #include <mach-o/fixup-chains.h> |
| 61 | #if defined(HAS_APPLE_PAC) |
| 62 | #include <ptrauth.h> |
| 63 | #include <arm/misc_protos.h> |
| 64 | #endif /* defined(HAS_APPLE_PAC) */ |
| 65 | |
| 66 | /* |
 * DYLD page-in linking pager.
 *
 * This external memory manager (EMM) applies dyld fixups to data
 * pages, allowing the modified pages to appear "clean".
| 71 | * |
| 72 | * The modified pages will never be dirtied, so the memory manager doesn't |
| 73 | * need to handle page-out requests (from memory_object_data_return()). The |
| 74 | * pages are mapped copy-on-write, so that the originals stay clean. |
| 75 | */ |
| 76 | |
| 77 | /* forward declarations */ |
typedef struct dyld_pager *dyld_pager_t;
| 79 | static void dyld_pager_reference(memory_object_t mem_obj); |
| 80 | static void dyld_pager_deallocate(memory_object_t mem_obj); |
static void dyld_pager_deallocate_internal(dyld_pager_t pager, bool locked);
| 82 | static kern_return_t dyld_pager_init(memory_object_t mem_obj, |
| 83 | memory_object_control_t control, |
| 84 | memory_object_cluster_size_t pg_size); |
| 85 | static kern_return_t dyld_pager_terminate(memory_object_t mem_obj); |
static void dyld_pager_terminate_internal(dyld_pager_t pager);
| 87 | static kern_return_t dyld_pager_data_request(memory_object_t mem_obj, |
| 88 | memory_object_offset_t offset, |
| 89 | memory_object_cluster_size_t length, |
| 90 | vm_prot_t protection_required, |
| 91 | memory_object_fault_info_t fault_info); |
| 92 | static kern_return_t dyld_pager_data_return(memory_object_t mem_obj, |
| 93 | memory_object_offset_t offset, |
| 94 | memory_object_cluster_size_t data_cnt, |
| 95 | memory_object_offset_t *resid_offset, |
| 96 | int *io_error, |
| 97 | boolean_t dirty, |
| 98 | boolean_t kernel_copy, |
| 99 | int upl_flags); |
| 100 | static kern_return_t dyld_pager_data_initialize(memory_object_t mem_obj, |
| 101 | memory_object_offset_t offset, |
| 102 | memory_object_cluster_size_t data_cnt); |
| 103 | static kern_return_t dyld_pager_map(memory_object_t mem_obj, |
| 104 | vm_prot_t prot); |
| 105 | static kern_return_t dyld_pager_last_unmap(memory_object_t mem_obj); |
| 106 | static boolean_t dyld_pager_backing_object( |
| 107 | memory_object_t mem_obj, |
| 108 | memory_object_offset_t mem_obj_offset, |
| 109 | vm_object_t *backing_object, |
| 110 | vm_object_offset_t *backing_offset); |
| 111 | static dyld_pager_t dyld_pager_lookup(memory_object_t mem_obj); |
| 112 | |
| 113 | /* |
| 114 | * Vector of VM operations for this EMM. |
| 115 | * These routines are invoked by VM via the memory_object_*() interfaces. |
| 116 | */ |
const struct memory_object_pager_ops dyld_pager_ops = {
| 118 | .memory_object_reference = dyld_pager_reference, |
| 119 | .memory_object_deallocate = dyld_pager_deallocate, |
| 120 | .memory_object_init = dyld_pager_init, |
| 121 | .memory_object_terminate = dyld_pager_terminate, |
| 122 | .memory_object_data_request = dyld_pager_data_request, |
| 123 | .memory_object_data_return = dyld_pager_data_return, |
| 124 | .memory_object_data_initialize = dyld_pager_data_initialize, |
| 125 | .memory_object_map = dyld_pager_map, |
| 126 | .memory_object_last_unmap = dyld_pager_last_unmap, |
| 127 | .memory_object_backing_object = dyld_pager_backing_object, |
| 128 | .memory_object_pager_name = "dyld" |
| 129 | }; |
| 130 | |
| 131 | /* |
| 132 | * The "dyld_pager" structure. We create one of these for each use of |
| 133 | * map_with_linking_np() that dyld uses. |
| 134 | */ |
struct dyld_pager {
	struct memory_object dyld_header;                 /* mandatory generic header */

#if MEMORY_OBJECT_HAS_REFCOUNT
#define dyld_ref_count dyld_header.mo_ref
#else
	os_ref_atomic_t dyld_ref_count;                   /* active uses */
#endif
	queue_chain_t dyld_pager_queue;                   /* next & prev pagers */
	bool dyld_is_mapped;                              /* has active mappings */
	bool dyld_is_ready;                               /* is this pager ready? */
	vm_object_t dyld_backing_object;                  /* VM object for the file being linked */
	void *dyld_link_info;
	uint32_t dyld_link_info_size;
	uint32_t dyld_num_range;
	memory_object_offset_t dyld_file_offset[MWL_MAX_REGION_COUNT];
	mach_vm_address_t dyld_address[MWL_MAX_REGION_COUNT];
	mach_vm_size_t dyld_size[MWL_MAX_REGION_COUNT];
#if defined(HAS_APPLE_PAC)
	uint64_t dyld_a_key;
#endif /* defined(HAS_APPLE_PAC) */
};
| 157 | |
queue_head_t dyld_pager_queue = QUEUE_HEAD_INITIALIZER(dyld_pager_queue);
| 159 | |
| 160 | /* |
| 161 | * "dyld_pager_lock" for counters, ref counting, etc. |
| 162 | */ |
LCK_GRP_DECLARE(dyld_pager_lck_grp, "dyld_pager");
LCK_MTX_DECLARE(dyld_pager_lock, &dyld_pager_lck_grp);
| 165 | |
| 166 | /* |
| 167 | * Statistics & counters. |
| 168 | */ |
uint32_t dyld_pager_count = 0;
uint32_t dyld_pager_count_max = 0;
| 171 | |
| 172 | /* |
| 173 | * dyld_pager_dequeue() |
| 174 | * |
| 175 | * Removes a pager from the list of pagers. |
| 176 | * |
 * The caller must hold "dyld_pager_lock".
| 178 | */ |
| 179 | static void |
| 180 | ( |
| 181 | __unused dyld_pager_t ) |
| 182 | { |
| 183 | queue_remove(&dyld_pager_queue, |
| 184 | pager, |
| 185 | dyld_pager_t, |
| 186 | dyld_pager_queue); |
| 187 | pager->dyld_pager_queue.next = NULL; |
| 188 | pager->dyld_pager_queue.prev = NULL; |
| 189 | dyld_pager_count--; |
| 190 | } |
| 191 | |
| 192 | /* |
| 193 | * dyld_pager_init() |
| 194 | * |
 * Initializes the memory object and makes it ready to be used and mapped.
| 196 | */ |
| 197 | static kern_return_t |
| 198 | ( |
| 199 | memory_object_t mem_obj, |
| 200 | memory_object_control_t control, |
| 201 | __unused |
| 202 | memory_object_cluster_size_t pg_size) |
| 203 | { |
	dyld_pager_t pager;
| 205 | kern_return_t kr; |
| 206 | memory_object_attr_info_data_t attributes; |
| 207 | |
| 208 | if (control == MEMORY_OBJECT_CONTROL_NULL) { |
		printf("%s(): control NULL\n", __func__);
| 210 | return KERN_INVALID_ARGUMENT; |
| 211 | } |
| 212 | |
| 213 | pager = dyld_pager_lookup(mem_obj); |
| 214 | |
| 215 | memory_object_control_reference(control); |
| 216 | |
| 217 | pager->dyld_header.mo_control = control; |
| 218 | |
| 219 | attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY; |
| 220 | attributes.cluster_size = (1 << (PAGE_SHIFT)); |
| 221 | attributes.may_cache_object = FALSE; |
| 222 | attributes.temporary = TRUE; |
| 223 | |
	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
| 229 | if (kr != KERN_SUCCESS) { |
| 230 | panic("dyld_pager_init: " "memory_object_change_attributes() failed" ); |
| 231 | } |
| 232 | |
| 233 | return KERN_SUCCESS; |
| 234 | } |
| 235 | |
| 236 | /* |
| 237 | * dyld_data_return() |
| 238 | * |
| 239 | * A page-out request from VM -- should never happen so panic. |
| 240 | */ |
| 241 | static kern_return_t |
| 242 | ( |
| 243 | __unused memory_object_t mem_obj, |
| 244 | __unused memory_object_offset_t offset, |
| 245 | __unused memory_object_cluster_size_t data_cnt, |
| 246 | __unused memory_object_offset_t *resid_offset, |
| 247 | __unused int *io_error, |
| 248 | __unused boolean_t dirty, |
| 249 | __unused boolean_t kernel_copy, |
| 250 | __unused int upl_flags) |
| 251 | { |
| 252 | panic("dyld_pager_data_return: should never happen!" ); |
| 253 | return KERN_FAILURE; |
| 254 | } |
| 255 | |
| 256 | static kern_return_t |
| 257 | ( |
| 258 | __unused memory_object_t mem_obj, |
| 259 | __unused memory_object_offset_t offset, |
| 260 | __unused memory_object_cluster_size_t data_cnt) |
| 261 | { |
| 262 | panic("dyld_pager_data_initialize: should never happen" ); |
| 263 | return KERN_FAILURE; |
| 264 | } |
| 265 | |
| 266 | |
| 267 | /* |
| 268 | * Apply fixups to a page used by a 64 bit process. |
| 269 | */ |
| 270 | static kern_return_t |
| 271 | fixupPage64( |
| 272 | uint64_t userVA, |
| 273 | vm_offset_t contents, |
| 274 | vm_offset_t end_contents, |
| 275 | void *link_info, |
| 276 | struct dyld_chained_starts_in_segment *segInfo, |
| 277 | uint32_t pageIndex, |
| 278 | bool offsetBased) |
| 279 | { |
| 280 | struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info; |
| 281 | uint64_t *bindsArray = (uint64_t *)((uintptr_t)hdr + hdr->mwli_binds_offset); |
| 282 | uint16_t firstStartOffset = segInfo->page_start[pageIndex]; |
| 283 | |
| 284 | /* |
| 285 | * Done if no fixups on the page |
| 286 | */ |
| 287 | if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) { |
| 288 | return KERN_SUCCESS; |
| 289 | } |
| 290 | |
| 291 | /* |
| 292 | * walk the chain |
| 293 | */ |
| 294 | uint64_t *chain = (uint64_t *)(contents + firstStartOffset); |
| 295 | uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide); |
| 296 | uint64_t delta = 0; |
| 297 | do { |
| 298 | if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) { |
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
| 302 | return KERN_FAILURE; |
| 303 | } |
| 304 | uint64_t value = *chain; |
| 305 | bool isBind = (value & 0x8000000000000000ULL); |
| 306 | delta = (value >> 51) & 0xFFF; |
| 307 | if (isBind) { |
| 308 | uint32_t bindOrdinal = value & 0x00FFFFFF; |
| 309 | if (bindOrdinal >= hdr->mwli_binds_count) { |
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s out of range bind ordinal %u (max %u)\n", __func__,
				    bindOrdinal, hdr->mwli_binds_count);
| 313 | return KERN_FAILURE; |
| 314 | } |
| 315 | uint32_t addend = (value >> 24) & 0xFF; |
| 316 | *chain = bindsArray[bindOrdinal] + addend; |
| 317 | } else { |
| 318 | /* is rebase */ |
| 319 | uint64_t target = value & 0xFFFFFFFFFULL; |
| 320 | uint64_t high8 = (value >> 36) & 0xFF; |
| 321 | *chain = target + targetAdjust + (high8 << 56); |
| 322 | } |
| 323 | if (delta * 4 >= PAGE_SIZE) { |
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_DELTA_TOO_LARGE), (uintptr_t)userVA);
			printf("%s(): delta offset > page size %lld\n", __func__, delta * 4);
| 326 | return KERN_FAILURE; |
| 327 | } |
| 328 | chain = (uint64_t *)((uintptr_t)chain + (delta * 4)); // 4-byte stride |
| 329 | } while (delta != 0); |
| 330 | return KERN_SUCCESS; |
| 331 | } |
| 332 | |
| 333 | |
| 334 | /* |
| 335 | * Apply fixups within a page used by a 32 bit process. |
| 336 | */ |
| 337 | static kern_return_t |
| 338 | fixupChain32( |
| 339 | uint64_t userVA, |
| 340 | uint32_t *chain, |
| 341 | vm_offset_t contents, |
| 342 | vm_offset_t end_contents, |
| 343 | void *link_info, |
| 344 | struct dyld_chained_starts_in_segment *segInfo, |
| 345 | uint32_t *bindsArray) |
| 346 | { |
| 347 | struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info; |
| 348 | uint32_t delta = 0; |
| 349 | |
| 350 | do { |
| 351 | if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) { |
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
| 355 | return KERN_FAILURE; |
| 356 | } |
| 357 | uint32_t value = *chain; |
| 358 | delta = (value >> 26) & 0x1F; |
| 359 | if (value & 0x80000000) { |
| 360 | // is bind |
| 361 | uint32_t bindOrdinal = value & 0x000FFFFF; |
| 362 | if (bindOrdinal >= hdr->mwli_binds_count) { |
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s(): out of range bind ordinal %u (max %u)",
				    __func__, bindOrdinal, hdr->mwli_binds_count);
| 366 | return KERN_FAILURE; |
| 367 | } |
| 368 | uint32_t addend = (value >> 20) & 0x3F; |
| 369 | *chain = bindsArray[bindOrdinal] + addend; |
| 370 | } else { |
| 371 | // is rebase |
| 372 | uint32_t target = value & 0x03FFFFFF; |
| 373 | if (target > segInfo->max_valid_pointer) { |
| 374 | // handle non-pointers in chain |
| 375 | uint32_t bias = (0x04000000 + segInfo->max_valid_pointer) / 2; |
| 376 | *chain = target - bias; |
| 377 | } else { |
| 378 | *chain = target + (uint32_t)hdr->mwli_slide; |
| 379 | } |
| 380 | } |
| 381 | chain += delta; |
| 382 | } while (delta != 0); |
| 383 | return KERN_SUCCESS; |
| 384 | } |
| 385 | |
| 386 | |
| 387 | /* |
| 388 | * Apply fixups to a page used by a 32 bit process. |
| 389 | */ |
| 390 | static kern_return_t |
| 391 | fixupPage32( |
| 392 | uint64_t userVA, |
| 393 | vm_offset_t contents, |
| 394 | vm_offset_t end_contents, |
| 395 | void *link_info, |
| 396 | uint32_t link_info_size, |
| 397 | struct dyld_chained_starts_in_segment *segInfo, |
| 398 | uint32_t pageIndex) |
| 399 | { |
| 400 | struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info; |
| 401 | uint32_t *bindsArray = (uint32_t *)((uintptr_t)hdr + hdr->mwli_binds_offset); |
| 402 | uint16_t startOffset = segInfo->page_start[pageIndex]; |
| 403 | |
| 404 | /* |
| 405 | * done if no fixups |
| 406 | */ |
| 407 | if (startOffset == DYLD_CHAINED_PTR_START_NONE) { |
| 408 | return KERN_SUCCESS; |
| 409 | } |
| 410 | |
| 411 | if (startOffset & DYLD_CHAINED_PTR_START_MULTI) { |
| 412 | // some fixups in the page are too far apart, so page has multiple starts |
| 413 | uint32_t overflowIndex = startOffset & ~DYLD_CHAINED_PTR_START_MULTI; |
| 414 | bool chainEnd = false; |
| 415 | while (!chainEnd) { |
| 416 | /* |
| 417 | * range check against link_info, note +1 to include data we'll dereference |
| 418 | */ |
| 419 | if ((uintptr_t)&segInfo->page_start[overflowIndex + 1] > (uintptr_t)link_info + link_info_size) { |
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): out of range segInfo->page_start[overflowIndex]", __func__);
| 422 | return KERN_FAILURE; |
| 423 | } |
| 424 | chainEnd = (segInfo->page_start[overflowIndex] & DYLD_CHAINED_PTR_START_LAST); |
| 425 | startOffset = (segInfo->page_start[overflowIndex] & ~DYLD_CHAINED_PTR_START_LAST); |
| 426 | uint32_t *chain = (uint32_t *)(contents + startOffset); |
| 427 | fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray); |
| 428 | ++overflowIndex; |
| 429 | } |
| 430 | } else { |
| 431 | uint32_t *chain = (uint32_t *)(contents + startOffset); |
| 432 | fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray); |
| 433 | } |
| 434 | return KERN_SUCCESS; |
| 435 | } |
| 436 | |
| 437 | #if defined(HAS_APPLE_PAC) |
| 438 | /* |
| 439 | * Sign a pointer needed for fixups. |
| 440 | */ |
| 441 | static kern_return_t |
| 442 | signPointer( |
| 443 | uint64_t unsignedAddr, |
| 444 | void *loc, |
| 445 | bool addrDiv, |
| 446 | uint16_t diversity, |
| 447 | ptrauth_key key, |
	dyld_pager_t pager,
| 449 | uint64_t *signedAddr) |
| 450 | { |
| 451 | // don't sign NULL |
| 452 | if (unsignedAddr == 0) { |
| 453 | *signedAddr = 0; |
| 454 | return KERN_SUCCESS; |
| 455 | } |
| 456 | |
| 457 | uint64_t extendedDiscriminator = diversity; |
| 458 | if (addrDiv) { |
| 459 | extendedDiscriminator = __builtin_ptrauth_blend_discriminator(loc, extendedDiscriminator); |
| 460 | } |
| 461 | |
| 462 | switch (key) { |
| 463 | case ptrauth_key_asia: |
| 464 | case ptrauth_key_asda: |
| 465 | if (pager->dyld_a_key == 0 || arm_user_jop_disabled()) { |
| 466 | *signedAddr = unsignedAddr; |
| 467 | } else { |
			*signedAddr = (uintptr_t)pmap_sign_user_ptr((void *)unsignedAddr, key, extendedDiscriminator, pager->dyld_a_key);
| 469 | } |
| 470 | break; |
| 471 | |
| 472 | default: |
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_INVALID_AUTH_KEY), (uintptr_t)unsignedAddr);
		printf("%s(): Invalid ptr auth key %d\n", __func__, key);
| 475 | return KERN_FAILURE; |
| 476 | } |
| 477 | return KERN_SUCCESS; |
| 478 | } |
| 479 | |
| 480 | /* |
| 481 | * Apply fixups to a page used by a 64 bit process using pointer authentication. |
| 482 | */ |
| 483 | static kern_return_t |
| 484 | fixupPageAuth64( |
| 485 | uint64_t userVA, |
| 486 | vm_offset_t contents, |
| 487 | vm_offset_t end_contents, |
	dyld_pager_t pager,
| 489 | struct dyld_chained_starts_in_segment *segInfo, |
| 490 | uint32_t pageIndex, |
| 491 | bool offsetBased) |
| 492 | { |
| 493 | void *link_info = pager->dyld_link_info; |
| 494 | uint32_t link_info_size = pager->dyld_link_info_size; |
| 495 | struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info; |
| 496 | uint64_t *bindsArray = (uint64_t*)((uintptr_t)link_info + hdr->mwli_binds_offset); |
| 497 | |
| 498 | /* |
| 499 | * range check against link_info, note +1 to include data we'll dereference |
| 500 | */ |
| 501 | if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) { |
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]", __func__);
| 504 | return KERN_FAILURE; |
| 505 | } |
| 506 | uint16_t firstStartOffset = segInfo->page_start[pageIndex]; |
| 507 | |
| 508 | /* |
| 509 | * All done if no fixups on the page |
| 510 | */ |
| 511 | if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) { |
| 512 | return KERN_SUCCESS; |
| 513 | } |
| 514 | |
| 515 | /* |
| 516 | * Walk the chain of offsets to fix up |
| 517 | */ |
| 518 | uint64_t *chain = (uint64_t *)(contents + firstStartOffset); |
| 519 | uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide); |
| 520 | uint64_t delta = 0; |
| 521 | do { |
| 522 | if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) { |
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
| 526 | return KERN_FAILURE; |
| 527 | } |
| 528 | uint64_t value = *chain; |
| 529 | delta = (value >> 51) & 0x7FF; |
| 530 | bool isAuth = (value & 0x8000000000000000ULL); |
| 531 | bool isBind = (value & 0x4000000000000000ULL); |
| 532 | if (isAuth) { |
| 533 | ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3); |
| 534 | bool addrDiv = ((value & (1ULL << 48)) != 0); |
| 535 | uint16_t diversity = (uint16_t)((value >> 32) & 0xFFFF); |
| 536 | uintptr_t uVA = userVA + ((uintptr_t)chain - contents); |
| 537 | if (isBind) { |
| 538 | uint32_t bindOrdinal = value & 0x00FFFFFF; |
| 539 | if (bindOrdinal >= hdr->mwli_binds_count) { |
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
| 543 | return KERN_FAILURE; |
| 544 | } |
				if (signPointer(bindsArray[bindOrdinal], (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
| 546 | return KERN_FAILURE; |
| 547 | } |
| 548 | } else { |
| 549 | /* note: in auth rebases only have 32-bits, so target is always offset - never vmaddr */ |
| 550 | uint64_t target = (value & 0xFFFFFFFF) + hdr->mwli_image_address; |
				if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
| 552 | return KERN_FAILURE; |
| 553 | } |
| 554 | } |
| 555 | } else { |
| 556 | if (isBind) { |
| 557 | uint32_t bindOrdinal = value & 0x00FFFFFF; |
| 558 | if (bindOrdinal >= hdr->mwli_binds_count) { |
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
| 562 | return KERN_FAILURE; |
| 563 | } else { |
| 564 | uint64_t addend19 = (value >> 32) & 0x0007FFFF; |
| 565 | if (addend19 & 0x40000) { |
| 566 | addend19 |= 0xFFFFFFFFFFFC0000ULL; |
| 567 | } |
| 568 | *chain = bindsArray[bindOrdinal] + addend19; |
| 569 | } |
| 570 | } else { |
| 571 | uint64_t target = (value & 0x7FFFFFFFFFFULL); |
| 572 | uint64_t high8 = (value << 13) & 0xFF00000000000000ULL; |
| 573 | *chain = target + targetAdjust + high8; |
| 574 | } |
| 575 | } |
| 576 | chain += delta; |
| 577 | } while (delta != 0); |
| 578 | return KERN_SUCCESS; |
| 579 | } |
| 580 | |
| 581 | /* |
 * Apply fixups to a shared cache page used by a 64 bit process using pointer authentication.
| 583 | */ |
| 584 | static kern_return_t |
| 585 | fixupCachePageAuth64( |
| 586 | uint64_t userVA, |
| 587 | vm_offset_t contents, |
| 588 | vm_offset_t end_contents, |
	dyld_pager_t pager,
| 590 | struct dyld_chained_starts_in_segment *segInfo, |
| 591 | uint32_t pageIndex) |
| 592 | { |
| 593 | void *link_info = pager->dyld_link_info; |
| 594 | uint32_t link_info_size = pager->dyld_link_info_size; |
| 595 | struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info; |
| 596 | |
| 597 | /* |
| 598 | * range check against link_info, note +1 to include data we'll dereference |
| 599 | */ |
| 600 | if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) { |
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]", __func__);
| 603 | return KERN_FAILURE; |
| 604 | } |
| 605 | uint16_t firstStartOffset = segInfo->page_start[pageIndex]; |
| 606 | |
| 607 | /* |
| 608 | * All done if no fixups on the page |
| 609 | */ |
| 610 | if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) { |
| 611 | return KERN_SUCCESS; |
| 612 | } |
| 613 | |
| 614 | /* |
| 615 | * Walk the chain of offsets to fix up |
| 616 | */ |
| 617 | uint64_t *chain = (uint64_t *)(contents + firstStartOffset); |
| 618 | uint64_t delta = 0; |
| 619 | do { |
| 620 | if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) { |
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
| 624 | return KERN_FAILURE; |
| 625 | } |
| 626 | uint64_t value = *chain; |
| 627 | delta = (value >> 52) & 0x7FF; |
| 628 | bool isAuth = (value & 0x8000000000000000ULL); |
| 629 | if (isAuth) { |
| 630 | bool addrDiv = ((value & (1ULL << 50)) != 0); |
| 631 | bool keyIsData = ((value & (1ULL << 51)) != 0); |
			// the key is always in the A family; this bit selects IA vs. DA
| 633 | ptrauth_key key = keyIsData ? ptrauth_key_asda : ptrauth_key_asia; |
| 634 | uint16_t diversity = (uint16_t)((value >> 34) & 0xFFFF); |
| 635 | uintptr_t uVA = userVA + ((uintptr_t)chain - contents); |
| 636 | // target is always a 34-bit runtime offset, never a vmaddr |
| 637 | uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address; |
			if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
| 639 | return KERN_FAILURE; |
| 640 | } |
| 641 | } else { |
| 642 | // target is always a 34-bit runtime offset, never a vmaddr |
| 643 | uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address; |
| 644 | uint64_t high8 = (value << 22) & 0xFF00000000000000ULL; |
| 645 | *chain = target + high8; |
| 646 | } |
| 647 | chain += delta; |
| 648 | } while (delta != 0); |
| 649 | return KERN_SUCCESS; |
| 650 | } |
| 651 | #endif /* defined(HAS_APPLE_PAC) */ |
| 652 | |
| 653 | |
| 654 | /* |
| 655 | * Handle dyld fixups for a page. |
| 656 | */ |
| 657 | static kern_return_t |
| 658 | fixup_page( |
| 659 | vm_offset_t contents, |
| 660 | vm_offset_t end_contents, |
| 661 | uint64_t userVA, |
	dyld_pager_t pager)
| 663 | { |
| 664 | void *link_info = pager->dyld_link_info; |
| 665 | uint32_t link_info_size = pager->dyld_link_info_size; |
| 666 | struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info; |
| 667 | struct dyld_chained_starts_in_segment *segInfo = NULL; |
| 668 | uint32_t pageIndex = 0; |
| 669 | uint32_t segIndex; |
| 670 | struct dyld_chained_starts_in_image *startsInfo; |
| 671 | struct dyld_chained_starts_in_segment *seg; |
| 672 | uint64_t segStartAddress; |
| 673 | uint64_t segEndAddress; |
| 674 | |
| 675 | /* |
| 676 | * Note this is a linear search done for every page we have to fix up. |
| 677 | * However, it should be quick as there should only be 2 or 4 segments: |
| 678 | * - data |
| 679 | * - data const |
| 680 | * - data auth (for arm64e) |
| 681 | * - data const auth (for arm64e) |
| 682 | */ |
| 683 | startsInfo = (struct dyld_chained_starts_in_image *)((uintptr_t)hdr + hdr->mwli_chains_offset); |
| 684 | for (segIndex = 0; segIndex < startsInfo->seg_count; ++segIndex) { |
| 685 | seg = (struct dyld_chained_starts_in_segment *) |
| 686 | ((uintptr_t)startsInfo + startsInfo->seg_info_offset[segIndex]); |
| 687 | |
| 688 | /* |
| 689 | * ensure we don't go out of bounds of the link_info |
| 690 | */ |
| 691 | if ((uintptr_t)seg + sizeof(*seg) > (uintptr_t)link_info + link_info_size) { |
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_INFO_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): seg_info out of bounds\n", __func__);
| 694 | return KERN_FAILURE; |
| 695 | } |
| 696 | |
| 697 | segStartAddress = hdr->mwli_image_address + seg->segment_offset; |
| 698 | segEndAddress = segStartAddress + seg->page_count * seg->page_size; |
| 699 | if (segStartAddress <= userVA && userVA < segEndAddress) { |
| 700 | segInfo = seg; |
| 701 | pageIndex = (uint32_t)(userVA - segStartAddress) / PAGE_SIZE; |
| 702 | |
| 703 | /* ensure seg->size fits in link_info_size */ |
| 704 | if ((uintptr_t)seg + seg->size > (uintptr_t)link_info + link_info_size) { |
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size out of bounds\n", __func__);
| 707 | return KERN_FAILURE; |
| 708 | } |
| 709 | if (seg->size < sizeof(struct dyld_chained_starts_in_segment)) { |
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size too small\n", __func__);
| 712 | return KERN_FAILURE; |
| 713 | } |
| 714 | /* ensure page_count and pageIndex are valid too */ |
| 715 | if ((uintptr_t)&seg->page_start[seg->page_count] > (uintptr_t)link_info + link_info_size) { |
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count out of bounds\n", __func__);
| 718 | return KERN_FAILURE; |
| 719 | } |
| 720 | if (pageIndex >= seg->page_count) { |
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count too small\n", __func__);
| 723 | return KERN_FAILURE; |
| 724 | } |
| 725 | |
| 726 | break; |
| 727 | } |
| 728 | } |
| 729 | |
| 730 | /* |
 * TODO: could this be tolerated, i.e. just return KERN_SUCCESS with nothing to do?
| 732 | */ |
| 733 | if (segInfo == NULL) { |
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_SEG_FOR_VA), (uintptr_t)userVA);
		printf("%s(): No segment for user VA 0x%llx\n", __func__, (long long)userVA);
| 736 | return KERN_FAILURE; |
| 737 | } |
| 738 | |
| 739 | /* |
| 740 | * Route to the appropriate fixup routine |
| 741 | */ |
| 742 | switch (hdr->mwli_pointer_format) { |
| 743 | #if defined(HAS_APPLE_PAC) |
| 744 | case DYLD_CHAINED_PTR_ARM64E: |
| 745 | fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, false); |
| 746 | break; |
| 747 | case DYLD_CHAINED_PTR_ARM64E_USERLAND: |
| 748 | case DYLD_CHAINED_PTR_ARM64E_USERLAND24: |
| 749 | fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, true); |
| 750 | break; |
| 751 | case DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE: |
| 752 | fixupCachePageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex); |
| 753 | break; |
| 754 | #endif /* defined(HAS_APPLE_PAC) */ |
| 755 | case DYLD_CHAINED_PTR_64: |
| 756 | fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, false); |
| 757 | break; |
| 758 | case DYLD_CHAINED_PTR_64_OFFSET: |
| 759 | fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, true); |
| 760 | break; |
| 761 | case DYLD_CHAINED_PTR_32: |
| 762 | fixupPage32(userVA, contents, end_contents, link_info, link_info_size, segInfo, pageIndex); |
| 763 | break; |
| 764 | default: |
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BAD_POINTER_FMT), (uintptr_t)userVA);
		printf("%s(): unknown pointer_format %d\n", __func__, hdr->mwli_pointer_format);
| 767 | return KERN_FAILURE; |
| 768 | } |
| 769 | return KERN_SUCCESS; |
| 770 | } |
| 771 | |
| 772 | /* |
| 773 | * dyld_pager_data_request() |
| 774 | * |
| 775 | * Handles page-in requests from VM. |
| 776 | */ |
| 777 | static kern_return_t |
| 778 | ( |
| 779 | memory_object_t mem_obj, |
| 780 | memory_object_offset_t offset, |
| 781 | memory_object_cluster_size_t length, |
| 782 | __unused vm_prot_t protection_required, |
| 783 | memory_object_fault_info_t mo_fault_info) |
| 784 | { |
	dyld_pager_t pager;
| 786 | memory_object_control_t mo_control; |
| 787 | upl_t upl = NULL; |
| 788 | int upl_flags; |
| 789 | upl_size_t upl_size; |
| 790 | upl_page_info_t *upl_pl = NULL; |
| 791 | unsigned int pl_count; |
| 792 | vm_object_t src_top_object = VM_OBJECT_NULL; |
| 793 | vm_object_t src_page_object = VM_OBJECT_NULL; |
| 794 | vm_object_t dst_object; |
| 795 | kern_return_t kr; |
| 796 | kern_return_t retval = KERN_SUCCESS; |
| 797 | vm_offset_t src_vaddr; |
| 798 | vm_offset_t dst_vaddr; |
| 799 | vm_offset_t cur_offset; |
| 800 | kern_return_t error_code; |
| 801 | vm_prot_t prot; |
| 802 | vm_page_t src_page, top_page; |
| 803 | int interruptible; |
| 804 | struct vm_object_fault_info fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info); |
| 805 | struct mwl_info_hdr *hdr; |
| 806 | uint32_t r; |
| 807 | uint64_t userVA; |
| 808 | |
| 809 | fault_info.stealth = TRUE; |
| 810 | fault_info.io_sync = FALSE; |
| 811 | fault_info.mark_zf_absent = FALSE; |
| 812 | fault_info.batch_pmap_op = FALSE; |
| 813 | interruptible = fault_info.interruptible; |
| 814 | |
| 815 | pager = dyld_pager_lookup(mem_obj); |
| 816 | assert(pager->dyld_is_ready); |
| 817 | assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 1); /* pager is alive */ |
| 818 | assert(pager->dyld_is_mapped); /* pager is mapped */ |
| 819 | hdr = (struct mwl_info_hdr *)pager->dyld_link_info; |
| 820 | |
| 821 | /* |
| 822 | * Gather in a UPL all the VM pages requested by VM. |
| 823 | */ |
| 824 | mo_control = pager->dyld_header.mo_control; |
| 825 | |
| 826 | upl_size = length; |
| 827 | upl_flags = |
| 828 | UPL_RET_ONLY_ABSENT | |
| 829 | UPL_SET_LITE | |
| 830 | UPL_NO_SYNC | |
| 831 | UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ |
| 832 | UPL_SET_INTERNAL; |
| 833 | pl_count = 0; |
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_UPL), 0 /* arg */);
| 839 | retval = kr; |
| 840 | goto done; |
| 841 | } |
	dst_object = memory_object_control_to_vm_object(mo_control);
| 843 | assert(dst_object != VM_OBJECT_NULL); |
| 844 | |
| 845 | /* |
| 846 | * We'll map the original data in the kernel address space from the |
| 847 | * backing VM object, itself backed by the executable/library file via |
| 848 | * the vnode pager. |
| 849 | */ |
| 850 | src_top_object = pager->dyld_backing_object; |
| 851 | assert(src_top_object != VM_OBJECT_NULL); |
| 852 | vm_object_reference(src_top_object); /* keep the source object alive */ |
| 853 | |
| 854 | /* |
| 855 | * Fill in the contents of the pages requested by VM. |
| 856 | */ |
| 857 | upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl); |
| 858 | pl_count = length / PAGE_SIZE; |
| 859 | for (cur_offset = 0; |
| 860 | retval == KERN_SUCCESS && cur_offset < length; |
| 861 | cur_offset += PAGE_SIZE) { |
| 862 | ppnum_t dst_pnum; |
| 863 | |
		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
| 865 | /* this page is not in the UPL: skip it */ |
| 866 | continue; |
| 867 | } |
| 868 | |
| 869 | /* |
| 870 | * Map the source page in the kernel's virtual address space. |
| 871 | * We already hold a reference on the src_top_object. |
| 872 | */ |
| 873 | retry_src_fault: |
| 874 | vm_object_lock(src_top_object); |
| 875 | vm_object_paging_begin(src_top_object); |
| 876 | error_code = 0; |
| 877 | prot = VM_PROT_READ; |
| 878 | src_page = VM_PAGE_NULL; |
		kr = vm_fault_page(src_top_object,
		    offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
| 891 | switch (kr) { |
| 892 | case VM_FAULT_SUCCESS: |
| 893 | break; |
| 894 | case VM_FAULT_RETRY: |
| 895 | goto retry_src_fault; |
| 896 | case VM_FAULT_MEMORY_SHORTAGE: |
| 897 | if (vm_page_wait(interruptible)) { |
| 898 | goto retry_src_fault; |
| 899 | } |
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
| 901 | OS_FALLTHROUGH; |
| 902 | case VM_FAULT_INTERRUPTED: |
| 903 | retval = MACH_SEND_INTERRUPTED; |
| 904 | goto done; |
| 905 | case VM_FAULT_SUCCESS_NO_VM_PAGE: |
| 906 | /* success but no VM page: fail */ |
| 907 | vm_object_paging_end(src_top_object); |
| 908 | vm_object_unlock(src_top_object); |
| 909 | OS_FALLTHROUGH; |
| 910 | case VM_FAULT_MEMORY_ERROR: |
| 911 | /* the page is not there ! */ |
| 912 | if (error_code) { |
| 913 | retval = error_code; |
| 914 | } else { |
| 915 | retval = KERN_MEMORY_ERROR; |
| 916 | } |
| 917 | goto done; |
| 918 | default: |
| 919 | panic("dyld_pager_data_request: vm_fault_page() unexpected error 0x%x\n" , kr); |
| 920 | } |
| 921 | assert(src_page != VM_PAGE_NULL); |
| 922 | assert(src_page->vmp_busy); |
| 923 | |
| 924 | if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { |
| 925 | vm_page_lockspin_queues(); |
| 926 | if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { |
				vm_page_speculate(src_page, FALSE);
| 928 | } |
| 929 | vm_page_unlock_queues(); |
| 930 | } |
| 931 | |
| 932 | /* |
| 933 | * Establish pointers to the source and destination physical pages. |
| 934 | */ |
		dst_pnum = (ppnum_t)upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
| 936 | assert(dst_pnum != 0); |
| 937 | |
		src_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
| 940 | src_page_object = VM_PAGE_OBJECT(src_page); |
| 941 | |
| 942 | /* |
| 943 | * Validate the original page... |
| 944 | */ |
| 945 | if (src_page_object->code_signed) { |
			vm_page_validate_cs_mapped(src_page, PAGE_SIZE, 0, (const void *)src_vaddr);
| 947 | } |
| 948 | |
| 949 | /* |
| 950 | * ... and transfer the results to the destination page. |
| 951 | */ |
| 952 | UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_validated); |
| 953 | UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_tainted); |
| 954 | UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_nx); |
| 955 | |
| 956 | /* |
| 957 | * The page provider might access a mapped file, so let's |
| 958 | * release the object lock for the source page to avoid a |
| 959 | * potential deadlock. |
| 960 | * The source page is kept busy and we have a |
| 961 | * "paging_in_progress" reference on its object, so it's safe |
| 962 | * to unlock the object here. |
| 963 | */ |
| 964 | assert(src_page->vmp_busy); |
| 965 | assert(src_page_object->paging_in_progress > 0); |
| 966 | vm_object_unlock(src_page_object); |
| 967 | |
| 968 | /* |
| 969 | * Process the original contents of the source page |
| 970 | * into the destination page. |
| 971 | */ |
		bcopy((const char *)src_vaddr, (char *)dst_vaddr, PAGE_SIZE);
| 973 | |
| 974 | /* |
| 975 | * Figure out what the original user virtual address was, based on the offset. |
| 976 | */ |
| 977 | userVA = 0; |
| 978 | for (r = 0; r < pager->dyld_num_range; ++r) { |
| 979 | vm_offset_t o = offset + cur_offset; |
| 980 | if (pager->dyld_file_offset[r] <= o && |
| 981 | o < pager->dyld_file_offset[r] + pager->dyld_size[r]) { |
| 982 | userVA = pager->dyld_address[r] + (o - pager->dyld_file_offset[r]); |
| 983 | break; |
| 984 | } |
| 985 | } |
| 986 | |
| 987 | /* |
	 * If we found a valid range, apply fixups to the page; otherwise fail.
| 989 | */ |
| 990 | if (r == pager->dyld_num_range) { |
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_RANGE_NOT_FOUND), (uintptr_t)userVA);
			printf("%s(): Range not found for offset 0x%llx\n", __func__, (long long)cur_offset);
| 993 | retval = KERN_FAILURE; |
		} else if (fixup_page(dst_vaddr, dst_vaddr + PAGE_SIZE, userVA, pager) != KERN_SUCCESS) {
| 995 | /* KDBG / printf was done under fixup_page() */ |
| 996 | retval = KERN_FAILURE; |
| 997 | } |
| 998 | if (retval != KERN_SUCCESS) { |
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SLIDE_ERROR), userVA);
| 1000 | } |
| 1001 | |
| 1002 | assert(VM_PAGE_OBJECT(src_page) == src_page_object); |
| 1003 | assert(src_page->vmp_busy); |
| 1004 | assert(src_page_object->paging_in_progress > 0); |
| 1005 | vm_object_lock(src_page_object); |
| 1006 | |
| 1007 | /* |
| 1008 | * Cleanup the result of vm_fault_page() of the source page. |
| 1009 | */ |
| 1010 | PAGE_WAKEUP_DONE(src_page); |
| 1011 | src_page = VM_PAGE_NULL; |
| 1012 | vm_object_paging_end(src_page_object); |
| 1013 | vm_object_unlock(src_page_object); |
| 1014 | |
| 1015 | if (top_page != VM_PAGE_NULL) { |
| 1016 | assert(VM_PAGE_OBJECT(top_page) == src_top_object); |
| 1017 | vm_object_lock(src_top_object); |
| 1018 | VM_PAGE_FREE(top_page); |
| 1019 | vm_object_paging_end(src_top_object); |
| 1020 | vm_object_unlock(src_top_object); |
| 1021 | } |
| 1022 | } |
| 1023 | |
| 1024 | done: |
| 1025 | if (upl != NULL) { |
| 1026 | /* clean up the UPL */ |
| 1027 | |
| 1028 | /* |
| 1029 | * The pages are currently dirty because we've just been |
| 1030 | * writing on them, but as far as we're concerned, they're |
| 1031 | * clean since they contain their "original" contents as |
| 1032 | * provided by us, the pager. |
| 1033 | * Tell the UPL to mark them "clean". |
| 1034 | */ |
| 1035 | upl_clear_dirty(upl, TRUE); |
| 1036 | |
| 1037 | /* abort or commit the UPL */ |
| 1038 | if (retval != KERN_SUCCESS) { |
			upl_abort(upl, 0);
| 1040 | } else { |
| 1041 | boolean_t empty; |
| 1042 | assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size), |
| 1043 | "upl %p offset 0x%llx size 0x%x\n" , |
| 1044 | upl, upl->u_offset, upl->u_size); |
| 1045 | upl_commit_range(upl_object: upl, offset: 0, size: upl->u_size, |
| 1046 | UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, |
| 1047 | page_list: upl_pl, page_listCnt: pl_count, empty: &empty); |
| 1048 | } |
| 1049 | |
| 1050 | /* and deallocate the UPL */ |
| 1051 | upl_deallocate(upl); |
| 1052 | upl = NULL; |
| 1053 | } |
| 1054 | if (src_top_object != VM_OBJECT_NULL) { |
		vm_object_deallocate(src_top_object);
| 1056 | } |
| 1057 | return retval; |
| 1058 | } |
| 1059 | |
| 1060 | /* |
| 1061 | * dyld_pager_reference() |
| 1062 | * |
| 1063 | * Get a reference on this memory object. |
| 1064 | * For external usage only. Assumes that the initial reference count is not 0, |
 * i.e. one should not "revive" a dead pager this way.
| 1066 | */ |
| 1067 | static void |
| 1068 | ( |
| 1069 | memory_object_t mem_obj) |
| 1070 | { |
	dyld_pager_t pager;
| 1072 | |
| 1073 | pager = dyld_pager_lookup(mem_obj); |
| 1074 | |
	lck_mtx_lock(&dyld_pager_lock);
	os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
	lck_mtx_unlock(&dyld_pager_lock);
| 1078 | } |
| 1079 | |
| 1080 | |
| 1081 | |
| 1082 | /* |
| 1083 | * dyld_pager_terminate_internal: |
| 1084 | * |
| 1085 | * Trigger the asynchronous termination of the memory object associated |
| 1086 | * with this pager. |
| 1087 | * When the memory object is terminated, there will be one more call |
| 1088 | * to memory_object_deallocate() (i.e. dyld_pager_deallocate()) |
| 1089 | * to finish the clean up. |
| 1090 | * |
| 1091 | * "dyld_pager_lock" should not be held by the caller. |
| 1092 | */ |
| 1093 | static void |
| 1094 | ( |
| 1095 | dyld_pager_t ) |
| 1096 | { |
| 1097 | assert(pager->dyld_is_ready); |
| 1098 | assert(!pager->dyld_is_mapped); |
| 1099 | assert(os_ref_get_count_raw(&pager->dyld_ref_count) == 1); |
| 1100 | |
| 1101 | if (pager->dyld_backing_object != VM_OBJECT_NULL) { |
		vm_object_deallocate(pager->dyld_backing_object);
		pager->dyld_backing_object = VM_OBJECT_NULL;
	}
	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->dyld_header.mo_control, VM_OBJECT_DESTROY_UNKNOWN_REASON);
| 1107 | } |
| 1108 | |
| 1109 | /* |
| 1110 | * dyld_pager_deallocate_internal() |
| 1111 | * |
| 1112 | * Release a reference on this pager and free it when the last reference goes away. |
| 1113 | * Can be called with dyld_pager_lock held or not, but always returns |
| 1114 | * with it unlocked. |
| 1115 | */ |
| 1116 | static void |
| 1117 | ( |
| 1118 | dyld_pager_t , |
| 1119 | bool locked) |
| 1120 | { |
| 1121 | os_ref_count_t ref_count; |
| 1122 | |
| 1123 | if (!locked) { |
		lck_mtx_lock(&dyld_pager_lock);
| 1125 | } |
| 1126 | |
| 1127 | /* drop a reference on this pager */ |
| 1128 | ref_count = os_ref_release_locked_raw(&pager->dyld_ref_count, NULL); |
| 1129 | |
| 1130 | if (ref_count == 1) { |
| 1131 | /* |
| 1132 | * Only this reference is left, which means that |
| 1133 | * no one is really holding on to this pager anymore. |
| 1134 | * Terminate it. |
| 1135 | */ |
| 1136 | dyld_pager_dequeue(pager); |
| 1137 | /* the pager is all ours: no need for the lock now */ |
		lck_mtx_unlock(&dyld_pager_lock);
| 1139 | dyld_pager_terminate_internal(pager); |
| 1140 | } else if (ref_count == 0) { |
| 1141 | /* |
| 1142 | * Dropped all references; the memory object has |
| 1143 | * been terminated. Do some final cleanup and release the |
| 1144 | * pager structure. |
| 1145 | */ |
		lck_mtx_unlock(&dyld_pager_lock);
| 1147 | |
| 1148 | kfree_data(pager->dyld_link_info, pager->dyld_link_info_size); |
| 1149 | pager->dyld_link_info = NULL; |
| 1150 | |
| 1151 | if (pager->dyld_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) { |
			memory_object_control_deallocate(pager->dyld_header.mo_control);
| 1153 | pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
| 1154 | } |
| 1155 | kfree_type(struct dyld_pager, pager); |
| 1156 | pager = NULL; |
| 1157 | } else { |
| 1158 | /* there are still plenty of references: keep going... */ |
		lck_mtx_unlock(&dyld_pager_lock);
| 1160 | } |
| 1161 | |
| 1162 | /* caution: lock is not held on return... */ |
| 1163 | } |
| 1164 | |
| 1165 | /* |
| 1166 | * dyld_pager_deallocate() |
| 1167 | * |
| 1168 | * Release a reference on this pager and free it when the last |
| 1169 | * reference goes away. |
| 1170 | */ |
| 1171 | static void |
| 1172 | ( |
| 1173 | memory_object_t mem_obj) |
| 1174 | { |
	dyld_pager_t pager;
| 1176 | |
| 1177 | pager = dyld_pager_lookup(mem_obj); |
| 1178 | dyld_pager_deallocate_internal(pager, FALSE); |
| 1179 | } |
| 1180 | |
| 1181 | /* |
| 1182 | * |
| 1183 | */ |
| 1184 | static kern_return_t |
| 1185 | ( |
| 1186 | #if !DEBUG |
| 1187 | __unused |
| 1188 | #endif |
| 1189 | memory_object_t mem_obj) |
| 1190 | { |
| 1191 | return KERN_SUCCESS; |
| 1192 | } |
| 1193 | |
| 1194 | /* |
| 1195 | * dyld_pager_map() |
| 1196 | * |
| 1197 | * This allows VM to let us, the EMM, know that this memory object |
| 1198 | * is currently mapped one or more times. This is called by VM each time |
| 1199 | * the memory object gets mapped, but we only take one extra reference the |
| 1200 | * first time it is called. |
| 1201 | */ |
| 1202 | static kern_return_t |
| 1203 | ( |
| 1204 | memory_object_t mem_obj, |
| 1205 | __unused vm_prot_t prot) |
| 1206 | { |
	dyld_pager_t pager;
| 1208 | |
| 1209 | pager = dyld_pager_lookup(mem_obj); |
| 1210 | |
	lck_mtx_lock(&dyld_pager_lock);
| 1212 | assert(pager->dyld_is_ready); |
| 1213 | assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0); /* pager is alive */ |
| 1214 | if (!pager->dyld_is_mapped) { |
| 1215 | pager->dyld_is_mapped = TRUE; |
| 1216 | os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL); |
| 1217 | } |
	lck_mtx_unlock(&dyld_pager_lock);
| 1219 | |
| 1220 | return KERN_SUCCESS; |
| 1221 | } |
| 1222 | |
| 1223 | /* |
| 1224 | * dyld_pager_last_unmap() |
| 1225 | * |
| 1226 | * This is called by VM when this memory object is no longer mapped anywhere. |
| 1227 | */ |
| 1228 | static kern_return_t |
| 1229 | ( |
| 1230 | memory_object_t mem_obj) |
| 1231 | { |
	dyld_pager_t pager;
| 1233 | |
| 1234 | pager = dyld_pager_lookup(mem_obj); |
| 1235 | |
	lck_mtx_lock(&dyld_pager_lock);
| 1237 | if (pager->dyld_is_mapped) { |
| 1238 | /* |
| 1239 | * All the mappings are gone, so let go of the one extra |
| 1240 | * reference that represents all the mappings of this pager. |
| 1241 | */ |
| 1242 | pager->dyld_is_mapped = FALSE; |
| 1243 | dyld_pager_deallocate_internal(pager, TRUE); |
| 1244 | /* caution: deallocate_internal() released the lock ! */ |
| 1245 | } else { |
		lck_mtx_unlock(&dyld_pager_lock);
| 1247 | } |
| 1248 | |
| 1249 | return KERN_SUCCESS; |
| 1250 | } |
| 1251 | |
| 1252 | static boolean_t |
| 1253 | ( |
| 1254 | memory_object_t mem_obj, |
| 1255 | memory_object_offset_t offset, |
| 1256 | vm_object_t *backing_object, |
| 1257 | vm_object_offset_t *backing_offset) |
| 1258 | { |
	dyld_pager_t pager;
| 1260 | |
| 1261 | pager = dyld_pager_lookup(mem_obj); |
| 1262 | |
| 1263 | *backing_object = pager->dyld_backing_object; |
| 1264 | *backing_offset = offset; |
| 1265 | |
| 1266 | return TRUE; |
| 1267 | } |
| 1268 | |
| 1269 | |
| 1270 | /* |
| 1271 | * Convert from memory_object to dyld_pager. |
| 1272 | */ |
| 1273 | static dyld_pager_t |
| 1274 | ( |
| 1275 | memory_object_t mem_obj) |
| 1276 | { |
	dyld_pager_t pager;
| 1278 | |
| 1279 | assert(mem_obj->mo_pager_ops == &dyld_pager_ops); |
| 1280 | pager = (dyld_pager_t)(uintptr_t) mem_obj; |
| 1281 | assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0); |
| 1282 | return pager; |
| 1283 | } |
| 1284 | |
| 1285 | /* |
| 1286 | * Create and return a pager for the given object with the |
| 1287 | * given slide information. |
| 1288 | */ |
| 1289 | static dyld_pager_t |
| 1290 | ( |
| 1291 | #if !defined(HAS_APPLE_PAC) |
| 1292 | __unused |
| 1293 | #endif /* defined(HAS_APPLE_PAC) */ |
| 1294 | task_t task, |
| 1295 | vm_object_t backing_object, |
| 1296 | struct mwl_region *regions, |
| 1297 | uint32_t region_cnt, |
| 1298 | void *link_info, |
| 1299 | uint32_t link_info_size) |
| 1300 | { |
	dyld_pager_t pager;
| 1302 | memory_object_control_t control; |
| 1303 | kern_return_t kr; |
| 1304 | |
| 1305 | pager = kalloc_type(struct dyld_pager, Z_WAITOK); |
| 1306 | if (pager == NULL) { |
| 1307 | return NULL; |
| 1308 | } |
| 1309 | |
| 1310 | /* |
| 1311 | * The vm_map call takes both named entry ports and raw memory |
| 1312 | * objects in the same parameter. We need to make sure that |
| 1313 | * vm_map does not see this object as a named entry port. So, |
| 1314 | * we reserve the first word in the object for a fake ip_kotype |
| 1315 | * setting - that will tell vm_map to use it as a memory object. |
| 1316 | */ |
| 1317 | pager->dyld_header.mo_ikot = IKOT_MEMORY_OBJECT; |
| 1318 | pager->dyld_header.mo_pager_ops = &dyld_pager_ops; |
| 1319 | pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
| 1320 | |
	pager->dyld_is_ready = FALSE; /* not ready until it has a "name" */
| 1322 | /* existence reference for the caller */ |
| 1323 | os_ref_init_count_raw(&pager->dyld_ref_count, NULL, 1); |
| 1324 | pager->dyld_is_mapped = FALSE; |
| 1325 | pager->dyld_backing_object = backing_object; |
| 1326 | pager->dyld_link_info = link_info; |
| 1327 | pager->dyld_link_info_size = link_info_size; |
| 1328 | #if defined(HAS_APPLE_PAC) |
| 1329 | pager->dyld_a_key = (task->map && task->map->pmap && !task->map->pmap->disable_jop) ? task->jop_pid : 0; |
| 1330 | #endif /* defined(HAS_APPLE_PAC) */ |
| 1331 | |
| 1332 | /* |
| 1333 | * Record the regions so the pager can find the offset from an address. |
| 1334 | */ |
| 1335 | pager->dyld_num_range = region_cnt; |
| 1336 | for (uint32_t r = 0; r < region_cnt; ++r) { |
| 1337 | pager->dyld_file_offset[r] = regions[r].mwlr_file_offset; |
| 1338 | pager->dyld_address[r] = regions[r].mwlr_address; |
| 1339 | pager->dyld_size[r] = regions[r].mwlr_size; |
| 1340 | } |
| 1341 | |
| 1342 | vm_object_reference(backing_object); |
	lck_mtx_lock(&dyld_pager_lock);
| 1344 | queue_enter_first(&dyld_pager_queue, |
| 1345 | pager, |
| 1346 | dyld_pager_t, |
| 1347 | dyld_pager_queue); |
| 1348 | dyld_pager_count++; |
| 1349 | if (dyld_pager_count > dyld_pager_count_max) { |
| 1350 | dyld_pager_count_max = dyld_pager_count; |
| 1351 | } |
	lck_mtx_unlock(&dyld_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager, 0, &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&dyld_pager_lock);
	/* the new pager is now ready to be used */
	pager->dyld_is_ready = TRUE;
	lck_mtx_unlock(&dyld_pager_lock);
| 1363 | |
| 1364 | /* wakeup anyone waiting for this pager to be ready */ |
| 1365 | thread_wakeup(&pager->dyld_is_ready); |
| 1366 | |
| 1367 | return pager; |
| 1368 | } |
| 1369 | |
| 1370 | /* |
| 1371 | * dyld_pager_setup() |
| 1372 | * |
| 1373 | * Provide the caller with a memory object backed by the provided |
| 1374 | * "backing_object" VM object. |
| 1375 | */ |
| 1376 | static memory_object_t |
| 1377 | ( |
| 1378 | task_t task, |
| 1379 | vm_object_t backing_object, |
| 1380 | struct mwl_region *regions, |
| 1381 | uint32_t region_cnt, |
| 1382 | void *link_info, |
| 1383 | uint32_t link_info_size) |
| 1384 | { |
	dyld_pager_t pager;
| 1386 | |
| 1387 | /* create new pager */ |
| 1388 | pager = dyld_pager_create(task, backing_object, regions, region_cnt, link_info, link_info_size); |
| 1389 | if (pager == NULL) { |
| 1390 | /* could not create a new pager */ |
| 1391 | return MEMORY_OBJECT_NULL; |
| 1392 | } |
| 1393 | |
	lck_mtx_lock(&dyld_pager_lock);
	while (!pager->dyld_is_ready) {
		lck_mtx_sleep(&dyld_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->dyld_is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&dyld_pager_lock);
| 1402 | |
| 1403 | return (memory_object_t) pager; |
| 1404 | } |
| 1405 | |
| 1406 | /* |
| 1407 | * Set up regions which use a special pager to apply dyld fixups. |
| 1408 | * |
| 1409 | * The arguments to this function are mostly just used as input. |
 * The exception is link_info: it is saved off in the pager that gets
 * created, so the caller must not free it when KERN_SUCCESS is returned.
| 1412 | */ |
| 1413 | kern_return_t |
| 1414 | vm_map_with_linking( |
| 1415 | task_t task, |
| 1416 | struct mwl_region *regions, |
| 1417 | uint32_t region_cnt, |
| 1418 | void *link_info, |
| 1419 | uint32_t link_info_size, |
| 1420 | memory_object_control_t file_control) |
| 1421 | { |
| 1422 | vm_map_t map = task->map; |
| 1423 | vm_object_t object = VM_OBJECT_NULL; |
	memory_object_t pager = MEMORY_OBJECT_NULL;
| 1425 | uint32_t r; |
| 1426 | vm_map_address_t map_addr; |
| 1427 | kern_return_t kr = KERN_SUCCESS; |
| 1428 | |
	object = memory_object_control_to_vm_object(file_control);
	if (object == VM_OBJECT_NULL || object->internal) {
		printf("%s no object for file_control\n", __func__);
| 1432 | object = VM_OBJECT_NULL; |
| 1433 | kr = KERN_INVALID_ADDRESS; |
| 1434 | goto done; |
| 1435 | } |
| 1436 | |
| 1437 | /* create a pager */ |
	pager = dyld_pager_setup(task, object, regions, region_cnt, link_info, link_info_size);
| 1439 | if (pager == MEMORY_OBJECT_NULL) { |
| 1440 | kr = KERN_RESOURCE_SHORTAGE; |
| 1441 | goto done; |
| 1442 | } |
| 1443 | |
| 1444 | for (r = 0; r < region_cnt; ++r) { |
| 1445 | vm_map_kernel_flags_t vmk_flags = { |
| 1446 | .vmf_fixed = true, |
| 1447 | .vmf_overwrite = true, |
| 1448 | .vmkf_overwrite_immutable = true, |
| 1449 | }; |
| 1450 | struct mwl_region *rp = ®ions[r]; |
| 1451 | |
| 1452 | /* map that pager over the portion of the mapping that needs sliding */ |
| 1453 | map_addr = (vm_map_address_t)rp->mwlr_address; |
| 1454 | |
| 1455 | if (rp->mwlr_protections & VM_PROT_TPRO) { |
| 1456 | vmk_flags.vmf_tpro = TRUE; |
| 1457 | } |
| 1458 | |
		kr = vm_map_enter_mem_object(map,
		    &map_addr,
		    rp->mwlr_size,
		    (mach_vm_offset_t) 0,
		    vmk_flags,
		    (ipc_port_t)(uintptr_t)pager,
		    rp->mwlr_file_offset,
		    TRUE,                /* copy == TRUE, as this is MAP_PRIVATE so COW may happen */
		    rp->mwlr_protections & VM_PROT_DEFAULT,
		    rp->mwlr_protections & VM_PROT_DEFAULT,
		    VM_INHERIT_DEFAULT);
| 1470 | if (kr != KERN_SUCCESS) { |
| 1471 | /* no need to clean up earlier regions, this will be process fatal */ |
| 1472 | goto done; |
| 1473 | } |
| 1474 | } |
| 1475 | |
| 1476 | /* success! */ |
| 1477 | kr = KERN_SUCCESS; |
| 1478 | |
| 1479 | done: |
| 1480 | |
| 1481 | if (pager != MEMORY_OBJECT_NULL) { |
| 1482 | /* |
| 1483 | * Release the pager reference obtained by dyld_pager_setup(). |
| 1484 | * The mapping, if it succeeded, is now holding a reference on the memory object. |
| 1485 | */ |
		memory_object_deallocate(pager);
| 1487 | pager = MEMORY_OBJECT_NULL; |
| 1488 | } |
| 1489 | return kr; |
| 1490 | } |
| 1491 | |
| 1492 | static uint64_t |
| 1493 | ( |
| 1494 | dyld_pager_t ) |
| 1495 | { |
| 1496 | uint64_t pages_purged; |
| 1497 | vm_object_t object; |
| 1498 | |
| 1499 | pages_purged = 0; |
	object = memory_object_to_vm_object((memory_object_t) pager);
| 1501 | assert(object != VM_OBJECT_NULL); |
| 1502 | vm_object_lock(object); |
| 1503 | pages_purged = object->resident_page_count; |
| 1504 | vm_object_reap_pages(object, REAP_DATA_FLUSH); |
| 1505 | pages_purged -= object->resident_page_count; |
| 1506 | // printf(" %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count); |
| 1507 | vm_object_unlock(object); |
| 1508 | return pages_purged; |
| 1509 | } |
| 1510 | |
| 1511 | uint64_t |
dyld_pager_purge_all(void)
| 1513 | { |
| 1514 | uint64_t pages_purged; |
	dyld_pager_t pager;
| 1516 | |
| 1517 | pages_purged = 0; |
	lck_mtx_lock(&dyld_pager_lock);
	queue_iterate(&dyld_pager_queue, pager, dyld_pager_t, dyld_pager_queue) {
		pages_purged += dyld_pager_purge(pager);
	}
	lck_mtx_unlock(&dyld_pager_lock);
| 1523 | #if DEVELOPMENT || DEBUG |
| 1524 | printf(" %s:%d pages purged: %llu\n" , __FUNCTION__, __LINE__, pages_purged); |
| 1525 | #endif /* DEVELOPMENT || DEBUG */ |
| 1526 | return pages_purged; |
| 1527 | } |
| 1528 | |