/*
 * Copyright (c) 2022 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <os/overflow.h>
#include <machine/atomic.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <kern/lock_rw.h>
#include <libkern/libkern.h>
#include <libkern/section_keywords.h>
#include <libkern/coretrust/coretrust.h>
#include <pexpert/pexpert.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/codesign.h>
#include <sys/code_signing.h>
#include <uuid/uuid.h>
#include <IOKit/IOBSD.h>

#if PMAP_CS_PPL_MONITOR
/*
 * The Page Protection Layer (PPL) implements the PMAP_CS monitor environment,
 * which provides code signing and memory isolation enforcement for data
 * structures which are critical to ensuring that all code executed on the
 * system is authorized to do so.
 *
 * Unless the data is managed by the PPL itself, XNU needs to page-align
 * everything it hands to the PPL, and then reference the memory as read-only.
 */
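
/*
 * A minimal sketch of the resulting allocation pattern (illustrative only;
 * see ppl_register_provisioning_profile() below for a complete instance):
 *
 *	vm_address_t addr = 0;
 *	vm_size_t aligned_size = round_page(size);
 *	kmem_alloc(kernel_map, &addr, aligned_size,
 *	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
 *	memcpy((void*)addr, data, size);
 *	... hand (addr, aligned_size) to the PPL, which locks it down ...
 */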

typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
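
/*
 * Translation helpers implemented by the pmap layer: phystokv() maps a
 * physical address into the kernel's physical aperture, and
 * kvtophys_nofail() resolves a kernel virtual address to its backing
 * physical address, panicking if the translation fails.
 */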
extern vm_map_address_t phystokv(pmap_paddr_t pa);
extern pmap_paddr_t kvtophys_nofail(vm_offset_t va);

#pragma mark Initialization

void
code_signing_init(void)
{
	/* Does nothing */
}

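/*
 * Assumed definition for this excerpt: the lockdown mode flag is marked
 * read-only-late so that it becomes immutable once the system locks down,
 * matching the expectations described in ppl_enter_lockdown_mode() below.
 */
SECURITY_READ_ONLY_LATE(bool) ppl_lockdown_mode_enabled = false;
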
void
ppl_enter_lockdown_mode(void)
{
	/*
	 * This function is expected to be called before read-only lockdown on the
	 * system. As a result, the PPL variable should be mutable. If not, then we
	 * will panic (as we should).
	 */
	ppl_lockdown_mode_enabled = true;

	printf("entered lockdown mode policy for the PPL\n");
}

#pragma mark Developer Mode

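/*
 * Developer mode state is kept in PPL-protected storage
 * (ppl_developer_mode_storage); the kernel holds a read-only-late pointer to
 * it and toggles the state through a PPL trap.
 */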
SECURITY_READ_ONLY_LATE(bool*) developer_mode_enabled = &ppl_developer_mode_storage;

void
ppl_toggle_developer_mode(
	bool state)
{
	pmap_toggle_developer_mode(state);
}

#pragma mark Code Signing and Provisioning Profiles

bool
ppl_code_signing_enabled(void)
{
	return pmap_cs_enabled();
}

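/*
 * Register a provisioning profile with the PPL. The profile blob is copied
 * into a freshly allocated, page-aligned payload which is then registered
 * with (and locked down by) the PPL. On success, *profile_obj points to the
 * PPL-managed profile object.
 */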
kern_return_t
ppl_register_provisioning_profile(
	const void *profile_blob,
	const size_t profile_blob_size,
	void **profile_obj)
{
	pmap_profile_payload_t *pmap_payload = NULL;
	vm_address_t payload_addr = 0;
	vm_size_t payload_size = 0;
	vm_size_t payload_size_aligned = 0;
	kern_return_t ret = KERN_DENIED;

	if (os_add_overflow(sizeof(*pmap_payload), profile_blob_size, &payload_size)) {
		panic("attempted to load a too-large profile: %lu bytes", profile_blob_size);
	}
	payload_size_aligned = round_page(payload_size);

	ret = kmem_alloc(kernel_map, &payload_addr, payload_size_aligned,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for pmap profile payload: %d\n", ret);
		goto exit;
	}

	/* We need to set up the payload before we send it to the PPL */
	pmap_payload = (pmap_profile_payload_t*)payload_addr;

	pmap_payload->profile_blob_size = profile_blob_size;
	memcpy(pmap_payload->profile_blob, profile_blob, profile_blob_size);

	ret = pmap_register_provisioning_profile(payload_addr, payload_size_aligned);
	if (ret == KERN_SUCCESS) {
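		/*
		 * The payload is now locked down by the PPL. Translate the kernel
		 * virtual address of the embedded profile object through the
		 * physical aperture, which is how PPL-owned memory is referenced.
		 */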
		*profile_obj = &pmap_payload->profile_obj_storage;
		*profile_obj = (pmap_cs_profile_t*)phystokv(kvtophys_nofail((vm_offset_t)*profile_obj));
	}

exit:
	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		kmem_free(kernel_map, payload_addr, payload_size_aligned);
		payload_addr = 0;
		payload_size_aligned = 0;
	}

	return ret;
}

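/*
 * Unregister a provisioning profile from the PPL. On success, the PPL has
 * released its lockdown of the original payload, which can then be freed
 * back to the kernel map.
 */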
kern_return_t
ppl_unregister_provisioning_profile(
	void *profile_obj)
{
	pmap_cs_profile_t *ppl_profile_obj = profile_obj;
	kern_return_t ret = KERN_DENIED;

	ret = pmap_unregister_provisioning_profile(ppl_profile_obj);
	if (ret != KERN_SUCCESS) {
		return ret;
	}

	/* Get the original payload address */
	const pmap_profile_payload_t *pmap_payload = ppl_profile_obj->original_payload;
	const vm_address_t payload_addr = (const vm_address_t)pmap_payload;

	/* Get the original payload size */
	vm_size_t payload_size = pmap_payload->profile_blob_size + sizeof(*pmap_payload);
	payload_size = round_page(payload_size);

	/* Free the payload */
	kmem_free(kernel_map, payload_addr, payload_size);
	pmap_payload = NULL;

	return KERN_SUCCESS;
}

kern_return_t
ppl_associate_provisioning_profile(
	void *sig_obj,
	void *profile_obj)
{
	return pmap_associate_provisioning_profile(sig_obj, profile_obj);
}

kern_return_t
ppl_disassociate_provisioning_profile(
	void *sig_obj)
{
	return pmap_disassociate_provisioning_profile(sig_obj);
}

void
ppl_set_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	pmap_set_compilation_service_cdhash(cdhash);
}

bool
ppl_match_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	return pmap_match_compilation_service_cdhash(cdhash);
}

void
ppl_set_local_signing_public_key(
	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
{
	return pmap_set_local_signing_public_key(public_key);
}

uint8_t*
ppl_get_local_signing_public_key(void)
{
	return pmap_get_local_signing_public_key();
}

void
ppl_unrestrict_local_signing_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	pmap_unrestrict_local_signing(cdhash);
}

vm_size_t
ppl_managed_code_signature_size(void)
{
	return pmap_cs_blob_limit;
}

kern_return_t
ppl_register_code_signature(
	const vm_address_t signature_addr,
	const vm_size_t signature_size,
	const vm_offset_t code_directory_offset,
	const char *signature_path,
	void **sig_obj,
	vm_address_t *ppl_signature_addr)
{
	pmap_cs_code_directory_t *cd_entry = NULL;

	/* PPL doesn't care about the signature path */
	(void)signature_path;

	kern_return_t ret = pmap_cs_register_code_signature_blob(
		signature_addr,
		signature_size,
		code_directory_offset,
		(pmap_cs_code_directory_t**)sig_obj);

	if (ret != KERN_SUCCESS) {
		return ret;
	}
	cd_entry = *((pmap_cs_code_directory_t**)sig_obj);

	if (ppl_signature_addr) {
		*ppl_signature_addr = (vm_address_t)cd_entry->superblob;
	}

	return KERN_SUCCESS;
}
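
/*
 * A hedged sketch of the typical lifecycle through these wrappers (error
 * handling elided; exact ordering may vary by caller):
 *
 *	void *sig_obj = NULL;
 *	vm_address_t ppl_addr = 0;
 *	ppl_register_code_signature(addr, size, cd_offset, path, &sig_obj, &ppl_addr);
 *	ppl_verify_code_signature(sig_obj);
 *	ppl_associate_code_signature(pmap, sig_obj, region_addr, region_size, 0);
 *	...
 *	ppl_unregister_code_signature(sig_obj);
 */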

kern_return_t
ppl_unregister_code_signature(
	void *sig_obj)
{
	return pmap_cs_unregister_code_signature_blob(sig_obj);
}

kern_return_t
ppl_verify_code_signature(
	void *sig_obj)
{
	return pmap_cs_verify_code_signature_blob(sig_obj);
}

kern_return_t
ppl_reconstitute_code_signature(
	void *sig_obj,
	vm_address_t *unneeded_addr,
	vm_size_t *unneeded_size)
{
	return pmap_cs_unlock_unneeded_code_signature(
		sig_obj,
		unneeded_addr,
		unneeded_size);
}

#pragma mark Address Spaces

kern_return_t
ppl_associate_code_signature(
	pmap_t pmap,
	void *sig_obj,
	const vm_address_t region_addr,
	const vm_size_t region_size,
	const vm_offset_t region_offset)
{
	return pmap_cs_associate(
		pmap,
		sig_obj,
		region_addr,
		region_size,
		region_offset);
}

kern_return_t
ppl_allow_jit_region(
	__unused pmap_t pmap)
{
	/* PPL does not support this API */
	return KERN_NOT_SUPPORTED;
}

kern_return_t
ppl_associate_jit_region(
	pmap_t pmap,
	const vm_address_t region_addr,
	const vm_size_t region_size)
{
	return pmap_cs_associate(
		pmap,
		PMAP_CS_ASSOCIATE_JIT,
		region_addr,
		region_size,
		0);
}

kern_return_t
ppl_associate_debug_region(
	pmap_t pmap,
	const vm_address_t region_addr,
	const vm_size_t region_size)
{
	return pmap_cs_associate(
		pmap,
		PMAP_CS_ASSOCIATE_COW,
		region_addr,
		region_size,
		0);
}

kern_return_t
ppl_address_space_debugged(
	pmap_t pmap)
{
	/*
	 * ppl_associate_debug_region is effectively idempotent: it simply checks
	 * whether the address space is already being debugged and returns a value
	 * based on that. The memory region is never actually inserted into the
	 * address space, so we can pass placeholder values here. The only caveat
	 * is that the region must be page-aligned and must not start at NULL.
	 */
	return ppl_associate_debug_region(pmap, PAGE_SIZE, PAGE_SIZE);
}

kern_return_t
ppl_allow_invalid_code(
	pmap_t pmap)
{
	return pmap_cs_allow_invalid(pmap);
}

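/*
 * Query the trust level of an address space. The _kdp suffix indicates that
 * this path is safe to call from the kernel debugger and panic context,
 * where normal locking is unavailable.
 */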
kern_return_t
ppl_get_trust_level_kdp(
	pmap_t pmap,
	uint32_t *trust_level)
{
	return pmap_get_trust_level_kdp(pmap, trust_level);
}

kern_return_t
ppl_address_space_exempt(
	const pmap_t pmap)
{
	if (pmap_performs_stage2_translations(pmap)) {
		return KERN_SUCCESS;
	}

	return KERN_DENIED;
}

kern_return_t
ppl_fork_prepare(
	pmap_t old_pmap,
	pmap_t new_pmap)
{
	return pmap_cs_fork_prepare(old_pmap, new_pmap);
}

kern_return_t
ppl_acquire_signing_identifier(
	const void *sig_obj,
	const char **signing_id)
{
	const pmap_cs_code_directory_t *cd_entry = sig_obj;

	/* If we reach here, the identifier must have been set up */
	assert(cd_entry->identifier != NULL);

	if (signing_id) {
		*signing_id = cd_entry->identifier;
	}

	return KERN_SUCCESS;
}

#pragma mark Entitlements

kern_return_t
ppl_associate_kernel_entitlements(
	void *sig_obj,
	const void *kernel_entitlements)
{
	pmap_cs_code_directory_t *cd_entry = sig_obj;
	return pmap_associate_kernel_entitlements(cd_entry, kernel_entitlements);
}

kern_return_t
ppl_resolve_kernel_entitlements(
	pmap_t pmap,
	const void **kernel_entitlements)
{
	kern_return_t ret = KERN_DENIED;
	const void *entitlements = NULL;

	ret = pmap_resolve_kernel_entitlements(pmap, &entitlements);
	if ((ret == KERN_SUCCESS) && (kernel_entitlements != NULL)) {
		*kernel_entitlements = entitlements;
	}

	return ret;
}

kern_return_t
ppl_accelerate_entitlements(
	void *sig_obj,
	CEQueryContext_t *ce_ctx)
{
	pmap_cs_code_directory_t *cd_entry = sig_obj;
	kern_return_t ret = KERN_DENIED;

	ret = pmap_accelerate_entitlements(cd_entry);

	/*
	 * We only ever get KERN_ABORTED when we cannot accelerate the entitlements
	 * because doing so would consume too much memory. In this case, we still
	 * want to return the ce_ctx since we don't want the system to fall back to
	 * non-PPL locked-down memory, so we treat this as a success case.
	 */
	if (ret == KERN_ABORTED) {
		ret = KERN_SUCCESS;
	}

	/* Return the accelerated context to the caller */
	if ((ret == KERN_SUCCESS) && (ce_ctx != NULL)) {
		*ce_ctx = cd_entry->ce_ctx;
	}

	return ret;
}

#pragma mark Image4
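
/*
 * The wrappers in this section are thin shims over the pmap_image4_* entry
 * points. The PMAP_CS runtime owns the backing storage for AppleImage4 state
 * (nonces and runtime objects), so most calls forward directly into the PPL.
 */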

void*
ppl_image4_storage_data(
	size_t *allocated_size)
{
	return pmap_image4_pmap_data(allocated_size);
}

void
ppl_image4_set_nonce(
	const img4_nonce_domain_index_t ndi,
	const img4_nonce_t *nonce)
{
	return pmap_image4_set_nonce(ndi, nonce);
}

void
ppl_image4_roll_nonce(
	const img4_nonce_domain_index_t ndi)
{
	return pmap_image4_roll_nonce(ndi);
}

errno_t
ppl_image4_copy_nonce(
	const img4_nonce_domain_index_t ndi,
	img4_nonce_t *nonce_out)
{
	return pmap_image4_copy_nonce(ndi, nonce_out);
}

errno_t
ppl_image4_execute_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	const img4_buff_t *payload,
	const img4_buff_t *manifest)
{
	errno_t err = EINVAL;
	kern_return_t kr = KERN_DENIED;
	img4_buff_t payload_aligned = IMG4_BUFF_INIT;
	img4_buff_t manifest_aligned = IMG4_BUFF_INIT;
	vm_address_t payload_addr = 0;
	vm_size_t payload_len_aligned = 0;
	vm_address_t manifest_addr = 0;
	vm_size_t manifest_len_aligned = 0;

	if (payload == NULL) {
		printf("invalid object execution request: no payload\n");
		goto out;
	}

	/*
	 * The PPL will attempt to lock down both the payload and the manifest
	 * before executing the object. In order for that to happen, both
	 * artifacts need to be page-aligned.
	 */
	payload_len_aligned = round_page(payload->i4b_len);
	if (manifest != NULL) {
		manifest_len_aligned = round_page(manifest->i4b_len);
	}

	kr = kmem_alloc(
		kernel_map,
		&payload_addr,
		payload_len_aligned,
		KMA_KOBJECT,
		VM_KERN_MEMORY_SECURITY);

	if (kr != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 payload: %d\n", kr);
		err = ENOMEM;
		goto out;
	}

	/* Copy in the payload */
	memcpy((uint8_t*)payload_addr, payload->i4b_bytes, payload->i4b_len);

	/* Construct the aligned payload buffer */
	payload_aligned.i4b_bytes = (uint8_t*)payload_addr;
	payload_aligned.i4b_len = payload->i4b_len;

	if (manifest != NULL) {
		kr = kmem_alloc(
			kernel_map,
			&manifest_addr,
			manifest_len_aligned,
			KMA_KOBJECT,
			VM_KERN_MEMORY_SECURITY);

		if (kr != KERN_SUCCESS) {
			printf("unable to allocate memory for image4 manifest: %d\n", kr);
			err = ENOMEM;
			goto out;
		}

		/* Construct the aligned manifest buffer */
		manifest_aligned.i4b_bytes = (uint8_t*)manifest_addr;
		manifest_aligned.i4b_len = manifest->i4b_len;

		/* Copy in the manifest */
		memcpy((uint8_t*)manifest_addr, manifest->i4b_bytes, manifest->i4b_len);
	}

	err = pmap_image4_execute_object(obj_spec_index, &payload_aligned, &manifest_aligned);
	if (err != 0) {
		printf("unable to execute image4 object: %d\n", err);
		goto out;
	}

out:
	/* We always free the manifest as it isn't required anymore */
	if (manifest_addr != 0) {
		kmem_free(kernel_map, manifest_addr, manifest_len_aligned);
		manifest_addr = 0;
		manifest_len_aligned = 0;
	}

	/*
	 * If we encountered an error, free the allocated payload. On success,
	 * ownership of the payload has effectively passed to the monitor, so it
	 * must not be freed here.
	 */
	if ((err != 0) && (payload_addr != 0)) {
		kmem_free(kernel_map, payload_addr, payload_len_aligned);
		payload_addr = 0;
		payload_len_aligned = 0;
	}

	return err;
}

errno_t
ppl_image4_copy_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	vm_address_t object_out,
	size_t *object_length)
{
	errno_t err = EINVAL;
	kern_return_t kr = KERN_DENIED;
	vm_address_t object_addr = 0;
	vm_size_t object_len_aligned = 0;

	if (object_out == 0) {
		printf("invalid object copy request: no object input buffer\n");
		goto out;
	} else if (object_length == NULL) {
		printf("invalid object copy request: no object input length\n");
		goto out;
	}

	/*
	 * The PPL will attempt to pin the input buffer in order to ensure that the
	 * kernel didn't pass in PPL-owned buffers. The PPL cannot pin the same page
	 * more than once, and attempting to do so will panic the system. Hence, we
	 * allocate fresh pages for the PPL to pin.
	 *
	 * We can pass in the address of the length pointer since it is allocated
	 * on the stack; the PPL can safely pin our stack for the duration of the
	 * call because no other thread can be using it, so the PPL will never
	 * attempt to double-pin that page.
	 */
	object_len_aligned = round_page(*object_length);

	kr = kmem_alloc(
		kernel_map,
		&object_addr,
		object_len_aligned,
		KMA_KOBJECT,
		VM_KERN_MEMORY_SECURITY);

	if (kr != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 object: %d\n", kr);
		err = ENOMEM;
		goto out;
	}

	err = pmap_image4_copy_object(obj_spec_index, object_addr, object_length);
	if (err != 0) {
		printf("unable to copy image4 object: %d\n", err);
		goto out;
	}

	/* Copy the data back into the caller passed buffer */
	memcpy((void*)object_out, (void*)object_addr, *object_length);

out:
	/* We don't ever need to keep around our page-aligned buffer */
	if (object_addr != 0) {
		kmem_free(kernel_map, object_addr, object_len_aligned);
		object_addr = 0;
		object_len_aligned = 0;
	}

	return err;
}

const void*
ppl_image4_get_monitor_exports(void)
{
	/*
	 * AppleImage4 can query the PMAP_CS runtime on its own since the PMAP_CS
	 * runtime is compiled within the kernel extension itself. As a result, we
	 * never expect this KPI to be called when the system uses the PPL monitor.
	 */

	printf("explicit monitor-exports-get not required for the PPL\n");
	return NULL;
}

errno_t
ppl_image4_set_release_type(
	__unused const char *release_type)
{
	/*
	 * AppleImage4 stores the release type in the CTRR protected memory region
	 * of its kernel extension. This is accessible by the PMAP_CS runtime as the
	 * runtime is compiled alongside the kernel extension. As a result, we never
	 * expect this KPI to be called when the system uses the PPL monitor.
	 */

	printf("explicit release-type-set not required for the PPL\n");
	return ENOTSUP;
}

errno_t
ppl_image4_set_bnch_shadow(
	__unused const img4_nonce_domain_index_t ndi)
{
	/*
	 * AppleImage4 stores the BNCH shadow in the CTRR protected memory region
	 * of its kernel extension. This is accessible by the PMAP_CS runtime as the
	 * runtime is compiled alongside the kernel extension. As a result, we never
	 * expect this KPI to be called when the system uses the PPL monitor.
	 */

	printf("explicit BNCH-shadow-set not required for the PPL\n");
	return ENOTSUP;
}

#pragma mark Image4 - New

kern_return_t
ppl_image4_transfer_region(
	__unused image4_cs_trap_t selector,
	__unused vm_address_t region_addr,
	__unused vm_size_t region_size)
{
	/* All region transfers happen internally within the PPL */
	return KERN_SUCCESS;
}

kern_return_t
ppl_image4_reclaim_region(
	__unused image4_cs_trap_t selector,
	__unused vm_address_t region_addr,
	__unused vm_size_t region_size)
{
	/* All region transfers happen internally within the PPL */
	return KERN_SUCCESS;
}

errno_t
ppl_image4_monitor_trap(
	image4_cs_trap_t selector,
	const void *input_data,
	size_t input_size)
{
	return pmap_image4_monitor_trap(selector, input_data, input_size);
}

#endif /* PMAP_CS_PPL_MONITOR */
