/*
 * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _VM_PMAP_CS_H_
#define _VM_PMAP_CS_H_

#ifdef KERNEL_PRIVATE
/*
 * All PMAP_CS definitions are private and should remain accessible only within XNU
 * and Apple internal kernel extensions.
 */

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/boolean.h>
#include <img4/firmware.h>
#include <img4/nonce.h>

__BEGIN_DECLS

/**
 * Check whether PPL-based code signing is enabled on the system. With a bit of
 * refactoring of how this function is defined, we could soon move this within the
 * XNU_KERNEL_PRIVATE directive.
 */
bool
pmap_cs_enabled(void);

#if XNU_KERNEL_PRIVATE
/*
 * Any declarations for types or functions which don't need to be exported to kernel
 * extensions should go here. Naturally, this means this section can also include
 * headers which may not be available to kernel extensions.
 */

#if defined(__arm64__)
#include <pexpert/arm64/board_config.h>
#endif

#include <vm/pmap.h>
#include <kern/lock_rw.h>
#include <libkern/image4/dlxk.h>
#include <TrustCache/API.h>


#if PMAP_CS
#define PMAP_CS_INCLUDE_CODE_SIGNING 1
#endif

#if CONFIG_SPTM
#define PMAP_CS_PPL_MONITOR 0
#elif XNU_MONITOR
#define PMAP_CS_PPL_MONITOR 1
#else
#define PMAP_CS_PPL_MONITOR 0
#endif

#if PMAP_CS_PPL_MONITOR

/*
 * XNU_MONITOR and PMAP_CS are both defined for the same targets in board_config.h.
 * As a result, whenever XNU_MONITOR is defined, so is PMAP_CS. In an ideal world, we
 * could remove the use of the PMAP_CS macro and simply use XNU_MONITOR, but that would
 * require a lot of changes throughout the codebase.
 *
 * PMAP_CS_PPL_MONITOR is defined when we have XNU_MONITOR _and_ we explicitly don't
 * have CONFIG_SPTM. This effectively means that whenever we have PMAP_CS_PPL_MONITOR,
 * we should also always have PMAP_CS_INCLUDE_CODE_SIGNING. Let's enforce this with a
 * build check.
 */
#if !PMAP_CS_INCLUDE_CODE_SIGNING
#error "PMAP_CS_INCLUDE_CODE_SIGNING not defined when under PMAP_CS_PPL_MONITOR"
#endif

/* Immutable part of the trust cache runtime */
extern TrustCacheRuntime_t ppl_trust_cache_rt;

/* Mutable part of the trust cache runtime */
extern TrustCacheMutableRuntime_t ppl_trust_cache_mut_rt;

/* Lock for the trust cache runtime */
extern lck_rw_t ppl_trust_cache_rt_lock;

typedef struct _pmap_img4_payload {
	/* The trust cache data structure which wraps the payload */
	TrustCache_t trust_cache;

	/* The actual image4 trust cache payload */
	uint8_t img4_payload[0];
} pmap_img4_payload_t;

/* Whether the developer mode state has been set or not */
extern bool ppl_developer_mode_set;

/* State of developer mode on the system */
extern bool ppl_developer_mode_storage;

/*
 * State of lockdown mode on the system. This variable is an exclusive view of
 * lockdown mode state for the PPL, and we capture this because the kernel's
 * view of lockdown mode isn't immutable.
 */
extern bool ppl_lockdown_mode_enabled;
extern bool ppl_lockdown_mode_enforce_jit;

/**
 * Check with the PPL trust cache runtime whether a particular trust cache has already
 * been loaded, based on its UUID. The PPL trust cache runtime is kept locked as shared
 * for the duration of the function.
 */
kern_return_t
pmap_check_trust_cache_runtime_for_uuid(
	const uint8_t check_uuid[kUUIDSize]);

/**
 * Load an image4 trust cache of a particular type into the PPL. If validation succeeds,
 * the payload will remain locked, but the other artifacts will be unlocked. If validation
 * fails, all artifacts will be unlocked.
 *
 * All the lengths passed in will first be rounded up to the page size, so it is expected
 * that the caller allocates page-aligned data.
 *
 * Upon successful validation, the trust cache is added to the runtime maintained by the
 * PPL.
 */
kern_return_t
pmap_load_trust_cache_with_type(
	TCType_t type,
	const vm_address_t pmap_img4_payload, const vm_size_t pmap_img4_payload_len,
	const vm_address_t img4_manifest, const vm_size_t img4_manifest_len,
	const vm_address_t img4_aux_manifest, const vm_size_t img4_aux_manifest_len);
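
/*
 * Illustrative sketch (not part of the interface): a caller wrapping a raw image4
 * trust cache and loading it. The allocation helper, the trust cache type and the
 * manifest buffers are assumptions made for this example; only the declaration of
 * pmap_load_trust_cache_with_type() above is authoritative.
 *
 *     // Wrap the raw image4 payload in a pmap_img4_payload_t; both the payload and
 *     // the manifest allocations are expected to be page-aligned since the PPL
 *     // rounds all lengths up to the page size before locking them down.
 *     vm_size_t payload_len = round_page(sizeof(pmap_img4_payload_t) + img4_len);
 *     pmap_img4_payload_t *payload = ...allocate page-aligned memory...;
 *     memcpy(payload->img4_payload, img4_data, img4_len);
 *
 *     kern_return_t kr = pmap_load_trust_cache_with_type(
 *         type,                                // a TCType_t chosen by the caller
 *         (vm_address_t)payload, payload_len,
 *         (vm_address_t)manifest, round_page(manifest_len),
 *         0, 0);                               // no auxiliary manifest
 *     if (kr == KERN_SUCCESS) {
 *         // The payload now belongs to the PPL runtime and stays locked down;
 *         // the manifest buffers have been unlocked and may be freed.
 *     }
 */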

/*
 * Query a trust cache from within the PPL. This function can only be called from within
 * the PPL and does not pin the query_token passed in.
 */
kern_return_t
pmap_query_trust_cache_safe(
	TCQueryType_t query_type,
	const uint8_t cdhash[kTCEntryHashSize],
	TrustCacheQueryToken_t *query_token);

/**
 * Query a trust cache of a particular type from the PPL. The query_token passed in will
 * be pinned by the PPL runtime when the PPL is attempting to write to it. This is an API
 * which can be used by callers external to the PPL.
 */
kern_return_t
pmap_query_trust_cache(
	TCQueryType_t query_type,
	const uint8_t cdhash[kTCEntryHashSize],
	TrustCacheQueryToken_t *query_token);
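
/*
 * Illustrative sketch (assumption, not part of the interface): checking whether a
 * CDHash is present in any loaded trust cache. The query type value is left to the
 * caller; only the declarations above are authoritative.
 *
 *     uint8_t cdhash[kTCEntryHashSize] = { ... };
 *     TrustCacheQueryToken_t token = {0};
 *
 *     kern_return_t kr = pmap_query_trust_cache(query_type, cdhash, &token);
 *     if (kr == KERN_SUCCESS) {
 *         // The CDHash was found; "token" can be handed to the TrustCache API to
 *         // extract per-entry information such as flags or hash type.
 *     }
 */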

/**
 * Toggle the state of developer mode on the system. This function can only be called with
 * a true value once in the lifecycle of a boot.
 *
 * Until this function is called once to set the state, the PPL will block non-platform
 * code and JIT on the system.
 */
void
pmap_toggle_developer_mode(
	bool state);

#endif /* PMAP_CS_PPL_MONITOR */

#if PMAP_CS_INCLUDE_CODE_SIGNING

#ifndef CORE_ENTITLEMENTS_I_KNOW_WHAT_IM_DOING
#define CORE_ENTITLEMENTS_I_KNOW_WHAT_IM_DOING
#endif

#include <CoreEntitlements/CoreEntitlementsPriv.h>
#include <kern/cs_blobs.h>
#include <libkern/tree.h>
#include <libkern/crypto/sha1.h>
#include <libkern/crypto/sha2.h>
#include <libkern/coretrust/coretrust.h>


/* Validation data for a provisioning profile */
typedef struct _pmap_cs_profile {
	/*
	 * The PPL uses the physical aperture mapping to write to this structure. But
	 * we need to save a pointer to the original mapping for when we are going to
	 * unregister this profile from the PPL.
	 */
	void *original_payload;

	/* A CoreEntitlements context for querying the profile */
	der_vm_context_t profile_ctx_storage;
	const der_vm_context_t *profile_ctx;

	/*
	 * Critical information regarding the profile. If a profile has not been verified,
	 * it cannot be associated with a code signature. Development profiles are only
	 * allowed under certain circumstances.
	 */
	bool profile_validated;
	bool development_profile;

	/*
	 * Reference count for the number of code signatures which are currently using
	 * this provisioning profile for their constraint validation.
	 */
	uint32_t reference_count;

	/*
	 * The list of entitlements which are provisioned by this provisioning profile.
	 * If this list allows the debuggee entitlements, then this profile is considered
	 * a development profile.
	 */
	struct CEQueryContext entitlements_ctx_storage;
	struct CEQueryContext *entitlements_ctx;

	/* Red-black tree linkage */
	RB_ENTRY(_pmap_cs_profile) link;
} pmap_cs_profile_t;

/* This is how we expect the kernel to hand us provisioning profiles */
typedef struct _pmap_profile_payload {
	/* Storage for the provisioning profile */
	pmap_cs_profile_t profile_obj_storage;

	/* Size of the signed profile blob */
	vm_size_t profile_blob_size;

	/* The signed profile blob itself */
	uint8_t profile_blob[0];
} pmap_profile_payload_t;

/* Trust levels are ordered, i.e. higher is more trust */
typedef enum {
	PMAP_CS_UNTRUSTED = 0,

	/*
	 * Trust level given to code directory entries which have been retired and are
	 * no longer valid to be used for any purpose. These code directories are freed
	 * when their reference count reaches 0.
	 */
	PMAP_CS_RETIRED,

	/*
	 * This trust level signifies that an application has been verified through the
	 * profile based certificate chain, but the profile in question itself has not
	 * been verified. Code directories with this trust level aren't allowed to be run
	 * or mapped.
	 */
	PMAP_CS_PROFILE_PREFLIGHT,

	/*
	 * Signatures provided through the compilation service. These signatures are meant
	 * to only apply to loadable libraries, and therefore have the lowest acceptable trust.
	 */
	PMAP_CS_COMPILATION_SERVICE,

	/*
	 * Signatures for out-of-process JIT. These can only be loaded by an entitled process
	 * and have a special library validation policy for being mapped within other processes.
	 * These represent a safer version of JIT.
	 */
	PMAP_CS_OOP_JIT,

	/*
	 * These signatures are those which are trusted because they have been signed by the
	 * device local signing key.
	 */
	PMAP_CS_LOCAL_SIGNING,

	/*
	 * These signatures belong to applications which are profile validated, and whose
	 * profiles have themselves been verified.
	 */
	PMAP_CS_PROFILE_VALIDATED,

	/*
	 * These signatures are those belonging to the App Store.
	 */
	PMAP_CS_APP_STORE,

#if PMAP_CS_INCLUDE_INTERNAL_CODE
	/*
	 * Engineering roots which are still Apple signed. These don't need to be platform
	 * because they are backed by a CMS signature and therefore would never have been
	 * platform anyway.
	 */
	PMAP_CS_ENGINEERING_SIGNED_WITH_CMS,
#endif

	/*
	 * These signatures represent platform binaries, which have the highest trust level.
	 */
	PMAP_CS_IN_LOADED_TRUST_CACHE,
	PMAP_CS_IN_STATIC_TRUST_CACHE,

#if PMAP_CS_INCLUDE_INTERNAL_CODE
	/*
	 * Engineering roots installed by engineers for development. These are given the
	 * highest trust level.
	 */
	PMAP_CS_ENGINEERING_SIGNED,
#endif
} pmap_cs_trust_t;

/* Everything with greater or equal trust is a platform binary */
#define PMAP_CS_LOWEST_PLATFORM_BINARY_TRUST PMAP_CS_IN_LOADED_TRUST_CACHE

/* Minimum trust level of a code signature to be run/mapped */
#define PMAP_CS_LOWEST_ACCEPTABLE_TRUST PMAP_CS_COMPILATION_SERVICE
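
/*
 * Illustrative sketch (assumption, not part of the interface): because trust levels
 * are ordered, policy checks reduce to simple comparisons against the macros above.
 * pmap_get_trust_level_kdp() is declared further down in this header; the pmap used
 * here is whatever address space the caller is inspecting.
 *
 *     pmap_cs_trust_t trust = PMAP_CS_UNTRUSTED;
 *
 *     if (pmap_get_trust_level_kdp(pmap, &trust) == KERN_SUCCESS) {
 *         bool platform_binary = (trust >= PMAP_CS_LOWEST_PLATFORM_BINARY_TRUST);
 *         bool allowed_to_run = (trust >= PMAP_CS_LOWEST_ACCEPTABLE_TRUST);
 *         // ...
 *     }
 */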

typedef struct pmap_cs_code_directory {
	union {
		struct {
			/* red-black tree linkage */
			RB_ENTRY(pmap_cs_code_directory) link;

			/*
			 * Blobs which are small enough are allocated and managed by the PPL. This field
			 * is NULL for large blobs.
			 */
			struct pmap_cs_blob *managed_blob;
			bool managed;

			/*
			 * The superblob of the code signature. The length we store here is the length of the
			 * memory allocated by the kernel itself, which may be greater than the actual length
			 * of the code signature.
			 */
			CS_SuperBlob *superblob;
			vm_size_t superblob_size;
			bool superblob_validated;

			/*
			 * Code directories can be arbitrarily large, and hashing them can take a long time. We
			 * usually hash code directories in a continuable way, yielding our execution context
			 * after hashing some amount of the bytes.
			 */
			union {
				SHA384_CTX sha384_ctx;
				SHA256_CTX sha256_ctx;
				SHA1_CTX sha1_ctx;
			};
			uint32_t cd_length_hashed;

			/*
			 * The best code directory is just an offset away from the superblob. This code directory
			 * is extensively validated for all of its fields.
			 */
			const CS_CodeDirectory *cd;
			bool cd_offset_matched;

			/*
			 * The first code directory is used when validating the CMS blob attached to a code signature
			 * and is often not the best code directory.
			 */
			bool first_cd_initialized;
			bool first_cd_hashed;
			uint8_t first_cdhash[CS_HASH_MAX_SIZE];
			const uint8_t *first_cd;
			size_t first_cd_length;
			const uint8_t *cms_blob;
			size_t cms_blob_length;
			CoreTrustDigestType ct_digest_type;

			/*
			 * Frequently accessed information from the code directory kept here as a cache.
			 */
			const char *identifier;
			const char *teamid;
			bool main_binary;

			/*
			 * The DER entitlements blob and CoreEntitlements context for querying this code
			 * signature for entitlements.
			 */
			struct CEQueryContext core_entitlements_ctx;
			struct CEQueryContext *ce_ctx;
			const CS_GenericBlob *der_entitlements;
			uint32_t der_entitlements_size;

			/*
			 * This is perhaps the most important field in this structure. It signifies what
			 * level of confidence we have in this code directory, and this trust level
			 * defines execution/mapping policies for this code directory.
			 */
			pmap_cs_trust_t trust;

			/*
			 * Reference count of how many regions this code directory is associated with through
			 * pmap_cs_associate.
			 */
			uint32_t reference_count;

			/*
			 * We maintain this field as it allows us to quickly index into a bucket of supported
			 * hash types, and choose the correct hashing algorithm for this code directory.
			 */
			unsigned int hash_type;

			/* Lock on this code directory */
			decl_lck_rw_data(, rwlock);

			/*
			 * The PPL may transform the code directory (e.g. for multilevel hashing),
			 * which changes its cdhash. We retain the cdhash of the original, canonical
			 * code directory here.
			 */
			uint8_t cdhash[CS_CDHASH_LEN];

			/*
			 * For performing provisioning profile validation in the PPL, we store the profile as
			 * PPL owned data so it cannot be changed during the validation time period.
			 *
			 * This interface for profile validation is deprecated.
			 */
			struct {
				/* The provisioning profile and its size */
				const uint8_t *profile;
				vm_size_t profile_size;

				/* Size of memory allocated to hold the profile */
				vm_size_t allocation_size;
			} profile_data;

			/*
			 * The provisioning profile object used for validating constraints for profile-validated
			 * signatures. This is the newer interface the PPL uses.
			 */
			pmap_cs_profile_t *profile_obj;

			/*
			 * The leaf certificate for CMS blobs as returned to us by CoreTrust. This is used when
			 * verifying a signature against a provisioning profile.
			 */
			const uint8_t *cms_leaf;
			vm_size_t cms_leaf_size;

			/*
			 * A pointer to the entitlements structure maintained by the kernel. We don't really
			 * care about this other than maintaining a link to it in memory which isn't writable
			 * by the kernel.
			 */
			const void *kernel_entitlements;

			/*
			 * The UBC layer may request the PPL to unlock the unneeded part of the code signature.
			 * We hold this boolean to track whether we have unlocked those unneeded bits already or
			 * not.
			 */
			bool unneeded_code_signature_unlocked;
		};

		/* Free list linkage */
		struct pmap_cs_code_directory *pmap_cs_code_directory_next;
	};
} pmap_cs_code_directory_t;

typedef struct pmap_cs_lookup_results {
	/* Start of the code region */
	vm_map_address_t region_addr;

	/* Size of the code region */
	vm_map_size_t region_size;

	/* Code signature backing the code region */
	struct pmap_cs_code_directory *region_sig;
} pmap_cs_lookup_results_t;

typedef struct _pmap_cs_ce_acceleration_buffer {
	/* Magic to identify this structure */
	uint16_t magic;

	/*
	 * The acceleration buffer can come from one of two places. First, it can come
	 * from the extra space present within the locked down code signature as not
	 * all of it is used all the time. In this case, we don't need to free the
	 * buffer once we're done using it. Second, it can come from the bucket allocator
	 * within the PPL, in which case we need to deallocate this after we're done with
	 * it.
	 */
	union {
		uint16_t unused0;
		bool allocated;
	};

	/* The length of the acceleration buffer */
	uint32_t length;

	/* The embedded buffer bytes */
	uint8_t buffer[0];
} __attribute__((packed)) pmap_cs_ce_acceleration_buffer_t;

/* Ensure we have a known overhead here */
_Static_assert(sizeof(pmap_cs_ce_acceleration_buffer_t) == 8,
    "sizeof(pmap_cs_ce_acceleration_buffer_t) != 8");

#define PMAP_CS_ACCELERATION_BUFFER_MAGIC (0x1337u)
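
/*
 * Illustrative sketch (assumption, not part of the interface): given a pointer to an
 * acceleration buffer, the fixed 8-byte header describes the embedded storage which
 * follows it, and the union records whether the memory came from the PPL bucket
 * allocator and therefore needs to be freed separately.
 *
 *     pmap_cs_ce_acceleration_buffer_t *accel = ...;
 *
 *     if (accel->magic == PMAP_CS_ACCELERATION_BUFFER_MAGIC) {
 *         uint8_t *storage = accel->buffer;                // accel->length bytes long
 *         size_t total = sizeof(*accel) + accel->length;   // full footprint of the buffer
 *         bool needs_free = accel->allocated;              // from the PPL bucket allocator
 *         // ...
 *     }
 */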

#define PMAP_CS_ASSOCIATE_JIT ((void *) -1)
#define PMAP_CS_ASSOCIATE_COW ((void *) -2)
#define PMAP_CS_LOCAL_SIGNING_KEY_SIZE 97

/* Maximum blob size managed by the PPL on its own */
extern const size_t pmap_cs_blob_limit;

/**
 * Initialize the red-black tree and the locks for managing provisioning profiles within
 * the PPL.
 *
 * This function doesn't trap into the PPL but writes to PPL protected data. Hence, this
 * function needs to be called before the PPL is locked down, as otherwise it will cause
 * a system panic.
 */
void
pmap_initialize_provisioning_profiles(void);

/**
 * Register a provisioning profile with the PPL. The payload address and size are both
 * expected to be page aligned. The PPL will attempt to lock down the address range before
 * the profile validation.
 *
 * After validation, the profile will be added to an internal red-black tree, allowing
 * the PPL to safely enumerate all registered profiles.
 */
kern_return_t
pmap_register_provisioning_profile(
	const vm_address_t payload_addr,
	const vm_size_t payload_size);
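
/*
 * Illustrative sketch (assumption, not part of the interface): the kernel hands the PPL
 * a page-aligned pmap_profile_payload_t which wraps the signed profile blob. The
 * allocation helper is a placeholder; only the types and the call below come from this
 * header.
 *
 *     vm_size_t payload_size = round_page(sizeof(pmap_profile_payload_t) + blob_size);
 *     pmap_profile_payload_t *payload = ...allocate page-aligned memory...;
 *
 *     payload->profile_blob_size = blob_size;
 *     memcpy(payload->profile_blob, blob, blob_size);
 *
 *     kern_return_t kr = pmap_register_provisioning_profile(
 *         (vm_address_t)payload, payload_size);
 *     if (kr == KERN_SUCCESS) {
 *         // The payload is now locked down by the PPL; the profile object set up in
 *         // payload->profile_obj_storage is tracked in the PPL's red-black tree.
 *     }
 */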

/**
 * Unregister a provisioning profile from the PPL. The payload which was registered is
 * unlocked, and the caller is free to do whatever they want with it. Unregistration is
 * only successful when there are no reference counts on the profile object.
 */
kern_return_t
pmap_unregister_provisioning_profile(
	pmap_cs_profile_t *profile_obj);

/**
 * Associate a PPL profile object with a PPL code signature object. A code signature
 * object can only have a single profile associated with it, and a successful association
 * increments the reference count on the profile object.
 */
kern_return_t
pmap_associate_provisioning_profile(
	pmap_cs_code_directory_t *cd_entry,
	pmap_cs_profile_t *profile_obj);

/**
 * Disassociate a PPL profile object from a PPL code signature object. Disassociation
 * through this code path is only successful when the code signature object has been
 * verified.
 *
 * This decrements the reference count on the profile object, potentially allowing it
 * to be unregistered if the reference count hits zero.
 */
kern_return_t
pmap_disassociate_provisioning_profile(
	pmap_cs_code_directory_t *cd_entry);
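
/*
 * Illustrative sketch (assumption, not part of the interface): the typical lifecycle of
 * a profile-backed code signature. Here cd_entry is assumed to come from
 * pmap_cs_register_code_signature_blob(), and profile_obj is assumed to point at the
 * profile object set up when the profile payload was registered.
 *
 *     // Bind the profile to the signature; this takes a reference on profile_obj.
 *     kern_return_t kr = pmap_associate_provisioning_profile(cd_entry, profile_obj);
 *
 *     // ...verify and use the code signature...
 *
 *     // Drop the reference once the signature no longer needs the profile; the
 *     // profile can only be unregistered once its reference count reaches zero.
 *     if (kr == KERN_SUCCESS) {
 *         pmap_disassociate_provisioning_profile(cd_entry);
 *         pmap_unregister_provisioning_profile(profile_obj);
 *     }
 */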

/**
 * Store the compilation service CDHash within the PPL storage so that it may not be
 * modified by an attacker. The CDHash being stored must represent a library and this
 * is enforced during signature validation when a signature is trusted because it
 * matched the compilation service CDHash.
 */
void
pmap_set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);

/**
 * Match a specified CDHash against the stored compilation service CDHash. The CDHash
 * is protected with a lock, and that lock is held when the matching takes place in
 * order to ensure we don't compare against a CDHash which is in the process of changing.
 */
bool
pmap_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);
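
/*
 * Illustrative sketch (assumption, not part of the interface): the kernel stores the
 * CDHash it received from the compilation service, and later checks an incoming
 * signature's CDHash against it.
 *
 *     uint8_t cdhash[CS_CDHASH_LEN] = { ... };
 *
 *     pmap_set_compilation_service_cdhash(cdhash);
 *
 *     if (pmap_match_compilation_service_cdhash(candidate_cdhash)) {
 *         // The candidate signature may be given PMAP_CS_COMPILATION_SERVICE trust,
 *         // provided it validates as a loadable library.
 *     }
 */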

/**
 * Store the local signing public key in secured storage within the PPL. The PPL only
 * allows setting a key once, and subsequent attempts to do this will panic the system.
 *
 * This key is used during CoreTrust validation of signatures during code signature
 * verification.
 */
void
pmap_set_local_signing_public_key(
	const uint8_t public_key[PMAP_CS_LOCAL_SIGNING_KEY_SIZE]);

/**
 * Acquire the local signing public key which was previously stored within the PPL. If
 * there is no key stored in the PPL, then this function shall return NULL.
 */
uint8_t*
pmap_get_local_signing_public_key(void);
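
/*
 * Illustrative sketch (assumption, not part of the interface): the key may only be set
 * once per boot, so a caller would typically check whether one is already present.
 *
 *     const uint8_t *current = pmap_get_local_signing_public_key();
 *
 *     if (current == NULL) {
 *         uint8_t key[PMAP_CS_LOCAL_SIGNING_KEY_SIZE] = { ... };
 *         pmap_set_local_signing_public_key(key);   // a second call with a key set panics
 *     }
 */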

/**
 * All locally signed main binaries need to be authorized explicitly before they are
 * allowed to run. As part of this, this API allows an application to register a CDHash
 * for the main binary it is intending to run.
 *
 * Use of this API requires the appropriate entitlement.
 */
void
pmap_unrestrict_local_signing(
	const uint8_t cdhash[CS_CDHASH_LEN]);

/**
 * Register a code signature blob with the PPL. If the blob size is small enough, the
 * PPL will copy the entire blob into its own allocated memory. On the other hand, if
 * the blob is large, the PPL will attempt to lock down the passed-in blob, and doing
 * so will require that the address and size provided are page aligned.
 *
 * After validation, the signature will be added to an internal red-black tree, allowing
 * the PPL to safely enumerate all registered code signatures.
 */
kern_return_t
pmap_cs_register_code_signature_blob(
	vm_address_t blob_addr,
	vm_size_t blob_size,
	vm_offset_t code_directory_offset,
	pmap_cs_code_directory_t **cd_entry);

/**
 * Unregister a code signature blob from the PPL. The signature memory is either freed,
 * in case it was owned by the PPL, or it is unlocked, in case it was XNU-owned but PPL
 * locked.
 *
 * If the memory is unlocked, then the kernel is free to do with the memory as it pleases.
 * Note that this function may not deallocate the cd_entry itself, in case the cd_entry
 * has any reference counts on it. In that case, the cd_entry is retired, and finally
 * freed when the final code region which references the cd_entry is freed.
 */
kern_return_t
pmap_cs_unregister_code_signature_blob(
	pmap_cs_code_directory_t *cd_entry);

/**
 * Verify a signature within the PPL. Once a signature has been verified, it gets assigned
 * a trust level, and based on that trust level, the cd_entry is then allowed to be
 * associated with address spaces.
 */
kern_return_t
pmap_cs_verify_code_signature_blob(
	pmap_cs_code_directory_t *cd_entry);

/**
 * Once we've verified a code signature, not all blobs from the signature are required
 * going forward. This function can be used to unlock parts of the code signature which
 * can then be freed by the kernel to conserve memory.
 */
kern_return_t
pmap_cs_unlock_unneeded_code_signature(
	pmap_cs_code_directory_t *cd_entry,
	vm_address_t *unneeded_addr,
	vm_size_t *unneeded_size);
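
/*
 * Illustrative sketch (assumption, not part of the interface): the flow the kernel
 * typically drives for a code signature. The blob buffer, its size and the code
 * directory offset are assumed to come from the layer handing the signature down.
 *
 *     pmap_cs_code_directory_t *cd_entry = NULL;
 *
 *     // Hand the superblob to the PPL; large blobs must be page aligned.
 *     kern_return_t kr = pmap_cs_register_code_signature_blob(
 *         blob_addr, blob_size, cd_offset, &cd_entry);
 *
 *     // Validate the signature so the cd_entry is assigned a trust level.
 *     if (kr == KERN_SUCCESS) {
 *         kr = pmap_cs_verify_code_signature_blob(cd_entry);
 *     }
 *
 *     // Reclaim the parts of the signature the PPL no longer needs.
 *     if (kr == KERN_SUCCESS) {
 *         vm_address_t unneeded_addr = 0;
 *         vm_size_t unneeded_size = 0;
 *         pmap_cs_unlock_unneeded_code_signature(cd_entry, &unneeded_addr, &unneeded_size);
 *     }
 *
 *     // ...associate with address spaces via pmap_cs_associate()...
 *
 *     // Tear down the registration once the signature is no longer referenced.
 *     pmap_cs_unregister_code_signature_blob(cd_entry);
 */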

/**
 * Create an association of a cd_entry with a code region in the pmap. If the cd_entry
 * is a main binary, then it is set as the main region of the pmap, otherwise the cd_entry
 * is evaluated for a library validation policy against the main binary of the pmap.
 */
kern_return_t
pmap_cs_associate(
	pmap_t pmap,
	pmap_cs_code_directory_t *cd_entry,
	vm_map_address_t vaddr,
	vm_map_size_t vsize,
	vm_object_offset_t offset);

/**
 * Iterate through the code regions present in the splay tree to check whether the
 * specified address intersects with any code region.
 */
void
pmap_cs_lookup(
	pmap_t pmap,
	vm_map_address_t vaddr,
	pmap_cs_lookup_results_t *results);
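
/*
 * Illustrative sketch (assumption, not part of the interface): resolving the code
 * signature which backs a virtual address within an address space.
 *
 *     pmap_cs_lookup_results_t results = {0};
 *
 *     pmap_cs_lookup(pmap, vaddr, &results);
 *     if (results.region_sig != NULL) {
 *         // vaddr falls inside [region_addr, region_addr + region_size) and is
 *         // backed by the code signature in results.region_sig.
 *     }
 */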

/**
 * Let the PPL know that the associated pmap needs to be debugged and therefore it needs
 * to allow invalid code to be mapped in. The PPL shall only allow this when the pmap
 * possesses the appropriate debuggee entitlement.
 */
kern_return_t
pmap_cs_allow_invalid(pmap_t pmap);

/**
 * Acquire the trust level which is put onto a pmap based on the code signature associated
 * with the main region. This function does NOT take a lock on the pmap and does not trap
 * into the PPL.
 */
kern_return_t
pmap_get_trust_level_kdp(
	pmap_t pmap,
	pmap_cs_trust_t *trust_level);

/**
 * Copy over the main binary association from the old address space to the new address
 * space. This is required since a fork copies over all associations from one address space
 * to another, and we need to make sure the main binary association is made before any
 * libraries are mapped in.
 */
kern_return_t
pmap_cs_fork_prepare(
	pmap_t old_pmap,
	pmap_t new_pmap);

/**
 * Keep a reference to the kernel entitlements data structure within the cd_entry in
 * order to establish a read-only chain for the kernel to query in order to resolve the
 * entitlements on an address space.
 */
kern_return_t
pmap_associate_kernel_entitlements(
	pmap_cs_code_directory_t *cd_entry,
	const void *kernel_entitlements);

/**
 * Resolve the kernel entitlements object attached to the main binary of an address space
 * and return it back to the kernel.
 */
kern_return_t
pmap_resolve_kernel_entitlements(
	pmap_t pmap,
	const void **kernel_entitlements);

/**
 * Accelerate the CoreEntitlements context for a particular cd_entry. This operation can
 * only be performed on reconstituted code signatures, and accelerates the context using
 * memory which is locked by the PPL.
 *
 * If the code signature pages have enough space left within them, then that extra space
 * is used for allocating the acceleration buffer, otherwise we tap into the allocator
 * for it.
 */
kern_return_t
pmap_accelerate_entitlements(
	pmap_cs_code_directory_t *cd_entry);

#endif /* PMAP_CS_INCLUDE_CODE_SIGNING */

/**
 * The PPL allocates some space for AppleImage4 to store some of its data. It needs to
 * allocate this space since this region needs to be PPL protected, and the macro which
 * makes a region PPL protected isn't available to kernel extensions.
 *
 * This function can be used to acquire the memory region which is PPL protected.
 */
void*
pmap_image4_pmap_data(
	size_t *allocated_size);

/**
 * Use the AppleImage4 API to set a nonce value based on a particular nonce index.
 * AppleImage4 ensures that a particular nonce domain value can only be set once
 * during the boot of the system.
 */
void
pmap_image4_set_nonce(
	const img4_nonce_domain_index_t ndi,
	const img4_nonce_t *nonce);

/**
 * Use the AppleImage4 API to roll the nonce associated with a particular domain to
 * make the nonce invalid.
 */
void
pmap_image4_roll_nonce(
	const img4_nonce_domain_index_t ndi);

/**
 * Use the AppleImage4 API to copy the nonce value associated with a particular domain.
 *
 * The PPL will attempt to "pin" the nonce_out parameter before writing to it.
 */
errno_t
pmap_image4_copy_nonce(
	const img4_nonce_domain_index_t ndi,
	img4_nonce_t *nonce_out);
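
/*
 * Illustrative sketch (assumption, not part of the interface): reading the nonce for a
 * given domain index and invalidating it afterwards. The domain index value is left to
 * the caller; only the declarations above are authoritative.
 *
 *     img4_nonce_t nonce = {0};
 *
 *     errno_t err = pmap_image4_copy_nonce(ndi, &nonce);
 *     if (err == 0) {
 *         // ...use the nonce...
 *     }
 *
 *     // Rolling the nonce invalidates anything personalized against it.
 *     pmap_image4_roll_nonce(ndi);
 */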

/**
 * Use the AppleImage4 API to perform object execution of a particular known object type.
 *
 * These are the supported object types:
 * - IMG4_RUNTIME_OBJECT_SPEC_INDEX_SUPPLEMENTAL_ROOT
 */
errno_t
pmap_image4_execute_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	const img4_buff_t *payload,
	const img4_buff_t *manifest);

/**
 * Use the AppleImage4 API to copy an executed object's contents into provided memory.
 *
 * The PPL will attempt to "pin" the object_out parameter before writing to it.
 */
errno_t
pmap_image4_copy_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	vm_address_t object_out,
	size_t *object_length);

/**
 * Entry point for the new AppleImage4 interface to enter the PPL monitor for its
 * variety of tasks.
 */
errno_t
pmap_image4_monitor_trap(
	image4_cs_trap_t selector,
	const void *input_data,
	size_t input_size);

#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* KERNEL_PRIVATE */
#endif /* _VM_PMAP_CS_H_ */
