/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <os/overflow.h>
#include <machine/atomic.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/pmap_cs.h>
#include <vm/vm_map.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <kern/lock_rw.h>
#include <libkern/libkern.h>
#include <libkern/section_keywords.h>
#include <libkern/coretrust/coretrust.h>
#include <pexpert/pexpert.h>
#include <sys/user.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/proc_require.h>
#include <sys/codesign.h>
#include <sys/code_signing.h>
#include <sys/lockdown_mode.h>
#include <sys/reason.h>
#include <sys/kdebug_kernel.h>
#include <sys/kdebug_triage.h>
#include <sys/sysctl.h>
#include <uuid/uuid.h>
#include <IOKit/IOBSD.h>

#if CONFIG_SPTM
#include <sys/trusted_execution_monitor.h>
#endif

#if XNU_KERNEL_PRIVATE
vm_address_t
code_signing_allocate(
	size_t alloc_size)
{
	vm_address_t alloc_addr = 0;

	if (alloc_size == 0) {
		panic("%s: zero allocation size", __FUNCTION__);
	}
	size_t aligned_size = round_page(alloc_size);

	kern_return_t ret = kmem_alloc(
		kernel_map,
		&alloc_addr, aligned_size,
		KMA_KOBJECT | KMA_DATA | KMA_ZERO,
		VM_KERN_MEMORY_SECURITY);

	if (ret != KERN_SUCCESS) {
		printf("%s: unable to allocate %lu bytes\n", __FUNCTION__, aligned_size);
	} else if (alloc_addr == 0) {
		printf("%s: invalid allocation\n", __FUNCTION__);
	}

	return alloc_addr;
}

void
code_signing_deallocate(
	vm_address_t *alloc_addr,
	size_t alloc_size)
{
	if (alloc_addr == NULL) {
		panic("%s: invalid pointer provided", __FUNCTION__);
	} else if ((*alloc_addr == 0) || ((*alloc_addr & PAGE_MASK) != 0)) {
		panic("%s: address provided: %p", __FUNCTION__, (void*)(*alloc_addr));
	} else if (alloc_size == 0) {
		panic("%s: zero allocation size", __FUNCTION__);
	}
	size_t aligned_size = round_page(alloc_size);

	/* Free the allocation */
	kmem_free(kernel_map, *alloc_addr, aligned_size);

	/* Clear the address */
	*alloc_addr = 0;
}
#endif /* XNU_KERNEL_PRIVATE */
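
/*
 * Illustrative sketch (not compiled): a typical caller pairs the two helpers
 * above and passes the same size to both so round_page() resolves to the same
 * aligned length. The signature-staging use case and variable names below are
 * hypothetical, not taken from this file.
 *
 *	vm_address_t sig_addr = code_signing_allocate(sig_size);
 *	if (sig_addr == 0) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 *	memcpy((void*)sig_addr, sig_blob, sig_size);
 *	// ... use the page-aligned, zero-filled allocation ...
 *	code_signing_deallocate(&sig_addr, sig_size);
 *	assert(sig_addr == 0);
 */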

SYSCTL_DECL(_security);
SYSCTL_DECL(_security_codesigning);
SYSCTL_NODE(_security, OID_AUTO, codesigning, CTLFLAG_RD, 0, "XNU Code Signing");

static SECURITY_READ_ONLY_LATE(bool) cs_config_set = false;
static SECURITY_READ_ONLY_LATE(code_signing_monitor_type_t) cs_monitor = CS_MONITOR_TYPE_NONE;
static SECURITY_READ_ONLY_LATE(code_signing_config_t) cs_config = 0;

SYSCTL_UINT(_security_codesigning, OID_AUTO, monitor, CTLFLAG_RD, &cs_monitor, 0, "code signing monitor type");
SYSCTL_UINT(_security_codesigning, OID_AUTO, config, CTLFLAG_RD, &cs_config, 0, "code signing configuration");

void
code_signing_configuration(
	code_signing_monitor_type_t *monitor_type_out,
	code_signing_config_t *config_out)
{
	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
	code_signing_config_t config = 0;

	/*
	 * Since we read this variable with load-acquire semantics, if we observe a value
	 * of true, it means we should be able to observe writes to cs_monitor and also
	 * cs_config.
	 */
	if (os_atomic_load(&cs_config_set, acquire) == true) {
		goto config_set;
	}

	/*
	 * Add support for all the code signing features. This function is called very
	 * early in the system boot, much before kernel extensions such as Apple Mobile
	 * File Integrity come online. As a result, this function assumes that all the
	 * code signing features are enabled, and later on, different components can
	 * disable support for different features using disable_code_signing_feature().
	 */
	config |= CS_CONFIG_MAP_JIT;
	config |= CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
	config |= CS_CONFIG_COMPILATION_SERVICE;
	config |= CS_CONFIG_LOCAL_SIGNING;
	config |= CS_CONFIG_OOP_JIT;

#if CODE_SIGNING_MONITOR
	/* Mark the code signing monitor as enabled if required */
	if (csm_enabled() == true) {
		config |= CS_CONFIG_CSM_ENABLED;
	}

#if CONFIG_SPTM
	/*
	 * Since TrustedExecutionMonitor cannot call into any function within XNU, we
	 * query its code signing configuration even before this function is called.
	 * Using that, we modify the state of the code signing features available.
	 */
	if (csm_enabled() == true) {
#if kTXMKernelAPIVersion >= 3
		bool platform_code_only = txm_cs_config->systemPolicy->platformCodeOnly;
#else
		bool platform_code_only = txm_ro_data->platformCodeOnly;
#endif

		/* Disable unsupported features when enforcing platform-code-only */
		if (platform_code_only == true) {
			config &= ~CS_CONFIG_MAP_JIT;
			config &= ~CS_CONFIG_COMPILATION_SERVICE;
			config &= ~CS_CONFIG_LOCAL_SIGNING;
			config &= ~CS_CONFIG_OOP_JIT;
		}

#if kTXMKernelAPIVersion >= 3
		/* MAP_JIT support */
		if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
			config &= ~CS_CONFIG_MAP_JIT;
		}
#endif

		/* Developer mode support */
		if (txm_cs_config->systemPolicy->featureSet.developerMode == false) {
			config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
		}

		/* Compilation service support */
		if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
			config &= ~CS_CONFIG_COMPILATION_SERVICE;
		}

		/* Local signing support */
		if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
			config &= ~CS_CONFIG_LOCAL_SIGNING;
		}

		/* OOP-JIT support */
		if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
			config &= ~CS_CONFIG_OOP_JIT;
		}
	}
	monitor_type = CS_MONITOR_TYPE_TXM;
#elif PMAP_CS_PPL_MONITOR
	monitor_type = CS_MONITOR_TYPE_PPL;
#endif /* CONFIG_SPTM */
#endif /* CODE_SIGNING_MONITOR */

#if DEVELOPMENT || DEBUG
	/*
	 * We only ever need to parse for boot-args based exemption state on DEVELOPMENT
	 * or DEBUG builds as this state is not respected by any code signing component
	 * on RELEASE builds.
	 */

#define CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID 0x01
#define CS_AMFI_MASK_ALLOW_ANY_SIGNATURE 0x02
#define CS_AMFI_MASK_GET_OUT_OF_MY_WAY 0x80

	int amfi_mask = 0;
	int amfi_allow_any_signature = 0;
	int amfi_unrestrict_task_for_pid = 0;
	int amfi_get_out_of_my_way = 0;
	int cs_enforcement_disabled = 0;
	int cs_integrity_skip = 0;

	/* Parse the AMFI mask */
	PE_parse_boot_argn("amfi", &amfi_mask, sizeof(amfi_mask));

	/* Parse the AMFI soft-bypass */
	PE_parse_boot_argn(
		"amfi_allow_any_signature",
		&amfi_allow_any_signature,
		sizeof(amfi_allow_any_signature));

	/* Parse the AMFI debug-bypass */
	PE_parse_boot_argn(
		"amfi_unrestrict_task_for_pid",
		&amfi_unrestrict_task_for_pid,
		sizeof(amfi_unrestrict_task_for_pid));

	/* Parse the AMFI hard-bypass */
	PE_parse_boot_argn(
		"amfi_get_out_of_my_way",
		&amfi_get_out_of_my_way,
		sizeof(amfi_get_out_of_my_way));

	/* Parse the system code signing hard-bypass */
	PE_parse_boot_argn(
		"cs_enforcement_disable",
		&cs_enforcement_disabled,
		sizeof(cs_enforcement_disabled));

	/* Parse the system code signing integrity-check bypass */
	PE_parse_boot_argn(
		"cs_integrity_skip",
		&cs_integrity_skip,
		sizeof(cs_integrity_skip));

	/* CS_CONFIG_UNRESTRICTED_DEBUGGING */
	if (amfi_mask & CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	} else if (amfi_unrestrict_task_for_pid) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	}

	/* CS_CONFIG_ALLOW_ANY_SIGNATURE */
	if (amfi_mask & CS_AMFI_MASK_ALLOW_ANY_SIGNATURE) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_allow_any_signature) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	}

	/* CS_CONFIG_ENFORCEMENT_DISABLED */
	if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ENFORCEMENT_DISABLED;
	}

	/* CS_CONFIG_GET_OUT_OF_MY_WAY */
	if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	}

	/* CS_CONFIG_INTEGRITY_SKIP */
	if (cs_integrity_skip) {
		config |= CS_CONFIG_INTEGRITY_SKIP;
	}

#if CONFIG_SPTM

	if (csm_enabled() == true) {
		/* allow_any_signature */
		if (txm_cs_config->exemptions.allowAnySignature == false) {
			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
		}

		/* unrestrict_task_for_pid */
		if (txm_ro_data && !txm_ro_data->exemptions.allowUnrestrictedDebugging) {
			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
		}

		/* cs_enforcement_disable */
		if (txm_ro_data && !txm_ro_data->exemptions.allowModifiedCode) {
			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
		}

		/* get_out_of_my_way (skip_trust_evaluation) */
		if (txm_cs_config->exemptions.skipTrustEvaluation == false) {
			config &= ~CS_CONFIG_GET_OUT_OF_MY_WAY;
		}
	}

#elif PMAP_CS_PPL_MONITOR

	if (csm_enabled() == true) {
		int pmap_cs_allow_any_signature = 0;
		bool override = PE_parse_boot_argn(
			"pmap_cs_allow_any_signature",
			&pmap_cs_allow_any_signature,
			sizeof(pmap_cs_allow_any_signature));

		if (!pmap_cs_allow_any_signature && override) {
			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
		}

		int pmap_cs_unrestrict_task_for_pid = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_unrestrict_pmap_cs_disable",
			&pmap_cs_unrestrict_task_for_pid,
			sizeof(pmap_cs_unrestrict_task_for_pid));

		if (!pmap_cs_unrestrict_task_for_pid && override) {
			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
		}

		int pmap_cs_enforcement_disable = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_allow_modified_code_pages",
			&pmap_cs_enforcement_disable,
			sizeof(pmap_cs_enforcement_disable));

		if (!pmap_cs_enforcement_disable && override) {
			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
		}
	}

#endif /* CONFIG_SPTM */
#endif /* DEVELOPMENT || DEBUG */

	os_atomic_store(&cs_monitor, monitor_type, relaxed);
	os_atomic_store(&cs_config, config, relaxed);

	/*
	 * We write the cs_config_set variable with store-release semantics which means
	 * no writes before this call will be re-ordered to after this call. Hence, if
	 * someone reads this variable with load-acquire semantics, and they observe a
	 * value of true, then they will be able to observe the correct values of the
	 * cs_monitor and the cs_config variables as well.
	 */
	os_atomic_store(&cs_config_set, true, release);

config_set:
	/* Ensure configuration has been set */
	assert(os_atomic_load(&cs_config_set, relaxed) == true);

	/* Set the monitor type */
	if (monitor_type_out) {
		*monitor_type_out = os_atomic_load(&cs_monitor, relaxed);
	}

	/* Set the configuration */
	if (config_out) {
		*config_out = os_atomic_load(&cs_config, relaxed);
	}
}
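
/*
 * Illustrative sketch (not compiled): a hypothetical consumer queries the
 * configuration once and tests individual feature bits; the release store of
 * cs_config_set above guarantees it observes consistent cs_monitor/cs_config
 * values. The names my_component_init and allow_jit are placeholders.
 *
 *	static void
 *	my_component_init(void)
 *	{
 *		code_signing_monitor_type_t monitor = CS_MONITOR_TYPE_NONE;
 *		code_signing_config_t config = 0;
 *
 *		code_signing_configuration(&monitor, &config);
 *		bool allow_jit = (config & CS_CONFIG_MAP_JIT) != 0;
 *		// ... gate MAP_JIT behavior on allow_jit ...
 *	}
 */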

void
disable_code_signing_feature(
	code_signing_config_t feature)
{
	/*
	 * We require that this function be called only after the code signing config
	 * has been set up initially with a call to code_signing_configuration.
	 */
	if (os_atomic_load(&cs_config_set, acquire) == false) {
		panic("attempted to disable code signing feature without init: %u", feature);
	}

	/*
	 * We require that only a single feature be disabled through a single call to this
	 * function. Moreover, we ensure that only valid features are being disabled.
	 */
	switch (feature) {
	case CS_CONFIG_DEVELOPER_MODE_SUPPORTED:
		cs_config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
		break;

	case CS_CONFIG_COMPILATION_SERVICE:
		cs_config &= ~CS_CONFIG_COMPILATION_SERVICE;
		break;

	case CS_CONFIG_LOCAL_SIGNING:
		cs_config &= ~CS_CONFIG_LOCAL_SIGNING;
		break;

	case CS_CONFIG_OOP_JIT:
		cs_config &= ~CS_CONFIG_OOP_JIT;
		break;

	case CS_CONFIG_MAP_JIT:
		cs_config &= ~CS_CONFIG_MAP_JIT;
		break;

	default:
		panic("attempted to disable a code signing feature invalidly: %u", feature);
	}

	/* Ensure all readers can observe the latest data */
#if defined(__arm64__)
	__asm__ volatile ("dmb ish" ::: "memory");
#elif defined(__x86_64__)
	__asm__ volatile ("mfence" ::: "memory");
#else
#error "Unknown platform -- fence instruction unavailable"
#endif
}
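
/*
 * Illustrative sketch (not compiled): a component which decides after boot
 * that a feature must not be offered calls in as below. Note the init
 * requirement -- calling this before code_signing_configuration() has run
 * panics. The policy hook policy_forbids_oop_jit() is a placeholder.
 *
 *	code_signing_configuration(NULL, NULL);    // ensure config is initialized
 *	if (policy_forbids_oop_jit()) {            // hypothetical policy check
 *		disable_code_signing_feature(CS_CONFIG_OOP_JIT);
 *	}
 */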

#pragma mark Developer Mode

void
enable_developer_mode(void)
{
	CSM_PREFIX(toggle_developer_mode)(true);
}

void
disable_developer_mode(void)
{
	CSM_PREFIX(toggle_developer_mode)(false);
}

bool
developer_mode_state(void)
{
	/* Assume false if the pointer isn't set up */
	if (developer_mode_enabled == NULL) {
		return false;
	}

	return os_atomic_load(developer_mode_enabled, relaxed);
}

#pragma mark Provisioning Profiles
/*
 * AMFI performs full profile validation by itself. XNU only needs to manage provisioning
 * profiles when we have a monitor since the monitor needs to independently verify the
 * profile data as well.
 */

void
garbage_collect_provisioning_profiles(void)
{
#if CODE_SIGNING_MONITOR
	csm_free_provisioning_profiles();
#endif
}

#if CODE_SIGNING_MONITOR

/* Structure used to maintain the set of registered profiles on the system */
typedef struct _cs_profile {
	/* The UUID of the registered profile */
	uuid_t profile_uuid;

	/* The profile validation object from the monitor */
	void *profile_obj;

	/*
	 * In order to minimize the number of times the same profile would need to be
	 * registered, we allow frequently used profiles to skip the garbage collector
	 * for one pass.
	 */
	bool skip_collector;

	/* Linked list linkage */
	SLIST_ENTRY(_cs_profile) link;
} cs_profile_t;

/* Linked list head for registered profiles */
static SLIST_HEAD(, _cs_profile) all_profiles = SLIST_HEAD_INITIALIZER(all_profiles);

/* Lock for the provisioning profiles */
LCK_GRP_DECLARE(profiles_lck_grp, "profiles_lck_grp");
decl_lck_rw_data(, profiles_lock);

void
csm_initialize_provisioning_profiles(void)
{
	/* Ensure the CoreTrust kernel extension has loaded */
	if (coretrust == NULL) {
		panic("coretrust interface not available");
	}

	/* Initialize the provisioning profiles lock */
	lck_rw_init(&profiles_lock, &profiles_lck_grp, 0);
	printf("initialized XNU provisioning profile data\n");

#if PMAP_CS_PPL_MONITOR
	pmap_initialize_provisioning_profiles();
#endif
}

static cs_profile_t*
search_for_profile_uuid(
	const uuid_t profile_uuid)
{
	cs_profile_t *profile = NULL;

	/* Caller is required to acquire the lock */
	lck_rw_assert(&profiles_lock, LCK_RW_ASSERT_HELD);

	SLIST_FOREACH(profile, &all_profiles, link) {
		if (uuid_compare(profile_uuid, profile->profile_uuid) == 0) {
			return profile;
		}
	}

	return NULL;
}

kern_return_t
csm_register_provisioning_profile(
	const uuid_t profile_uuid,
	const void *profile_blob,
	const size_t profile_blob_size)
{
	cs_profile_t *profile = NULL;
	void *monitor_profile_obj = NULL;
	kern_return_t ret = KERN_DENIED;

	/* Allocate storage for the profile wrapper object */
	profile = kalloc_type(cs_profile_t, Z_WAITOK_ZERO);
	assert(profile != NULL);

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* Check to make sure this isn't a duplicate UUID */
	cs_profile_t *dup_profile = search_for_profile_uuid(profile_uuid);
	if (dup_profile != NULL) {
		/* This profile might be used soon -- skip garbage collector */
		dup_profile->skip_collector = true;

		ret = KERN_ALREADY_IN_SET;
		goto exit;
	}

	ret = CSM_PREFIX(register_provisioning_profile)(
		profile_blob,
		profile_blob_size,
		&monitor_profile_obj);

	if (ret == KERN_SUCCESS) {
		/* Copy in the profile UUID */
		uuid_copy(profile->profile_uuid, profile_uuid);

		/* Set up the monitor's profile object */
		profile->profile_obj = monitor_profile_obj;

		/* This profile might be used soon -- skip garbage collector */
		profile->skip_collector = true;

		/* Insert at the head of the profile set */
		SLIST_INSERT_HEAD(&all_profiles, profile, link);
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		/* Free the profile wrapper object */
		kfree_type(cs_profile_t, profile);
		profile = NULL;

		if (ret != KERN_ALREADY_IN_SET) {
			printf("unable to register profile with monitor: %d\n", ret);
		}
	}

	return ret;
}
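
/*
 * Illustrative sketch (not compiled): the expected caller flow (AMFI in
 * practice) registers a profile once, then associates it with any number of
 * signature objects. A duplicate registration reports KERN_ALREADY_IN_SET,
 * which is safe to treat as success. Variable names are placeholders.
 *
 *	kern_return_t kr = csm_register_provisioning_profile(
 *		uuid, blob, blob_size);
 *	if ((kr == KERN_SUCCESS) || (kr == KERN_ALREADY_IN_SET)) {
 *		kr = csm_associate_provisioning_profile(sig_obj, uuid);
 *	}
 */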

kern_return_t
csm_associate_provisioning_profile(
	void *monitor_sig_obj,
	const uuid_t profile_uuid)
{
	cs_profile_t *profile = NULL;
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Lock the profile set as shared */
	lck_rw_lock_shared(&profiles_lock);

	/* Search for the provisioning profile */
	profile = search_for_profile_uuid(profile_uuid);
	if (profile == NULL) {
		ret = KERN_NOT_FOUND;
		goto exit;
	}

	ret = CSM_PREFIX(associate_provisioning_profile)(
		monitor_sig_obj,
		profile->profile_obj);

	if (ret == KERN_SUCCESS) {
		/*
		 * This seems like an active profile -- let it skip the garbage collector on
		 * the next pass. We can modify this field even though we've only taken a shared
		 * lock as in this case we're always setting it to a fixed value.
		 */
		profile->skip_collector = true;
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_shared(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		printf("unable to associate profile: %d\n", ret);
	}
	return ret;
}

kern_return_t
csm_disassociate_provisioning_profile(
	void *monitor_sig_obj)
{
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Call out to the monitor */
	ret = CSM_PREFIX(disassociate_provisioning_profile)(monitor_sig_obj);

	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND)) {
		printf("unable to disassociate profile: %d\n", ret);
	}
	return ret;
}

static kern_return_t
unregister_provisioning_profile(
	cs_profile_t *profile)
{
	kern_return_t ret = KERN_DENIED;

	/* Call out to the monitor */
	ret = CSM_PREFIX(unregister_provisioning_profile)(profile->profile_obj);

	/*
	 * KERN_FAILURE represents the case when the unregistration failed because the
	 * monitor noted that the profile was still being used. Other than that, there
	 * is no other error expected out of this interface. In fact, there is no easy
	 * way to deal with other errors, as the profile state may be corrupted. If we
	 * see a different error, then we panic.
	 */
	if ((ret != KERN_SUCCESS) && (ret != KERN_FAILURE)) {
		panic("unable to unregister profile from monitor: %d | %p\n", ret, profile);
	}

	return ret;
}

void
csm_free_provisioning_profiles(void)
{
	kern_return_t ret = KERN_DENIED;
	cs_profile_t *profile = NULL;
	cs_profile_t *temp_profile = NULL;

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	SLIST_FOREACH_SAFE(profile, &all_profiles, link, temp_profile) {
		if (profile->skip_collector == true) {
			profile->skip_collector = false;
			continue;
		}

		/* Attempt to unregister this profile from the system */
		ret = unregister_provisioning_profile(profile);
		if (ret == KERN_SUCCESS) {
			/* Remove the profile from the profile set */
			SLIST_REMOVE(&all_profiles, profile, _cs_profile, link);

			/* Free the memory consumed for the profile wrapper object */
			kfree_type(cs_profile_t, profile);
			profile = NULL;
		}
	}

	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);
}
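
/*
 * Illustrative timeline (not compiled): because registration and association
 * both set skip_collector, a profile survives the first garbage-collection
 * pass after its last use, and is only unregistered on the second consecutive
 * pass in which it went unused (assuming the monitor reports it as no longer
 * in use).
 *
 *	csm_register_provisioning_profile(...);    // skip_collector = true
 *	csm_free_provisioning_profiles();          // pass 1: skipped, flag cleared
 *	csm_free_provisioning_profiles();          // pass 2: unregistered and freed
 */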

#endif /* CODE_SIGNING_MONITOR */

#pragma mark Code Signing
/*
 * AMFI performs full signature validation by itself. For some things, AMFI uses XNU in
 * order to abstract away the underlying implementation for data storage, but for most of
 * these, AMFI doesn't directly interact with them, and they're only required when we have
 * a code signing monitor on the system.
 */

void
set_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	CSM_PREFIX(set_compilation_service_cdhash)(cdhash);
}

bool
match_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	return CSM_PREFIX(match_compilation_service_cdhash)(cdhash);
}

void
set_local_signing_public_key(
	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
{
	CSM_PREFIX(set_local_signing_public_key)(public_key);
}

uint8_t*
get_local_signing_public_key(void)
{
	return CSM_PREFIX(get_local_signing_public_key)();
}

void
unrestrict_local_signing_cdhash(
	__unused const uint8_t cdhash[CS_CDHASH_LEN])
{
	/*
	 * Since AMFI manages code signing on its own, we only need to unrestrict the
	 * local signing cdhash when we have a monitor environment.
	 */

#if CODE_SIGNING_MONITOR
	CSM_PREFIX(unrestrict_local_signing_cdhash)(cdhash);
#endif
}

kern_return_t
get_trust_level_kdp(
	__unused pmap_t pmap,
	__unused uint32_t *trust_level)
{
#if CODE_SIGNING_MONITOR
	return csm_get_trust_level_kdp(pmap, trust_level);
#else
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
csm_resolve_os_entitlements_from_proc(
	__unused const proc_t process,
	__unused const void **os_entitlements)
{
#if CODE_SIGNING_MONITOR
	task_t task = NULL;
	vm_map_t task_map = NULL;
	pmap_t task_pmap = NULL;
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Ensure the process comes from the proc_task zone */
	proc_require(process, PROC_REQUIRE_ALLOW_ALL);

	/* Acquire the task from the proc */
	task = proc_task(process);
	if (task == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the virtual memory map from the task -- takes a reference on it */
	task_map = get_task_map_reference(task);
	if (task_map == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the pmap from the virtual memory map */
	task_pmap = vm_map_get_pmap(task_map);
	assert(task_pmap != NULL);

	/* Call into the monitor to resolve the entitlements */
	ret = CSM_PREFIX(resolve_kernel_entitlements)(task_pmap, os_entitlements);

	/* Release the reference on the virtual memory map */
	vm_map_deallocate(task_map);

	return ret;
#else
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
address_space_debugged(
	const proc_t process)
{
	/* Must pass in a valid proc_t */
	if (process == NULL) {
		printf("%s: provided a NULL process\n", __FUNCTION__);
		return KERN_DENIED;
	}
	proc_require(process, PROC_REQUIRE_ALLOW_ALL);

	/* Developer mode must always be enabled for this to return successfully */
	if (developer_mode_state() == false) {
		return KERN_DENIED;
	}

#if CODE_SIGNING_MONITOR
	task_t task = NULL;
	vm_map_t task_map = NULL;
	pmap_t task_pmap = NULL;

	if (csm_enabled() == true) {
		/* Acquire the task from the proc */
		task = proc_task(process);
		if (task == NULL) {
			return KERN_NOT_FOUND;
		}

		/* Acquire the virtual memory map from the task -- takes a reference on it */
		task_map = get_task_map_reference(task);
		if (task_map == NULL) {
			return KERN_NOT_FOUND;
		}

		/* Acquire the pmap from the virtual memory map */
		task_pmap = vm_map_get_pmap(task_map);
		assert(task_pmap != NULL);

		/* Acquire the state from the monitor */
		kern_return_t ret = CSM_PREFIX(address_space_debugged)(task_pmap);

		/* Release the reference on the virtual memory map */
		vm_map_deallocate(task_map);

		return ret;
	}
#endif /* CODE_SIGNING_MONITOR */

	/* Check read-only process flags for state */
	if (proc_getcsflags(process) & CS_DEBUGGED) {
		return KERN_SUCCESS;
	}

	return KERN_DENIED;
}
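
/*
 * Illustrative sketch (not compiled): callers treat this as a predicate --
 * KERN_SUCCESS means the address space is being debugged, while any other
 * return means it must be treated as intact. The fault-handler shape below
 * mirrors csm_code_signing_violation() and is hypothetical.
 *
 *	if (address_space_debugged(proc) == KERN_SUCCESS) {
 *		return;    // invalid pages are expected under a debugger
 *	}
 *	// otherwise enforce, e.g. kill the task for the violation
 */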

#if CODE_SIGNING_MONITOR

bool
csm_enabled(void)
{
	return CSM_PREFIX(code_signing_enabled)();
}

vm_size_t
csm_signature_size_limit(void)
{
	return CSM_PREFIX(managed_code_signature_size)();
}

void
csm_check_lockdown_mode(void)
{
	if (get_lockdown_mode_state() == 0) {
		return;
	}

	/* Inform the code signing monitor about lockdown mode */
	CSM_PREFIX(enter_lockdown_mode)();

#if CONFIG_SPTM
#if kTXMKernelAPIVersion >= 3
	/* MAP_JIT lockdown */
	if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
		disable_code_signing_feature(CS_CONFIG_MAP_JIT);
	}
#endif

	/* Compilation service lockdown */
	if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
		disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
	}

	/* Local signing lockdown */
	if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
		disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
	}

	/* OOP-JIT lockdown */
	if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
		disable_code_signing_feature(CS_CONFIG_OOP_JIT);
	}
#else
	/*
	 * Lockdown mode is supposed to disable all forms of JIT on the system. For now,
	 * we leave JIT enabled by default until some blockers are resolved. The way this
	 * code is written, we don't need to change anything once we enforce MAP_JIT to
	 * be disabled for lockdown mode.
	 */
	if (ppl_lockdown_mode_enforce_jit == true) {
		disable_code_signing_feature(CS_CONFIG_MAP_JIT);
	}
	disable_code_signing_feature(CS_CONFIG_OOP_JIT);
	disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
	disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
#endif /* CONFIG_SPTM */
}

void
csm_code_signing_violation(
	proc_t proc,
	vm_offset_t addr)
{
	os_reason_t kill_reason = OS_REASON_NULL;

	/* No enforcement if code-signing-monitor is disabled */
	if (csm_enabled() == false) {
		return;
	} else if (proc == PROC_NULL) {
		panic("code-signing violation without a valid proc");
	}

	/*
	 * If the address space is being debugged, then we expect this task to undergo
	 * some code signing violations. In this case, we return without killing the
	 * task.
	 */
	if (address_space_debugged(proc) == KERN_SUCCESS) {
		return;
	}

	/* Leave a ktriage record */
	ktriage_record(
		thread_tid(current_thread()),
		KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_CODE_SIGNING),
		0);

	/* Leave a log for triage purposes */
	printf("[%s: killed] code-signing-violation at %p\n", proc_best_name(proc), (void*)addr);

	/*
	 * For now, the only input into this function is from current_proc(), so using current_thread()
	 * over here is alright. If this function ever gets called from another location, we need to
	 * then change where we get the user thread from.
	 */
	assert(proc == current_proc());

	/*
	 * Create a reason for the SIGKILL and set it to allow generating crash reports,
	 * which is critical for better triaging these issues. set_thread_exit_reason will
	 * consume the kill_reason, so we don't have to free it.
	 */
	kill_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
	if (kill_reason != NULL) {
		kill_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
	}
	set_thread_exit_reason(current_thread(), kill_reason, false);

	/* Send a SIGKILL to the thread */
	threadsignal(current_thread(), SIGKILL, EXC_BAD_ACCESS, false);
}

kern_return_t
csm_register_code_signature(
	const vm_address_t signature_addr,
	const vm_size_t signature_size,
	const vm_offset_t code_directory_offset,
	const char *signature_path,
	void **monitor_sig_obj,
	vm_address_t *monitor_signature_addr)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(register_code_signature)(
		signature_addr,
		signature_size,
		code_directory_offset,
		signature_path,
		monitor_sig_obj,
		monitor_signature_addr);
}

kern_return_t
csm_unregister_code_signature(
	void *monitor_sig_obj)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(unregister_code_signature)(monitor_sig_obj);
}

kern_return_t
csm_verify_code_signature(
	void *monitor_sig_obj)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(verify_code_signature)(monitor_sig_obj);
}

kern_return_t
csm_reconstitute_code_signature(
	void *monitor_sig_obj,
	vm_address_t *unneeded_addr,
	vm_size_t *unneeded_size)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(reconstitute_code_signature)(
		monitor_sig_obj,
		unneeded_addr,
		unneeded_size);
}

kern_return_t
csm_associate_code_signature(
	pmap_t monitor_pmap,
	void *monitor_sig_obj,
	const vm_address_t region_addr,
	const vm_size_t region_size,
	const vm_offset_t region_offset)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(associate_code_signature)(
		monitor_pmap,
		monitor_sig_obj,
		region_addr,
		region_size,
		region_offset);
}

kern_return_t
csm_allow_jit_region(
	pmap_t monitor_pmap)
{
	if (csm_enabled() == false) {
		return KERN_SUCCESS;
	} else if (monitor_pmap == NULL) {
		return KERN_DENIED;
	}

	kern_return_t ret = CSM_PREFIX(allow_jit_region)(monitor_pmap);
	if (ret == KERN_NOT_SUPPORTED) {
		/*
		 * Some monitor environments do not support this API and as a result will
		 * return KERN_NOT_SUPPORTED. The caller here should not interpret that as
		 * a failure.
		 */
		ret = KERN_SUCCESS;
	}

	return ret;
}

kern_return_t
csm_associate_jit_region(
	pmap_t monitor_pmap,
	const vm_address_t region_addr,
	const vm_size_t region_size)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(associate_jit_region)(
		monitor_pmap,
		region_addr,
		region_size);
}

kern_return_t
csm_associate_debug_region(
	pmap_t monitor_pmap,
	const vm_address_t region_addr,
	const vm_size_t region_size)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(associate_debug_region)(
		monitor_pmap,
		region_addr,
		region_size);
}

kern_return_t
csm_allow_invalid_code(
	pmap_t pmap)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(allow_invalid_code)(pmap);
}

kern_return_t
csm_get_trust_level_kdp(
	pmap_t pmap,
	uint32_t *trust_level)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(get_trust_level_kdp)(pmap, trust_level);
}

kern_return_t
csm_address_space_exempt(
	const pmap_t pmap)
{
	/*
	 * These exemptions are actually orthogonal to the code signing enforcement. As
	 * a result, we let each monitor explicitly decide how to deal with the exemption
	 * in case code signing enforcement is disabled.
	 */

	return CSM_PREFIX(address_space_exempt)(pmap);
}

kern_return_t
csm_fork_prepare(
	pmap_t old_pmap,
	pmap_t new_pmap)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(fork_prepare)(old_pmap, new_pmap);
}

kern_return_t
csm_acquire_signing_identifier(
	const void *monitor_sig_obj,
	const char **signing_id)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(acquire_signing_identifier)(monitor_sig_obj, signing_id);
}

kern_return_t
csm_associate_os_entitlements(
	void *monitor_sig_obj,
	const void *os_entitlements)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	} else if (os_entitlements == NULL) {
		/* Not every signature has entitlements */
		return KERN_SUCCESS;
	}

	return CSM_PREFIX(associate_kernel_entitlements)(monitor_sig_obj, os_entitlements);
}

kern_return_t
csm_accelerate_entitlements(
	void *monitor_sig_obj,
	CEQueryContext_t *ce_ctx)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(accelerate_entitlements)(monitor_sig_obj, ce_ctx);
}

#endif /* CODE_SIGNING_MONITOR */

#pragma mark AppleImage4
/*
 * AppleImage4 uses the monitor environment to safeguard critical security data.
 * In order to ease the implementation specifics, AppleImage4 always depends on
 * these abstracted APIs, regardless of whether the system has a monitor
 * environment or not.
 */

void*
kernel_image4_storage_data(
	size_t *allocated_size)
{
	return CSM_PREFIX(image4_storage_data)(allocated_size);
}

void
kernel_image4_set_nonce(
	const img4_nonce_domain_index_t ndi,
	const img4_nonce_t *nonce)
{
	return CSM_PREFIX(image4_set_nonce)(ndi, nonce);
}

void
kernel_image4_roll_nonce(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_roll_nonce)(ndi);
}

errno_t
kernel_image4_copy_nonce(
	const img4_nonce_domain_index_t ndi,
	img4_nonce_t *nonce_out)
{
	return CSM_PREFIX(image4_copy_nonce)(ndi, nonce_out);
}

errno_t
kernel_image4_execute_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	const img4_buff_t *payload,
	const img4_buff_t *manifest)
{
	return CSM_PREFIX(image4_execute_object)(
		obj_spec_index,
		payload,
		manifest);
}

errno_t
kernel_image4_copy_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	vm_address_t object_out,
	size_t *object_length)
{
	return CSM_PREFIX(image4_copy_object)(
		obj_spec_index,
		object_out,
		object_length);
}

const void*
kernel_image4_get_monitor_exports(void)
{
	return CSM_PREFIX(image4_get_monitor_exports)();
}

errno_t
kernel_image4_set_release_type(
	const char *release_type)
{
	return CSM_PREFIX(image4_set_release_type)(release_type);
}

errno_t
kernel_image4_set_bnch_shadow(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_set_bnch_shadow)(ndi);
}

#pragma mark Image4 - New

static errno_t
_kernel_image4_monitor_trap_image_activate(
	image4_cs_trap_t selector,
	const void *input_data)
{
	/*
	 * csmx_payload (csmx_payload_len) --> __cs_xfer
	 * csmx_manifest (csmx_manifest_len) --> __cs_borrow
	 */
	image4_cs_trap_argv(image_activate) input = {0};
	vm_address_t payload_addr = 0;
	vm_address_t manifest_addr = 0;
	errno_t err = EPERM;

	/* Copy the input data */
	memcpy(&input, input_data, sizeof(input));

	payload_addr = code_signing_allocate(input.csmx_payload_len);
	if (payload_addr == 0) {
		goto out;
	}
	memcpy((void*)payload_addr, (void*)input.csmx_payload, input.csmx_payload_len);

	manifest_addr = code_signing_allocate(input.csmx_manifest_len);
	if (manifest_addr == 0) {
		goto out;
	}
	memcpy((void*)manifest_addr, (void*)input.csmx_manifest, input.csmx_manifest_len);

	/* Transfer both regions to the monitor */
	CSM_PREFIX(image4_transfer_region)(selector, payload_addr, input.csmx_payload_len);
	CSM_PREFIX(image4_transfer_region)(selector, manifest_addr, input.csmx_manifest_len);

	/* Set up the input with the new addresses */
	input.csmx_payload = payload_addr;
	input.csmx_manifest = manifest_addr;

	/* Trap into the monitor for this selector */
	err = CSM_PREFIX(image4_monitor_trap)(selector, &input, sizeof(input));

out:
	if ((err != 0) && (payload_addr != 0)) {
		/* Retyping only happens after allocating the manifest */
		if (manifest_addr != 0) {
			CSM_PREFIX(image4_reclaim_region)(
				selector, payload_addr, input.csmx_payload_len);
		}
		code_signing_deallocate(&payload_addr, input.csmx_payload_len);
	}

	if (manifest_addr != 0) {
		/* Reclaim the manifest region -- will be retyped if not NULL */
		CSM_PREFIX(image4_reclaim_region)(
			selector, manifest_addr, input.csmx_manifest_len);

		/* Deallocate the manifest region */
		code_signing_deallocate(&manifest_addr, input.csmx_manifest_len);
	}

	return err;
}

static errno_t
_kernel_image4_monitor_trap(
	image4_cs_trap_t selector,
	const void *input_data,
	size_t input_size)
{
	/* Validate input size for the selector */
	if (input_size != image4_cs_trap_vector_size(selector)) {
		printf("image4 dispatch: invalid input: %llu | %lu\n", selector, input_size);
		return EINVAL;
	}

	switch (selector) {
	case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
		return _kernel_image4_monitor_trap_image_activate(selector, input_data);

	default:
		return CSM_PREFIX(image4_monitor_trap)(selector, input_data, input_size);
	}
}

errno_t
kernel_image4_monitor_trap(
	image4_cs_trap_t selector,
	const void *input_data,
	size_t input_size,
	__unused void *output_data,
	__unused size_t *output_size)
{
	size_t length_check = 0;

	/* Input data is always required */
	if ((input_data == NULL) || (input_size == 0)) {
		printf("image4 dispatch: no input data: %llu\n", selector);
		return EINVAL;
	} else if (os_add_overflow((vm_address_t)input_data, input_size, &length_check)) {
		panic("image4 dispatch: overflow on input: %p | %lu", input_data, input_size);
	}

	return _kernel_image4_monitor_trap(selector, input_data, input_size);
}
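
/*
 * Illustrative sketch (not compiled): AppleImage4 is the expected caller of
 * this trap interface. A dispatch for a selector populates that selector's
 * argument structure and passes it through; input_size must match
 * image4_cs_trap_vector_size() for the selector. SELECTOR and argv below are
 * placeholders, not real identifiers.
 *
 *	image4_cs_trap_argv(SELECTOR) argv = {0};
 *	// ... fill in the argv fields for SELECTOR ...
 *	errno_t err = kernel_image4_monitor_trap(
 *		IMAGE4_CS_TRAP_SELECTOR, &argv, sizeof(argv), NULL, NULL);
 */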
1415