/*
 * Copyright (c) 2022 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdarg.h>
#include <stdatomic.h>
#include <os/overflow.h>
#include <machine/atomic.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <mach/shared_region.h>
#include <vm/vm_kern.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <kern/recount.h>
#include <kern/sched_prim.h>
#include <kern/lock_rw.h>
#include <libkern/libkern.h>
#include <libkern/section_keywords.h>
#include <libkern/coretrust/coretrust.h>
#include <libkern/amfi/amfi.h>
#include <pexpert/pexpert.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/codesign.h>
#include <sys/code_signing.h>
#include <uuid/uuid.h>
#include <IOKit/IOBSD.h>

#if CONFIG_SPTM
/*
 * The TrustedExecutionMonitor environment works in tandem with the SPTM to provide code
 * signing and memory isolation enforcement for data structures critical to ensuring that
 * all code executed on the system is authorized to do so.
 *
 * Unless the data is managed by TXM itself, XNU needs to page-align everything, make the
 * relevant type transfer, and then reference the memory as read-only.
 *
 * TXM enforces concurrency on its side, but through the use of try-locks. Upon a failure
 * in acquiring the lock, TXM will panic. As a result, in order to ensure single-threaded
 * behavior, the kernel also has to take some locks on its side before calling into TXM.
 */
#include <sys/trusted_execution_monitor.h>
#include <pexpert/arm64/board_config.h>
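
/*
 * The typical pattern for handing kernel memory to TXM, as used by the
 * provisioning profile and code signature paths later in this file, is
 * roughly the following (an illustrative sketch, not an additional API):
 *
 *    vm_address_t addr = 0;
 *    kmem_alloc(kernel_map, &addr, size, KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_SECURITY);
 *    memcpy((void*)addr, blob, size);
 *    txm_transfer_region(addr, size);    <-- page-aligned; memory becomes read-only
 *    ... call into TXM with addr ...
 *    txm_reclaim_region(addr, size);     <-- once TXM transfers the memory back
 *    kmem_free(kernel_map, addr, size);
 */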

/* Lock group used for all locks within the kernel for TXM */
LCK_GRP_DECLARE(txm_lck_grp, "txm_code_signing_lck_grp");

#pragma mark Utilities

/* Number of thread stacks is known at build-time */
#define NUM_TXM_THREAD_STACKS (MAX_CPUS)
txm_thread_stack_t thread_stacks[NUM_TXM_THREAD_STACKS] = {0};

/* Singly-linked-list head for thread stacks */
SLIST_HEAD(thread_stack_head, _txm_thread_stack) thread_stacks_head =
    SLIST_HEAD_INITIALIZER(thread_stacks_head);

static decl_lck_mtx_data(, thread_stacks_lock);
static void *thread_stack_event = NULL;

static void
setup_thread_stacks(void)
{
    extern const sptm_bootstrap_args_xnu_t *SPTMArgs;
    txm_thread_stack_t *thread_stack = NULL;

    /* Initialize each thread stack and add it to the list */
    for (uint32_t i = 0; i < NUM_TXM_THREAD_STACKS; i++) {
        thread_stack = &thread_stacks[i];

        /* Acquire the thread stack virtual mapping */
        thread_stack->thread_stack_papt = SPTMArgs->txm_thread_stacks[i];

        /* Acquire the thread stack physical page */
        thread_stack->thread_stack_phys = (uintptr_t)kvtophys_nofail(
            thread_stack->thread_stack_papt);

        /* Resolve the pointer to the thread stack data */
        thread_stack->thread_stack_data =
            (TXMThreadStack_t*)(thread_stack->thread_stack_papt + (PAGE_SIZE - 1024));

        /* Add thread stack to the list head */
        SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);
    }

    /* Initialize the thread stacks lock */
    lck_mtx_init(&thread_stacks_lock, &txm_lck_grp, 0);
}

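/*
 * Acquire a free TXM thread stack from the pool initialized by
 * setup_thread_stacks(). If no stack is available, the calling thread sleeps
 * on thread_stack_event until one is released. The stack is associated with
 * the current thread before being returned.
 */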
static txm_thread_stack_t*
acquire_thread_stack(void)
{
    txm_thread_stack_t *thread_stack = NULL;

    /* Lock the thread stack list */
    lck_mtx_lock(&thread_stacks_lock);

    while (SLIST_EMPTY(&thread_stacks_head) == true) {
        lck_mtx_sleep(
            &thread_stacks_lock,
            LCK_SLEEP_DEFAULT,
            &thread_stack_event,
            THREAD_UNINT);
    }

    if (SLIST_EMPTY(&thread_stacks_head) == true) {
        panic("unable to acquire a thread stack for TXM");
    }

    /* Use the first available thread stack */
    thread_stack = SLIST_FIRST(&thread_stacks_head);

    /* Remove the thread stack from the list */
    SLIST_REMOVE_HEAD(&thread_stacks_head, link);

    /* Unlock the thread stack list */
    lck_mtx_unlock(&thread_stacks_lock);

    /* Associate the thread stack with the current thread */
    thread_associate_txm_thread_stack(thread_stack->thread_stack_phys);

    return thread_stack;
}

static void
release_thread_stack(
    txm_thread_stack_t* thread_stack)
{
    /* Remove the TXM thread stack association with the current thread */
    thread_disassociate_txm_thread_stack(thread_stack->thread_stack_phys);

    /* Lock the thread stack list */
    lck_mtx_lock(&thread_stacks_lock);

    /* Add the thread stack at the list head */
    SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);

    /* Unlock the thread stack list */
    lck_mtx_unlock(&thread_stacks_lock);

    /* Wake up any threads waiting to acquire a thread stack */
    thread_wakeup(&thread_stack_event);
}

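/* Convert a TXMReturn_t from TXM into a kern_return_t understood by the kernel */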
static kern_return_t
txm_parse_return(
    TXMReturn_t txm_ret)
{
    switch (txm_ret.returnCode) {
    case kTXMSuccess:
        return KERN_SUCCESS;

    case kTXMReturnOutOfMemory:
        return KERN_RESOURCE_SHORTAGE;

    case kTXMReturnNotFound:
        return KERN_NOT_FOUND;

    default:
        return KERN_FAILURE;
    }
}

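/* Log a descriptive error message for a failed TXM call -- does nothing on success */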
static void
txm_print_return(
    TXMKernelSelector_t selector,
    TXMReturn_t txm_ret)
{
    if (txm_ret.returnCode == kTXMSuccess) {
        return;
    } else if (txm_ret.returnCode == kTXMReturnTrustCache) {
        printf("TXM [Error]: TrustCache: selector: %u | 0x%02X | 0x%02X | %u\n",
            selector, txm_ret.tcRet.component, txm_ret.tcRet.error, txm_ret.tcRet.uniqueError);
    } else if (txm_ret.returnCode == kTXMReturnCodeSignature) {
        printf("TXM [Error]: CodeSignature: selector: %u | 0x%02X | 0x%02X | %u\n",
            selector, txm_ret.csRet.component, txm_ret.csRet.error, txm_ret.csRet.uniqueError);
    } else if (txm_ret.returnCode == kTXMReturnCodeErrno) {
        printf("TXM [Error]: Errno: selector: %u | %d\n",
            selector, txm_ret.errnoRet);
    } else {
        printf("TXM [Error]: selector: %u | %u\n",
            selector, txm_ret.returnCode);
    }
}

#pragma mark Page Allocation

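/*
 * Allocate a physical page through the pmap layer and add it to TXM's free
 * list. This is called when a TXM call reports an out-of-memory condition
 * (see txm_kernel_call()).
 */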
static void
txm_add_page(void)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAddFreeListPage,
        .failure_fatal = true,
        .num_input_args = 1
    };

    /* Allocate a page from the VM -- transfers page to TXM internally */
    vm_map_address_t phys_addr = pmap_txm_allocate_page();

    /* Add this page to the TXM free list */
    txm_kernel_call(&txm_call, phys_addr);
}

#pragma mark Calls

static void
txm_kernel_call_registers_setup(
    txm_call_t *parameters,
    sptm_call_regs_t *registers,
    va_list args)
{
    /*
     * We are only ever allowed a maximum of 7 arguments for calling into TXM.
     * This is because the SPTM dispatch only sets up registers x0-x7 for the
     * call, and x0 is always reserved for passing in a thread stack for TXM
     * to operate on.
     */

    switch (parameters->num_input_args) {
    case 7:
        registers->x1 = va_arg(args, uintptr_t);
        registers->x2 = va_arg(args, uintptr_t);
        registers->x3 = va_arg(args, uintptr_t);
        registers->x4 = va_arg(args, uintptr_t);
        registers->x5 = va_arg(args, uintptr_t);
        registers->x6 = va_arg(args, uintptr_t);
        registers->x7 = va_arg(args, uintptr_t);
        break;

    case 6:
        registers->x1 = va_arg(args, uintptr_t);
        registers->x2 = va_arg(args, uintptr_t);
        registers->x3 = va_arg(args, uintptr_t);
        registers->x4 = va_arg(args, uintptr_t);
        registers->x5 = va_arg(args, uintptr_t);
        registers->x6 = va_arg(args, uintptr_t);
        break;

    case 5:
        registers->x1 = va_arg(args, uintptr_t);
        registers->x2 = va_arg(args, uintptr_t);
        registers->x3 = va_arg(args, uintptr_t);
        registers->x4 = va_arg(args, uintptr_t);
        registers->x5 = va_arg(args, uintptr_t);
        break;

    case 4:
        registers->x1 = va_arg(args, uintptr_t);
        registers->x2 = va_arg(args, uintptr_t);
        registers->x3 = va_arg(args, uintptr_t);
        registers->x4 = va_arg(args, uintptr_t);
        break;

    case 3:
        registers->x1 = va_arg(args, uintptr_t);
        registers->x2 = va_arg(args, uintptr_t);
        registers->x3 = va_arg(args, uintptr_t);
        break;

    case 2:
        registers->x1 = va_arg(args, uintptr_t);
        registers->x2 = va_arg(args, uintptr_t);
        break;

    case 1:
        registers->x1 = va_arg(args, uintptr_t);
        break;

    case 0:
        break;

    default:
        panic("invalid number of arguments to TXM: selector: %u | %u",
            parameters->selector, parameters->num_input_args);
    }
}

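/*
 * Perform a single call into TXM: acquire a thread stack, marshal the variadic
 * arguments into registers x1-x7, trap into TXM through txm_enter(), and copy
 * any return words out of the shared context data before releasing the thread
 * stack.
 */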
static TXMReturn_t
txm_kernel_call_internal(
    txm_call_t *parameters,
    va_list args)
{
    TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
    sptm_call_regs_t txm_registers = {0};
    txm_thread_stack_t *thread_stack = NULL;
    const TXMThreadStack_t *thread_stack_data = NULL;
    const TXMSharedContextData_t *shared_context_data = NULL;

    /* Obtain a stack for this call */
    thread_stack = acquire_thread_stack();
    thread_stack_data = thread_stack->thread_stack_data;
    shared_context_data = &thread_stack_data->sharedData;

    /* Setup argument registers */
    txm_registers.x0 = thread_stack->thread_stack_phys;
    txm_kernel_call_registers_setup(parameters, &txm_registers, args);

    /* Track resource usage */
    recount_enter_secure();

    /* Call into TXM */
    txm_enter(parameters->selector, &txm_registers);

    recount_leave_secure();

    txm_ret = (TXMReturn_t){.rawValue = shared_context_data->txmReturnCode};
    parameters->txm_ret = txm_ret;

    if (parameters->txm_ret.returnCode == kTXMSuccess) {
        parameters->num_return_words = shared_context_data->txmNumReturnWords;
        if (parameters->num_return_words > kTXMStackReturnWords) {
            panic("received excessive return words from TXM: selector: %u | %llu",
                parameters->selector, parameters->num_return_words);
        }

        for (uint64_t i = 0; i < parameters->num_return_words; i++) {
            parameters->return_words[i] = shared_context_data->txmReturnWords[i];
        }
    }

    /* Release the thread stack as it is no longer needed */
    release_thread_stack(thread_stack);
    thread_stack_data = NULL;
    shared_context_data = NULL;

    return txm_ret;
}

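/*
 * Main entry point for calling into TXM. The call is retried whenever TXM
 * reports an out-of-memory condition, after first donating a page to TXM's
 * free list through txm_add_page(). A typical call site (mirroring the other
 * wrappers in this file) looks like:
 *
 *    txm_call_t txm_call = {
 *        .selector = kTXMKernelSelectorDeveloperModeToggle,
 *        .failure_fatal = true,
 *        .num_input_args = 1
 *    };
 *    txm_kernel_call(&txm_call, state);
 *
 * Return words, when requested, are read back from txm_call.return_words[].
 */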
kern_return_t
txm_kernel_call(
    txm_call_t *parameters, ...)
{
    TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
    kern_return_t ret = KERN_DENIED;
    va_list args;

    /* Start the variadic arguments list */
    va_start(args, parameters);

    do {
        txm_ret = txm_kernel_call_internal(parameters, args);
        if (txm_ret.returnCode == kTXMReturnOutOfMemory) {
            if (parameters->selector == kTXMKernelSelectorAddFreeListPage) {
                panic("received out-of-memory error when adding a free page to TXM");
            }
            txm_add_page();
        }
    } while (txm_ret.returnCode == kTXMReturnOutOfMemory);

    /* Clean up the variadic arguments list */
    va_end(args);

    /* Print all TXM logs from the log buffer */
    if (parameters->skip_logs == false) {
        txm_print_logs();
    }

    /* Print the return code from TXM -- only prints for an error */
    if (parameters->failure_silent != true) {
        if (parameters->failure_code_silent != txm_ret.returnCode) {
            txm_print_return(parameters->selector, txm_ret);
        }
    }

    /*
     * To ease the process of calling into TXM, and to also reduce the number of
     * lines of code for each call site, the txm_call_t offers some properties
     * we can enforce over here. Go through these, and panic in case they aren't
     * honored.
     *
     * NOTE: We check for "<" instead of "!=" for the number of return words we
     * get back from TXM since this helps in forward development. If the kernel
     * and TXM are proceeding at different project cadences, we do not want to
     * gate adding more return words from TXM on the kernel first adopting the
     * new number of return words.
     */
    ret = txm_parse_return(txm_ret);

    if (parameters->failure_fatal && (ret != KERN_SUCCESS)) {
        panic("received fatal error for a selector from TXM: selector: %u | 0x%0llX",
            parameters->selector, txm_ret.rawValue);
    } else if (parameters->num_return_words < parameters->num_output_args) {
        /* Only panic if return was a success */
        if (ret == KERN_SUCCESS) {
            panic("received fewer than expected return words from TXM: selector: %u | %llu",
                parameters->selector, parameters->num_return_words);
        }
    }

    return ret;
}

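/*
 * Transfer a page-aligned region of kernel memory to TXM. The region is first
 * made read-only in the kernel map (the transfer panics otherwise) and each
 * physical page is then transferred to the TXM_DEFAULT type.
 */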
void
txm_transfer_region(
    vm_address_t addr,
    vm_size_t size)
{
    vm_address_t addr_end = 0;
    vm_size_t size_aligned = round_page(size);

    if ((addr & PAGE_MASK) != 0) {
        panic("attempted to transfer non-page-aligned memory to TXM: %p", (void*)addr);
    } else if (os_add_overflow(addr, size_aligned, &addr_end)) {
        panic("overflow on range to be transferred to TXM: %p | %lu",
            (void*)addr, size);
    }

    /* Make the memory read-only first (transfer will panic otherwise) */
    vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ);

    /* Transfer each physical page to be TXM_DEFAULT */
    for (vm_address_t page = addr; page < addr_end; page += PAGE_SIZE) {
        pmap_txm_transfer_page(page);
    }
}

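/*
 * Reclaim a region which TXM has transferred back to the kernel by switching
 * its protections back to read-write. If TXM still owns the pages, the
 * protection change panics.
 */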
void
txm_reclaim_region(
    vm_address_t addr,
    vm_size_t size)
{
    vm_address_t addr_end = 0;
    vm_size_t size_aligned = round_page(size);

    if ((addr & PAGE_MASK) != 0) {
        panic("attempted to reclaim non-page-aligned memory from TXM: %p", (void*)addr);
    } else if (os_add_overflow(addr, size_aligned, &addr_end)) {
        panic("overflow on range to be reclaimed from TXM: %p | %lu",
            (void*)addr, size);
    }

    /*
     * We can only reclaim once TXM has transferred the memory range back to the
     * kernel. Hence, we simply try to switch permissions back to read-write. If
     * TXM hasn't transferred the pages, this will panic.
     */
    vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ | VM_PROT_WRITE);
}

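#pragma mark Logging

/*
 * Read-only views of TXM's log ring buffer, resolved at boot through
 * get_logging_info(). The kernel keeps its own read index (log_head) and
 * drains new slots whenever it calls into TXM.
 */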
static SECURITY_READ_ONLY_LATE(const char*) txm_log_page = NULL;
static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_head = NULL;
static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_sync = NULL;

static decl_lck_mtx_data(, log_lock);
static uint32_t log_head = 0;

void
txm_print_logs(void)
{
    uint32_t start_index = 0;
    uint32_t end_index = 0;

    /*
     * The design here is very simple. TXM keeps adding slots to its circular buffer
     * and the kernel attempts to read each one and print it, maintaining its own head
     * for the log.
     *
     * This design is by nature lazy. TXM doesn't know or care if the kernel has gone
     * through and printed any of the logs, so it'll just keep writing into its buffer
     * and then circle around when it becomes full.
     *
     * This is fine most of the time since there are a decent number of slots in the
     * log buffer. We mostly have an issue when TXM is adding logs so quickly that it
     * wraps around and starts overwriting logs which haven't been seen by the kernel.
     * If this were to happen, TXM's log head may circle around the head maintained by
     * the kernel, causing a lot of logs to be missed, since the kernel only attempts
     * to print the number of logs in-between the two heads.
     *
     * The fix for that is complicated, and until we see an actual impact, we're going
     * to keep the simpler design in place.
     */

    /* Return if the logging hasn't been set up yet */
    if (txm_log_sync == NULL) {
        return;
    }

    /*
     * Holding the log lock and printing can cause lots of issues since printing can
     * be rather slow. While we make it a point to keep the logging buffer quiet, some
     * actions (such as loading trust caches) are still very chatty.
     *
     * As a result, we optimize this routine to ensure that the lock itself isn't held
     * for very long. All we need to do within the critical section is calculate the
     * starting and ending index of the log buffer. The actual printing doesn't need
     * to be done with the lock held.
     */
    lck_mtx_lock(&log_lock);

    start_index = log_head;
    end_index = os_atomic_load(txm_log_head, relaxed) % kTXMLogSlots;

    /* Update the log head with the new index */
    log_head = end_index;

    /* Release the log lock */
    lck_mtx_unlock(&log_lock);

    if (start_index != end_index) {
        /* Use load acquire here to sync up with all writes to the buffer */
        os_atomic_load(txm_log_sync, acquire);

        while (start_index != end_index) {
            const char *slot = txm_log_page + (start_index * kTXMLogSlotSize);

            /* We add newlines after each log statement since TXM does not */
            printf("%s\n", slot);

            start_index = (start_index + 1) % kTXMLogSlots;
        }
    }
}

#pragma mark Initialization

SECURITY_READ_ONLY_LATE(const TXMReadOnlyData_t*) txm_ro_data = NULL;
SECURITY_READ_ONLY_LATE(const TXMStatistics_t*) txm_stats = NULL;
SECURITY_READ_ONLY_LATE(const CSConfig_t*) txm_cs_config = NULL;

SECURITY_READ_ONLY_LATE(bool*) developer_mode_enabled = NULL;
static SECURITY_READ_ONLY_LATE(bool) code_signing_enabled = true;
static SECURITY_READ_ONLY_LATE(uint32_t) managed_signature_size = 0;

static decl_lck_mtx_data(, compilation_service_lock);
static decl_lck_mtx_data(, unregister_sync_lock);

static void
get_logging_info(void)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorGetLogInfo,
        .failure_fatal = true,
        .num_output_args = 3
    };
    txm_kernel_call(&txm_call);

    txm_log_page = (const char*)txm_call.return_words[0];
    txm_log_head = (const uint32_t*)txm_call.return_words[1];
    txm_log_sync = (const uint32_t*)txm_call.return_words[2];
}

static void
get_code_signing_info(void)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorGetCodeSigningInfo,
        .failure_fatal = true,
        .num_output_args = 6
    };
    txm_kernel_call(&txm_call);

    /*
     * Not using txm_call.return_words[0] for now. This was previously the
     * code_signing_enabled field, but we've since switched to acquiring that
     * value from TXM's read-only data.
     *
     * Not using txm_call.return_words[4] for now. This was previously the
     * txm_cs_config field, but we've since switched to acquiring that value
     * from TXM's read-only data.
     */

    developer_mode_enabled = (bool*)txm_call.return_words[1];
    txm_stats = (TXMStatistics_t*)txm_call.return_words[2];
    managed_signature_size = (uint32_t)txm_call.return_words[3];
    txm_ro_data = (TXMReadOnlyData_t*)txm_call.return_words[5];

    /* Set code_signing_enabled based on read-only data */
    code_signing_enabled = txm_ro_data->codeSigningDisabled == false;

    /* Set txm_cs_config based on read-only data */
    txm_cs_config = &txm_ro_data->CSConfiguration;
}

static void
set_shared_region_base_address(void)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorSetSharedRegionBaseAddress,
        .failure_fatal = true,
        .num_input_args = 2,
    };

    txm_kernel_call(&txm_call,
        SHARED_REGION_BASE,
        SHARED_REGION_SIZE);
}

void
code_signing_init(void)
{
    /* Setup the thread stacks used by TXM */
    setup_thread_stacks();

    /* Setup the logging lock */
    lck_mtx_init(&log_lock, &txm_lck_grp, 0);

    /* Setup TXM logging information */
    get_logging_info();

    /* Setup code signing configuration */
    get_code_signing_info();

    /* Setup all the other locks we need */
    lck_mtx_init(&compilation_service_lock, &txm_lck_grp, 0);
    lck_mtx_init(&unregister_sync_lock, &txm_lck_grp, 0);

    /*
     * We need to let TXM know what the shared region base address is going
     * to be for this boot.
     */
    set_shared_region_base_address();

    /* Require signed code when monitor is enabled */
    if (code_signing_enabled == true) {
        cs_debug_fail_on_unsigned_code = 1;
    }
}

void
txm_enter_lockdown_mode(void)
{
#if kTXMKernelAPIVersion >= 3
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorEnterLockdownMode,
        .failure_fatal = true,
    };

    txm_kernel_call(&txm_call);
#endif
}

#pragma mark Developer Mode

void
txm_toggle_developer_mode(bool state)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorDeveloperModeToggle,
        .failure_fatal = true,
        .num_input_args = 1
    };

    txm_kernel_call(&txm_call, state);
}

#pragma mark Code Signing and Provisioning Profiles

bool
txm_code_signing_enabled(void)
{
    return code_signing_enabled;
}

vm_size_t
txm_managed_code_signature_size(void)
{
    return managed_signature_size;
}

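/*
 * Register a provisioning profile with TXM. The profile blob is copied into a
 * page-wise kernel allocation which is then transferred to TXM; on failure the
 * allocation is reclaimed and freed. On success, the opaque TXM profile object
 * is returned through profile_obj.
 */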
kern_return_t
txm_register_provisioning_profile(
    const void *profile_blob,
    const size_t profile_blob_size,
    void **profile_obj)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorRegisterProvisioningProfile,
        .num_input_args = 2,
        .num_output_args = 1
    };
    vm_address_t payload_addr = 0;
    kern_return_t ret = KERN_DENIED;

    /* We need to allocate page-wise in order to transfer the range to TXM */
    ret = kmem_alloc(kernel_map, &payload_addr, profile_blob_size,
        KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_SECURITY);
    if (ret != KERN_SUCCESS) {
        printf("unable to allocate memory for profile payload: %d\n", ret);
        goto exit;
    }

    /* Copy the contents into the allocation */
    memcpy((void*)payload_addr, profile_blob, profile_blob_size);

    /* Transfer the memory range to TXM */
    txm_transfer_region(payload_addr, profile_blob_size);

    ret = txm_kernel_call(&txm_call, payload_addr, profile_blob_size);
    if (ret == KERN_SUCCESS) {
        *profile_obj = (void*)txm_call.return_words[0];
    }

exit:
    if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
        /* Reclaim this memory range */
        txm_reclaim_region(payload_addr, profile_blob_size);

        /* Free the memory range */
        kmem_free(kernel_map, payload_addr, profile_blob_size);
        payload_addr = 0;
    }

    return ret;
}

kern_return_t
txm_unregister_provisioning_profile(
    void *profile_obj)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorUnregisterProvisioningProfile,
        .num_input_args = 1,
        .num_output_args = 2
    };
    vm_address_t profile_addr = 0;
    vm_size_t profile_size = 0;
    kern_return_t ret = KERN_DENIED;

    ret = txm_kernel_call(&txm_call, profile_obj);
    if (ret != KERN_SUCCESS) {
        return ret;
    }

    profile_addr = txm_call.return_words[0];
    profile_size = txm_call.return_words[1];

    /* Reclaim this memory range */
    txm_reclaim_region(profile_addr, profile_size);

    /* Free the memory range */
    kmem_free(kernel_map, profile_addr, profile_size);

    return KERN_SUCCESS;
}

kern_return_t
txm_associate_provisioning_profile(
    void *sig_obj,
    void *profile_obj)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAssociateProvisioningProfile,
        .num_input_args = 2,
    };

    return txm_kernel_call(&txm_call, sig_obj, profile_obj);
}

kern_return_t
txm_disassociate_provisioning_profile(
    void *sig_obj)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorDisassociateProvisioningProfile,
        .num_input_args = 1,
    };

    /*
     * Take the unregistration sync lock.
     * For more information: rdar://99205627.
     */
    lck_mtx_lock(&unregister_sync_lock);

    /* Disassociate the profile from the signature */
    kern_return_t ret = txm_kernel_call(&txm_call, sig_obj);

    /* Release the unregistration sync lock */
    lck_mtx_unlock(&unregister_sync_lock);

    return ret;
}

void
txm_set_compilation_service_cdhash(
    const uint8_t cdhash[CS_CDHASH_LEN])
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAuthorizeCompilationServiceCDHash,
        .num_input_args = 1,
    };

    lck_mtx_lock(&compilation_service_lock);
    txm_kernel_call(&txm_call, cdhash);
    lck_mtx_unlock(&compilation_service_lock);
}

bool
txm_match_compilation_service_cdhash(
    const uint8_t cdhash[CS_CDHASH_LEN])
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorMatchCompilationServiceCDHash,
        .failure_silent = true,
        .num_input_args = 1,
        .num_output_args = 1,
    };
    kern_return_t ret = KERN_DENIED;

    /* Be safe and take the lock (avoid thread collisions) */
    lck_mtx_lock(&compilation_service_lock);
    ret = txm_kernel_call(&txm_call, cdhash);
    lck_mtx_unlock(&compilation_service_lock);

    if (ret == KERN_SUCCESS) {
        return true;
    }
    return false;
}

void
txm_set_local_signing_public_key(
    const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorSetLocalSigningPublicKey,
        .num_input_args = 1,
    };

    txm_kernel_call(&txm_call, public_key);
}

uint8_t*
txm_get_local_signing_public_key(void)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorGetLocalSigningPublicKey,
        .num_output_args = 1,
    };
    kern_return_t ret = KERN_DENIED;

    ret = txm_kernel_call(&txm_call);
    if (ret != KERN_SUCCESS) {
        return NULL;
    }

    return (uint8_t*)txm_call.return_words[0];
}

void
txm_unrestrict_local_signing_cdhash(
    const uint8_t cdhash[CS_CDHASH_LEN])
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAuthorizeLocalSigningCDHash,
        .num_input_args = 1,
    };

    txm_kernel_call(&txm_call, cdhash);
}

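/*
 * Register a code signature with TXM. If the signature is larger than TXM's
 * managed signature size, its memory range is first transferred to TXM (and
 * reclaimed again if registration fails). On success, the opaque signature
 * object and the TXM-side signature address are returned.
 */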
kern_return_t
txm_register_code_signature(
    const vm_address_t signature_addr,
    const vm_size_t signature_size,
    const vm_offset_t code_directory_offset,
    const char *signature_path,
    void **sig_obj,
    vm_address_t *txm_signature_addr)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorRegisterCodeSignature,
        .num_input_args = 3,
        .num_output_args = 2,
    };
    kern_return_t ret = KERN_DENIED;

    /*
     * TXM performs more exhaustive validation of the code signature and figures
     * out the best code directory to use on its own. As a result, this offset here
     * is not used.
     */
    (void)code_directory_offset;

    /*
     * If the signature is large enough to not fit within TXM's managed signature
     * size, then we need to transfer it over so it is owned by TXM.
     */
    if (signature_size > txm_managed_code_signature_size()) {
        txm_transfer_region(signature_addr, signature_size);
    }

    ret = txm_kernel_call(
        &txm_call,
        signature_addr,
        signature_size,
        signature_path);

    if (ret != KERN_SUCCESS) {
        goto exit;
    }

    *sig_obj = (void*)txm_call.return_words[0];
    *txm_signature_addr = txm_call.return_words[1];

exit:
    if ((ret != KERN_SUCCESS) && (signature_size > txm_managed_code_signature_size())) {
        txm_reclaim_region(signature_addr, signature_size);
    }

    return ret;
}

kern_return_t
txm_unregister_code_signature(
    void *sig_obj)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorUnregisterCodeSignature,
        .failure_fatal = true,
        .num_input_args = 1,
        .num_output_args = 2,
    };
    TXMCodeSignature_t *cs_obj = sig_obj;
    vm_address_t signature_addr = 0;
    vm_size_t signature_size = 0;
    bool txm_managed = false;

    /* Check if the signature memory is TXM managed */
    txm_managed = cs_obj->sptmType != TXM_BULK_DATA;

    /*
     * Take the unregistration sync lock.
     * For more information: rdar://99205627.
     */
    lck_mtx_lock(&unregister_sync_lock);

    /* Unregister the signature from TXM -- cannot fail */
    txm_kernel_call(&txm_call, sig_obj);

    /* Release the unregistration sync lock */
    lck_mtx_unlock(&unregister_sync_lock);

    signature_addr = txm_call.return_words[0];
    signature_size = txm_call.return_words[1];

    /* Reclaim the memory range if we need to */
    if (txm_managed == false) {
        txm_reclaim_region(signature_addr, signature_size);
    }

    return KERN_SUCCESS;
}

kern_return_t
txm_verify_code_signature(
    void *sig_obj)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorValidateCodeSignature,
        .num_input_args = 1,
    };
    kern_return_t ret = KERN_DENIED;

    /*
     * Verification of the code signature may perform a trust cache look up.
     * In order to avoid any collisions with threads which may be loading a
     * trust cache, we take a reader lock on the trust cache runtime.
     */

    lck_rw_lock_shared(&txm_trust_cache_lck);
    ret = txm_kernel_call(&txm_call, sig_obj);
    lck_rw_unlock_shared(&txm_trust_cache_lck);

    return ret;
}

kern_return_t
txm_reconstitute_code_signature(
    void *sig_obj,
    vm_address_t *unneeded_addr,
    vm_size_t *unneeded_size)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorReconstituteCodeSignature,
        .failure_fatal = true,
        .num_input_args = 1,
        .num_output_args = 2,
    };
    vm_address_t return_addr = 0;
    vm_size_t return_size = 0;

    /* Reconstitute the code signature -- cannot fail */
    txm_kernel_call(&txm_call, sig_obj);

    return_addr = txm_call.return_words[0];
    return_size = txm_call.return_words[1];

    /* Reclaim the memory region if we need to */
    if ((return_addr != 0) && (return_size != 0)) {
        txm_reclaim_region(return_addr, return_size);
    }

    *unneeded_addr = return_addr;
    *unneeded_size = return_size;

    return KERN_SUCCESS;
}

#pragma mark Address Spaces

kern_return_t
txm_register_address_space(
    pmap_t pmap,
    uint16_t addr_space_id,
    TXMAddressSpaceFlags_t flags)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorRegisterAddressSpace,
        .failure_fatal = true,
        .num_input_args = 2,
        .num_output_args = 1,
    };
    TXMAddressSpace_t *txm_addr_space = NULL;

    /* Register the address space -- cannot fail */
    txm_kernel_call(&txm_call, addr_space_id, flags);

    /* Set the address space object within the PMAP */
    txm_addr_space = (TXMAddressSpace_t*)txm_call.return_words[0];
    pmap_txm_set_addr_space(pmap, txm_addr_space);

    return KERN_SUCCESS;
}

kern_return_t
txm_unregister_address_space(
    pmap_t pmap)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorUnregisterAddressSpace,
        .failure_fatal = true,
        .num_input_args = 1,
    };
    TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);

    /*
     * Take the unregistration sync lock.
     * For more information: rdar://99205627.
     */
    lck_mtx_lock(&unregister_sync_lock);

    /* Unregister the address space -- cannot fail */
    txm_kernel_call(&txm_call, txm_addr_space);

    /* Release the unregistration sync lock */
    lck_mtx_unlock(&unregister_sync_lock);

    /* Remove the address space from the pmap */
    pmap_txm_set_addr_space(pmap, NULL);

    return KERN_SUCCESS;
}

kern_return_t
txm_associate_code_signature(
    pmap_t pmap,
    void *sig_obj,
    const vm_address_t region_addr,
    const vm_size_t region_size,
    const vm_offset_t region_offset)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAssociateCodeSignature,
        .num_input_args = 5,
    };
    TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
    kern_return_t ret = KERN_DENIED;

    /*
     * Associating a code signature may require exclusive access to the TXM address
     * space lock within TXM.
     */
    pmap_txm_acquire_exclusive_lock(pmap);

    /*
     * If the address space in question is a nested address space, then all associations
     * need to go into the shared region base range. The VM layer is inconsistent with
     * how it makes associations with TXM vs. how it maps pages into the shared region.
     *
     * For TXM, the associations are made without taking the base range into account,
     * but when mappings are entered into the shared region, the base range is taken
     * into account. To normalize this, we add the base range address here.
     */
    vm_address_t adjusted_region_addr = region_addr;
    if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeSharedRegion) {
        adjusted_region_addr += SHARED_REGION_BASE;
    }

    /*
     * The VM tries a bunch of weird mappings within launchd for some platform code
     * which isn't mapped contiguously. These mappings don't succeed, but the failure
     * is fairly harmless since everything seems to work. However, since the call to
     * TXM fails, we make a series of logs. Hence, for launchd, we suppress failure
     * logs.
     */
    if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeAddressSpace) {
        /* TXMTODO: Scope this to launchd better */
        txm_call.failure_code_silent = kTXMReturnPlatformCodeMapping;
    }

    /* Check if the main region has been set on the address space */
    bool main_region_set = txm_addr_space->mainRegion != NULL;
    bool main_region_set_after = false;

    ret = txm_kernel_call(
        &txm_call,
        txm_addr_space,
        sig_obj,
        adjusted_region_addr,
        region_size,
        region_offset);

    /*
     * If the main region wasn't set on the address space beforehand, but this new
     * call into TXM was successful and sets the main region, it means this signature
     * object is associated with the main region on the address space. With this, we
     * can now set the appropriate trust level on the PMAP.
     */
    if (ret == KERN_SUCCESS) {
        main_region_set_after = txm_addr_space->mainRegion != NULL;
    }

    /* Unlock the TXM address space lock */
    pmap_txm_release_exclusive_lock(pmap);

    /* Check if we should set the trust level on the PMAP */
    if (!main_region_set && main_region_set_after) {
        const TXMCodeSignature_t *cs_obj = sig_obj;
        const SignatureValidation_t *sig = &cs_obj->sig;

        /*
         * This is gross, as we're dereferencing into a private data structure type.
         * There are 2 ways to clean this up in the future:
         * 1. Import libCodeSignature, so we can use "codeSignatureGetTrustLevel".
         * 2. Cache the trust level on the address space within TXM and then use it.
         */
        pmap_txm_set_trust_level(pmap, sig->trustLevel);
    }

    return ret;
}

kern_return_t
txm_allow_jit_region(
    pmap_t pmap)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAllowJITRegion,
        .num_input_args = 1,
    };
    TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
    kern_return_t ret = KERN_DENIED;

    pmap_txm_acquire_shared_lock(pmap);
    ret = txm_kernel_call(&txm_call, txm_addr_space);
    pmap_txm_release_shared_lock(pmap);

    return ret;
}

kern_return_t
txm_associate_jit_region(
    pmap_t pmap,
    const vm_address_t region_addr,
    const vm_size_t region_size)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAssociateJITRegion,
        .num_input_args = 3,
    };
    TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
    kern_return_t ret = KERN_DENIED;

    /*
     * Associating a JIT region may require exclusive access to the TXM address
     * space lock within TXM.
     */
    pmap_txm_acquire_exclusive_lock(pmap);

    ret = txm_kernel_call(
        &txm_call,
        txm_addr_space,
        region_addr,
        region_size);

    /* Unlock the TXM address space lock */
    pmap_txm_release_exclusive_lock(pmap);

    return ret;
}

kern_return_t
txm_address_space_debugged(
    pmap_t pmap)
{
    TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
    bool debug_regions_allowed = false;

    /*
     * We do not actually need to trap into the monitor for this function for
     * now. It might be a tad bit more secure to actually trap into the monitor
     * as it implicitly verifies all of our pointers, but since this is a simple
     * state check against the address space, the real policy around it lies
     * within the kernel still, in which case entering the monitor doesn't
     * really provide much more security.
     */

    pmap_txm_acquire_shared_lock(pmap);
    debug_regions_allowed = os_atomic_load(&txm_addr_space->allowsInvalidCode, relaxed);
    pmap_txm_release_shared_lock(pmap);

    if (debug_regions_allowed == true) {
        return KERN_SUCCESS;
    }
    return KERN_DENIED;
}

kern_return_t
txm_associate_debug_region(
    pmap_t pmap,
    const vm_address_t region_addr,
    const vm_size_t region_size)
{
    /*
     * This function is an interesting one. There is no need for us to make
     * a call into TXM here; instead, all we need to do is verify whether the
     * TXM address space actually allows debug regions to be mapped in.
     */
    (void)region_addr;
    (void)region_size;

    kern_return_t ret = txm_address_space_debugged(pmap);
    if (ret != KERN_SUCCESS) {
        printf("address space does not allow creating debug regions\n");
    }

    return ret;
}

kern_return_t
txm_allow_invalid_code(
    pmap_t pmap)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAllowInvalidCode,
        .num_input_args = 1,
    };
    TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
    kern_return_t ret = KERN_DENIED;

    /*
     * Allowing invalid code may require exclusive access to the TXM address
     * space lock within TXM.
     */

    pmap_txm_acquire_exclusive_lock(pmap);
    ret = txm_kernel_call(&txm_call, txm_addr_space);
    pmap_txm_release_exclusive_lock(pmap);

    return ret;
}

kern_return_t
txm_get_trust_level_kdp(
    pmap_t pmap,
    uint32_t *trust_level)
{
    CSTrust_t txm_trust_level = kCSTrustUntrusted;

    kern_return_t ret = pmap_txm_get_trust_level_kdp(pmap, &txm_trust_level);
    if (ret != KERN_SUCCESS) {
        return ret;
    }

    if (trust_level != NULL) {
        *trust_level = txm_trust_level;
    }
    return KERN_SUCCESS;
}

kern_return_t
txm_address_space_exempt(
    const pmap_t pmap)
{
    if (pmap_performs_stage2_translations(pmap) == true) {
        return KERN_SUCCESS;
    }

    return KERN_DENIED;
}

kern_return_t
txm_fork_prepare(
    pmap_t old_pmap,
    pmap_t new_pmap)
{
    /*
     * We'll add support for this as the need for it becomes more important.
     * TXMTODO: Complete this implementation.
     */
    (void)old_pmap;
    (void)new_pmap;

    return KERN_SUCCESS;
}

kern_return_t
txm_acquire_signing_identifier(
    const void *sig_obj,
    const char **signing_id)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAcquireSigningIdentifier,
        .num_input_args = 1,
        .num_output_args = 1,
        .failure_fatal = true,
    };

    /* Get the signing ID -- should not fail */
    txm_kernel_call(&txm_call, sig_obj);

    if (signing_id != NULL) {
        *signing_id = (const char*)txm_call.return_words[0];
    }
    return KERN_SUCCESS;
}

#pragma mark Entitlements

kern_return_t
txm_associate_kernel_entitlements(
    void *sig_obj,
    const void *kernel_entitlements)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAssociateKernelEntitlements,
        .num_input_args = 2,
        .failure_fatal = true,
    };

    /* Associate the kernel entitlements -- should not fail */
    txm_kernel_call(&txm_call, sig_obj, kernel_entitlements);

    return KERN_SUCCESS;
}

kern_return_t
txm_resolve_kernel_entitlements(
    pmap_t pmap,
    const void **kernel_entitlements)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorResolveKernelEntitlementsAddressSpace,
        .skip_logs = true,
        .num_input_args = 1,
        .num_output_args = 1,
        .failure_silent = true,
    };
    TXMAddressSpace_t *txm_addr_space = NULL;
    kern_return_t ret = KERN_DENIED;

    if (pmap == pmap_txm_kernel_pmap()) {
        return KERN_NOT_FOUND;
    }
    txm_addr_space = pmap_txm_addr_space(pmap);

    pmap_txm_acquire_shared_lock(pmap);
    ret = txm_kernel_call(&txm_call, txm_addr_space);
    pmap_txm_release_shared_lock(pmap);

    if ((ret == KERN_SUCCESS) && (kernel_entitlements != NULL)) {
        *kernel_entitlements = (const void*)txm_call.return_words[0];
    }
    return ret;
}

kern_return_t
txm_accelerate_entitlements(
    void *sig_obj,
    CEQueryContext_t *ce_ctx)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorAccelerateEntitlements,
        .num_input_args = 1,
        .num_output_args = 1,
    };
    kern_return_t ret = KERN_DENIED;

    ret = txm_kernel_call(&txm_call, sig_obj);
    if ((ret == KERN_SUCCESS) && (ce_ctx != NULL)) {
        *ce_ctx = (CEQueryContext_t)txm_call.return_words[0];
    }

    return ret;
}

#pragma mark Image4

void*
txm_image4_storage_data(
    __unused size_t *allocated_size)
{
    /*
     * AppleImage4 builds a variant of its library which TXM links against
     * statically, thereby removing the need for the kernel to allocate some
     * data on behalf of the kernel extension.
     */
    panic("unsupported AppleImage4 interface");
}

void
txm_image4_set_nonce(
    const img4_nonce_domain_index_t ndi,
    const img4_nonce_t *nonce)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorImage4SetNonce,
        .failure_fatal = true,
        .num_input_args = 2,
    };

    txm_kernel_call(&txm_call, ndi, nonce);
}

void
txm_image4_roll_nonce(
    const img4_nonce_domain_index_t ndi)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorImage4RollNonce,
        .failure_fatal = true,
        .num_input_args = 1,
    };

    txm_kernel_call(&txm_call, ndi);
}

errno_t
txm_image4_copy_nonce(
    const img4_nonce_domain_index_t ndi,
    img4_nonce_t *nonce_out)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorImage4GetNonce,
        .num_input_args = 1,
        .num_output_args = 1,
    };
    const img4_nonce_t *nonce = NULL;
    TXMReturn_t txm_ret = {0};
    kern_return_t ret = KERN_DENIED;

    ret = txm_kernel_call(&txm_call, ndi);
    if (ret != KERN_SUCCESS) {
        txm_ret = txm_call.txm_ret;
        if (txm_ret.returnCode != kTXMReturnCodeErrno) {
            return EPERM;
        }
        return txm_ret.errnoRet;
    }

    /* Acquire a pointer to the nonce from TXM */
    nonce = (const img4_nonce_t*)txm_call.return_words[0];

    if (nonce_out) {
        *nonce_out = *nonce;
    }
    return 0;
}

errno_t
txm_image4_execute_object(
    img4_runtime_object_spec_index_t obj_spec_index,
    const img4_buff_t *payload,
    const img4_buff_t *manifest)
{
    /* Not supported within TXM yet */
    (void)obj_spec_index;
    (void)payload;
    (void)manifest;

    printf("image4 object execution isn't supported by TXM\n");
    return ENOSYS;
}

errno_t
txm_image4_copy_object(
    img4_runtime_object_spec_index_t obj_spec_index,
    vm_address_t object_out,
    size_t *object_length)
{
    /* Not supported within TXM yet */
    (void)obj_spec_index;
    (void)object_out;
    (void)object_length;

    printf("image4 object copying isn't supported by TXM\n");
    return ENOSYS;
}

const void*
txm_image4_get_monitor_exports(void)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorImage4GetExports,
        .failure_fatal = true,
        .num_output_args = 1,
    };

    txm_kernel_call(&txm_call);
    return (const void*)txm_call.return_words[0];
}

errno_t
txm_image4_set_release_type(
    const char *release_type)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorImage4SetReleaseType,
        .failure_fatal = true,
        .num_input_args = 1,
    };

    /* Set the release type -- cannot fail */
    txm_kernel_call(&txm_call, release_type);

    return 0;
}

errno_t
txm_image4_set_bnch_shadow(
    const img4_nonce_domain_index_t ndi)
{
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorImage4SetBootNonceShadow,
        .failure_fatal = true,
        .num_input_args = 1,
    };

    /* Set the boot nonce shadow -- cannot fail */
    txm_kernel_call(&txm_call, ndi);

    return 0;
}

#pragma mark Image4 - New

static inline bool
_txm_image4_monitor_trap_supported(
    image4_cs_trap_t selector)
{
    switch (selector) {
#if kTXMImage4APIVersion >= 1
    case IMAGE4_CS_TRAP_KMOD_SET_RELEASE_TYPE:
    case IMAGE4_CS_TRAP_KMOD_PIN_ROOT:
    case IMAGE4_CS_TRAP_KMOD_EVALUATE_TRUST:
    case IMAGE4_CS_TRAP_NONCE_SET:
    case IMAGE4_CS_TRAP_NONCE_ROLL:
    case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
        return true;
#endif

    default:
        return false;
    }
}

kern_return_t
txm_image4_transfer_region(
    image4_cs_trap_t selector,
    vm_address_t region_addr,
    vm_size_t region_size)
{
    if (_txm_image4_monitor_trap_supported(selector) == true) {
        txm_transfer_region(region_addr, region_size);
    }
    return KERN_SUCCESS;
}

kern_return_t
txm_image4_reclaim_region(
    image4_cs_trap_t selector,
    vm_address_t region_addr,
    vm_size_t region_size)
{
    if (_txm_image4_monitor_trap_supported(selector) == true) {
        txm_reclaim_region(region_addr, region_size);
    }
    return KERN_SUCCESS;
}

errno_t
txm_image4_monitor_trap(
    image4_cs_trap_t selector,
    __unused const void *input_data,
    __unused size_t input_size)
{
#if kTXMKernelAPIVersion >= 2
    txm_call_t txm_call = {
        .selector = kTXMKernelSelectorImage4Dispatch,
        .num_input_args = 5,
    };

    kern_return_t ret = txm_kernel_call(
        &txm_call, selector,
        input_data, input_size,
        NULL, NULL);

    /* Return 0 for success */
    if (ret == KERN_SUCCESS) {
        return 0;
    }

    /* Check for an errno_t return */
    if (txm_call.txm_ret.returnCode == kTXMReturnCodeErrno) {
        if (txm_call.txm_ret.errnoRet == 0) {
            panic("image4 dispatch: unexpected success errno_t: %llu", selector);
        }
        return txm_call.txm_ret.errnoRet;
    }

    /* Return a generic error */
    return EPERM;
#else
    printf("image4 dispatch: traps not supported: %llu\n", selector);
    return ENOSYS;
#endif
}

#endif /* CONFIG_SPTM */