/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <os/overflow.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap_cs.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/lock_rw.h>
#include <libkern/libkern.h>
#include <libkern/section_keywords.h>
#include <libkern/img4/interface.h>
#include <libkern/amfi/amfi.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/codesign.h>
#include <sys/trust_caches.h>
#include <IOKit/IOBSD.h>
#include <img4/firmware.h>
#include <TrustCache/API.h>

static bool boot_os_tc_loaded = false;
static bool boot_app_tc_loaded = false;

#if CONFIG_SPTM
/*
 * We have the TrustedExecutionMonitor environment available. All of our artifacts
 * need to be page-aligned and transferred to the appropriate TXM type before we
 * call into TXM to load the trust cache.
 *
 * The trust cache runtime is managed independently by TXM. All initialization work
 * is done by the TXM bootstrap and there is nothing more we need to do here.
 */
#include <sys/trusted_execution_monitor.h>

LCK_GRP_DECLARE(txm_trust_cache_lck_grp, "txm_trust_cache_lck_grp");
decl_lck_rw_data(, txm_trust_cache_lck);

/* Immutable part of the runtime */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = NULL;

/* Mutable part of the runtime */
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = NULL;

/* Static trust cache information collected from TXM */
SECURITY_READ_ONLY_LATE(uint32_t) num_static_trust_caches = 0;
SECURITY_READ_ONLY_LATE(TCCapabilities_t) static_trust_cache_capabilities0 = 0;
SECURITY_READ_ONLY_LATE(TCCapabilities_t) static_trust_cache_capabilities1 = 0;

static void
get_trust_cache_info(void)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorGetTrustCacheInfo,
		.failure_fatal = true,
		.num_output_args = 4
	};
	txm_kernel_call(&txm_call);

	/*
	 * The monitor returns the libTrustCache runtime it uses within the first
	 * returned word. The kernel doesn't currently have a use-case for this, so
	 * we don't use it. But we continue to return this value from the monitor
	 * in case it ever becomes useful down the line.
	 */

	num_static_trust_caches = (uint32_t)txm_call.return_words[1];
	static_trust_cache_capabilities0 = (TCCapabilities_t)txm_call.return_words[2];
	static_trust_cache_capabilities1 = (TCCapabilities_t)txm_call.return_words[3];
}
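
/*
 * Sketch of the TXM call convention assumed throughout this file, mirroring
 * get_trust_cache_info() above: inputs are passed variadically to
 * txm_kernel_call() and outputs come back through txm_call.return_words[],
 * with num_output_args declaring how many words the monitor must fill:
 *
 *     txm_call_t call = {
 *         .selector = kTXMKernelSelectorGetTrustCacheInfo,
 *         .num_output_args = 4,
 *     };
 *     txm_kernel_call(&call);
 *     uint32_t caches = (uint32_t)call.return_words[1];  // word 0 unused here
 */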

void
trust_cache_runtime_init(void)
{
	/* Image4 interface needs to be available */
	if (img4if == NULL) {
		panic("image4 interface not available");
	}

	/* AMFI interface needs to be available */
	if (amfi == NULL) {
		panic("amfi interface not available");
	} else if (amfi->TrustCache.version < 2) {
		panic("amfi interface is stale: %u", amfi->TrustCache.version);
	}

	/* Initialize the TXM trust cache read-write lock */
	lck_rw_init(&txm_trust_cache_lck, &txm_trust_cache_lck_grp, 0);

	/* Acquire trust cache information from the monitor */
	get_trust_cache_info();
}

static kern_return_t
txm_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorLoadTrustCache,
		.num_input_args = 7
	};
	vm_address_t payload_addr = 0;
	vm_address_t manifest_addr = 0;
	kern_return_t ret = KERN_DENIED;

	/* We don't support the auxiliary manifest for now */
	(void)img4_aux_manifest;
	(void)img4_aux_manifest_len;

	ret = kmem_alloc(kernel_map, &payload_addr, img4_payload_len,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 payload: %d\n", ret);
		goto out;
	}
	memcpy((void*)payload_addr, img4_payload, img4_payload_len);

	ret = kmem_alloc(kernel_map, &manifest_addr, img4_manifest_len,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 manifest: %d\n", ret);
		goto out;
	}
	memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);

	/* Transfer both regions to be TXM owned */
	txm_transfer_region(payload_addr, img4_payload_len);
	txm_transfer_region(manifest_addr, img4_manifest_len);

	/* Take the trust cache lock exclusively */
	lck_rw_lock_exclusive(&txm_trust_cache_lck);

	/* TXM will round up to page length itself */
	ret = txm_kernel_call(
		&txm_call,
		type,
		payload_addr, img4_payload_len,
		manifest_addr, img4_manifest_len,
		0, 0);

	/* Release the trust cache lock */
	lck_rw_unlock_exclusive(&txm_trust_cache_lck);

	/* Check for duplicate trust cache error */
	if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
		if (txm_call.txm_ret.tcRet.error == kTCReturnDuplicate) {
			ret = KERN_ALREADY_IN_SET;
		}
	}

out:
	if (manifest_addr != 0) {
		/* Reclaim the manifest region */
		txm_reclaim_region(manifest_addr, img4_manifest_len);

		/* Free the manifest region */
		kmem_free(kernel_map, manifest_addr, img4_manifest_len);
		manifest_addr = 0;
	}

	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		/* Reclaim the payload region */
		txm_reclaim_region(payload_addr, img4_payload_len);

		/* Free the payload region */
		kmem_free(kernel_map, payload_addr, img4_payload_len);
		payload_addr = 0;
	}

	return ret;
}
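
/*
 * A note on ownership, under the TXM contract visible above: the manifest is
 * only needed during validation, so it is always reclaimed and freed. The
 * payload, however, is retained by the monitor on success, which is why it is
 * only reclaimed on the failure path. A hypothetical caller never touches
 * either copy again after a successful load:
 *
 *     kern_return_t kr = txm_load_trust_cache(kTCTypeCryptex1BootOS,
 *         payload, payload_len, manifest, manifest_len, NULL, 0);
 *     // on KERN_SUCCESS, the copied payload now belongs to TXM
 */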

static kern_return_t
txm_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
	panic("legacy trust caches are not supported on this platform");
}

static kern_return_t
txm_query_trust_cache(
	TCQueryType_t query_type,
	const uint8_t cdhash[kTCEntryHashSize],
	TrustCacheQueryToken_t *query_token)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorQueryTrustCache,
		.failure_silent = true,
		.num_input_args = 2,
		.num_output_args = 2,
	};
	kern_return_t ret = KERN_NOT_FOUND;

	lck_rw_lock_shared(&txm_trust_cache_lck);
	ret = txm_kernel_call(&txm_call, query_type, cdhash);
	lck_rw_unlock_shared(&txm_trust_cache_lck);

	if (ret == KERN_SUCCESS) {
		if (query_token) {
			query_token->trustCache = (const TrustCache_t*)txm_call.return_words[0];
			query_token->trustCacheEntry = (const void*)txm_call.return_words[1];
		}
		return KERN_SUCCESS;
	}

	/* Check for not-found trust cache error */
	if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
		if (txm_call.txm_ret.tcRet.error == kTCReturnNotFound) {
			ret = KERN_NOT_FOUND;
		}
	}

	return ret;
}
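
/*
 * Usage sketch (hypothetical caller): the token returned by a successful query
 * points into monitor-owned memory and must be treated as an opaque, read-only
 * handle. Assuming the kTCQueryTypeAll selector from libTrustCache:
 *
 *     TrustCacheQueryToken_t token = {0};
 *     if (txm_query_trust_cache(kTCQueryTypeAll, cdhash, &token) == KERN_SUCCESS) {
 *         // the cdhash is present; token identifies the cache and entry
 *     }
 */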

static kern_return_t
txm_check_trust_cache_runtime_for_uuid(
	const uint8_t check_uuid[kUUIDSize])
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorCheckTrustCacheRuntimeForUUID,
		.failure_silent = true,
		.num_input_args = 1
	};
	kern_return_t ret = KERN_DENIED;

	lck_rw_lock_shared(&txm_trust_cache_lck);
	ret = txm_kernel_call(&txm_call, check_uuid);
	lck_rw_unlock_shared(&txm_trust_cache_lck);

	/* Check for not-found trust cache error */
	if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
		if (txm_call.txm_ret.tcRet.error == kTCReturnNotFound) {
			ret = KERN_NOT_FOUND;
		}
	}

	return ret;
}

#elif PMAP_CS_PPL_MONITOR
/*
 * We have the Page Protection Layer environment available. All of our artifacts
 * need to be page-aligned. The PPL will lock down the artifacts before it begins
 * the validation.
 *
 * Even though the runtimes are PPL owned, we expect the runtime init function
 * to be called before the PPL has been locked down, which allows us to write
 * to them.
 */

/* Immutable part of the runtime */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &ppl_trust_cache_rt;

/* Mutable part of the runtime */
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &ppl_trust_cache_mut_rt;

void
trust_cache_runtime_init(void)
{
	bool allow_second_static_cache = false;
	bool allow_engineering_caches = false;

#if CONFIG_SECOND_STATIC_TRUST_CACHE
	allow_second_static_cache = true;
#endif

#if PMAP_CS_INCLUDE_INTERNAL_CODE
	allow_engineering_caches = true;
#endif

	/* Image4 interface needs to be available */
	if (img4if == NULL) {
		panic("image4 interface not available");
	}

	/* AMFI interface needs to be available */
	if (amfi == NULL) {
		panic("amfi interface not available");
	} else if (amfi->TrustCache.version < 2) {
		panic("amfi interface is stale: %u", amfi->TrustCache.version);
	}

	trustCacheInitializeRuntime(
		trust_cache_rt,
		trust_cache_mut_rt,
		allow_second_static_cache,
		allow_engineering_caches,
		false,
		IMG4_RUNTIME_PMAP_CS);

	/* Locks are initialized in "pmap_bootstrap()" */
}

static kern_return_t
ppl_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	kern_return_t ret = KERN_DENIED;
	vm_address_t payload_addr = 0;
	vm_size_t payload_len = 0;
	vm_size_t payload_len_aligned = 0;
	vm_address_t manifest_addr = 0;
	vm_size_t manifest_len_aligned = 0;
	vm_address_t aux_manifest_addr = 0;
	vm_size_t aux_manifest_len_aligned = 0;

	/* The trust cache data structure is bundled with the img4 payload */
	if (os_add_overflow(img4_payload_len, sizeof(pmap_img4_payload_t), &payload_len)) {
		panic("overflow on pmap img4 payload: %lu", img4_payload_len);
	}
	payload_len_aligned = round_page(payload_len);
	manifest_len_aligned = round_page(img4_manifest_len);
	aux_manifest_len_aligned = round_page(img4_aux_manifest_len);

	ret = kmem_alloc(kernel_map, &payload_addr, payload_len_aligned,
	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for pmap image4 payload: %d\n", ret);
		goto out;
	}

	pmap_img4_payload_t *pmap_payload = (pmap_img4_payload_t*)payload_addr;
	memcpy(pmap_payload->img4_payload, img4_payload, img4_payload_len);

	/* Allocate storage for the manifest */
	ret = kmem_alloc(kernel_map, &manifest_addr, manifest_len_aligned,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 manifest: %d\n", ret);
		goto out;
	}
	memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);

	if (aux_manifest_len_aligned != 0) {
		/* Allocate storage for the auxiliary manifest */
		ret = kmem_alloc(kernel_map, &aux_manifest_addr, aux_manifest_len_aligned,
		    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
		if (ret != KERN_SUCCESS) {
			printf("unable to allocate memory for auxiliary image4 manifest: %d\n", ret);
			goto out;
		}
		memcpy((void*)aux_manifest_addr, img4_aux_manifest, img4_aux_manifest_len);
	}

	/* The PPL will round up the length to page size itself */
	ret = pmap_load_trust_cache_with_type(
		type,
		payload_addr, payload_len,
		manifest_addr, img4_manifest_len,
		aux_manifest_addr, img4_aux_manifest_len);

out:
	if (aux_manifest_addr != 0) {
		kmem_free(kernel_map, aux_manifest_addr, aux_manifest_len_aligned);
		aux_manifest_addr = 0;
		aux_manifest_len_aligned = 0;
	}

	if (manifest_addr != 0) {
		kmem_free(kernel_map, manifest_addr, manifest_len_aligned);
		manifest_addr = 0;
		manifest_len_aligned = 0;
	}

	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		kmem_free(kernel_map, payload_addr, payload_len_aligned);
		payload_addr = 0;
		payload_len_aligned = 0;
	}

	return ret;
}
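
/*
 * The pmap payload wrapper above bundles the trust cache bookkeeping with the
 * raw image4 payload in a single allocation, roughly (assumed shape; see
 * pmap_cs.h for the authoritative definition):
 *
 *     typedef struct _pmap_img4_payload {
 *         TrustCache_t trust_cache;    // PPL-managed bookkeeping
 *         uint8_t img4_payload[0];     // raw image4 payload bytes
 *     } pmap_img4_payload_t;
 *
 * This is why payload_len adds sizeof(pmap_img4_payload_t) before rounding,
 * and why the payload allocation, unlike the manifests, is not tagged KMA_DATA.
 */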

static kern_return_t
ppl_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
	panic("legacy trust caches are not supported on this platform");
}

static kern_return_t
ppl_query_trust_cache(
	TCQueryType_t query_type,
	const uint8_t cdhash[kTCEntryHashSize],
	TrustCacheQueryToken_t *query_token)
{
	/*
	 * We need to query by trapping into the PPL since the PPL trust cache runtime
	 * lock needs to be held. We cannot hold the lock from outside the PPL.
	 */
	return pmap_query_trust_cache(query_type, cdhash, query_token);
}

static kern_return_t
ppl_check_trust_cache_runtime_for_uuid(
	const uint8_t check_uuid[kUUIDSize])
{
	return pmap_check_trust_cache_runtime_for_uuid(check_uuid);
}

#else
/*
 * We don't have a monitor environment available. This means someone with a kernel
 * memory exploit will be able to inject a trust cache into the system. There is
 * not much we can do here, since this is older hardware.
 */

/* Lock for the runtime */
LCK_GRP_DECLARE(trust_cache_lck_grp, "trust_cache_lck_grp");
decl_lck_rw_data(, trust_cache_rt_lock);

/* Immutable part of the runtime */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t) trust_cache_rt_storage;
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &trust_cache_rt_storage;

/* Mutable part of the runtime */
TrustCacheMutableRuntime_t trust_cache_mut_rt_storage;
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &trust_cache_mut_rt_storage;

void
trust_cache_runtime_init(void)
{
	bool allow_second_static_cache = false;
	bool allow_engineering_caches = false;
	bool allow_legacy_caches = false;

#if CONFIG_SECOND_STATIC_TRUST_CACHE
	allow_second_static_cache = true;
#endif

#if TRUST_CACHE_INCLUDE_INTERNAL_CODE
	allow_engineering_caches = true;
#endif

#ifdef XNU_PLATFORM_BridgeOS
	allow_legacy_caches = true;
#endif

	/* Image4 interface needs to be available */
	if (img4if == NULL) {
		panic("image4 interface not available");
	}

	/* AMFI interface needs to be available */
	if (amfi == NULL) {
		panic("amfi interface not available");
	} else if (amfi->TrustCache.version < 2) {
		panic("amfi interface is stale: %u", amfi->TrustCache.version);
	}

	trustCacheInitializeRuntime(
		trust_cache_rt,
		trust_cache_mut_rt,
		allow_second_static_cache,
		allow_engineering_caches,
		allow_legacy_caches,
		IMG4_RUNTIME_DEFAULT);

	/* Initialize the read-write lock */
	lck_rw_init(&trust_cache_rt_lock, &trust_cache_lck_grp, 0);
}

static kern_return_t
xnu_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	kern_return_t ret = KERN_DENIED;

	/* Ignore the auxiliary manifest until we add support for it */
	(void)img4_aux_manifest;
	(void)img4_aux_manifest_len;

	/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
	TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
	assert(trust_cache != NULL);

	/*
	 * The manifests aren't needed once validation completes, but the payload must
	 * persist. The caller of this API expects us to make our own allocations, so
	 * we can use the manifests passed in as-is but need to copy the payload into
	 * a new allocation of our own.
	 *
	 * Z_WAITOK implies that this allocation can never fail.
	 */
	uint8_t *payload = (uint8_t*)kalloc_data(img4_payload_len, Z_WAITOK);
	assert(payload != NULL);

	/* Copy the payload into our allocation */
	memcpy(payload, img4_payload, img4_payload_len);

	/* Exclusively lock the runtime */
	lck_rw_lock_exclusive(&trust_cache_rt_lock);

	TCReturn_t tc_ret = amfi->TrustCache.load(
		trust_cache_rt,
		type,
		trust_cache,
		(const uintptr_t)payload, img4_payload_len,
		(const uintptr_t)img4_manifest, img4_manifest_len);

	/* Unlock the runtime */
	lck_rw_unlock_exclusive(&trust_cache_rt_lock);

	if (tc_ret.error == kTCReturnSuccess) {
		ret = KERN_SUCCESS;
	} else if (tc_ret.error == kTCReturnDuplicate) {
		ret = KERN_ALREADY_IN_SET;
	} else {
		printf("unable to load trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);

		ret = KERN_FAILURE;
	}

	if (ret != KERN_SUCCESS) {
		kfree_data(payload, img4_payload_len);
		payload = NULL;

		kfree_type(TrustCache_t, trust_cache);
		trust_cache = NULL;
	}
	return ret;
}
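
/*
 * On success, amfi->TrustCache.load() links both of our allocations into the
 * runtime: the TrustCache_t node and the copied payload stay live for the
 * remainder of the boot, which is why they are only freed on the failure path
 * above. The caller's own img4 buffers are never retained and remain the
 * caller's to release.
 */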

static kern_return_t
xnu_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
#if XNU_HAS_LEGACY_TRUST_CACHE_LOADING
	kern_return_t ret = KERN_DENIED;

	/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
	TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
	assert(trust_cache != NULL);

	/* Allocate storage for the module -- Z_WAITOK means this can't fail */
	uint8_t *module = (uint8_t*)kalloc_data(module_size, Z_WAITOK);
	assert(module != NULL);

	/* Copy the module into our allocation */
	memcpy(module, module_data, module_size);

	/* Exclusively lock the runtime */
	lck_rw_lock_exclusive(&trust_cache_rt_lock);

	TCReturn_t tc_ret = amfi->TrustCache.loadModule(
		trust_cache_rt,
		kTCTypeLegacy,
		trust_cache,
		(const uintptr_t)module, module_size);

	/* Unlock the runtime */
	lck_rw_unlock_exclusive(&trust_cache_rt_lock);

	if (tc_ret.error == kTCReturnSuccess) {
		ret = KERN_SUCCESS;
	} else if (tc_ret.error == kTCReturnDuplicate) {
		ret = KERN_ALREADY_IN_SET;
	} else {
		printf("unable to load legacy trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);

		ret = KERN_FAILURE;
	}

	if (ret != KERN_SUCCESS) {
		kfree_data(module, module_size);
		module = NULL;

		kfree_type(TrustCache_t, trust_cache);
		trust_cache = NULL;
	}
	return ret;
#else
	panic("legacy trust caches are not supported on this platform");
#endif /* XNU_HAS_LEGACY_TRUST_CACHE_LOADING */
}

static kern_return_t
xnu_query_trust_cache(
	TCQueryType_t query_type,
	const uint8_t cdhash[kTCEntryHashSize],
	TrustCacheQueryToken_t *query_token)
{
	kern_return_t ret = KERN_NOT_FOUND;

	/* Validate the query type preemptively */
	if (query_type >= kTCQueryTypeTotal) {
		printf("unable to query trust cache: invalid query type: %u\n", query_type);
		return KERN_INVALID_ARGUMENT;
	}

	/* Lock the runtime as shared */
	lck_rw_lock_shared(&trust_cache_rt_lock);

	TCReturn_t tc_ret = amfi->TrustCache.query(
		trust_cache_rt,
		query_type,
		cdhash,
		query_token);

	/* Unlock the runtime */
	lck_rw_unlock_shared(&trust_cache_rt_lock);

	if (tc_ret.error == kTCReturnSuccess) {
		ret = KERN_SUCCESS;
	} else if (tc_ret.error == kTCReturnNotFound) {
		ret = KERN_NOT_FOUND;
	} else {
		ret = KERN_FAILURE;
		printf("trust cache query failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);
	}

	return ret;
}

static kern_return_t
xnu_check_trust_cache_runtime_for_uuid(
	const uint8_t check_uuid[kUUIDSize])
{
	kern_return_t ret = KERN_DENIED;

	if (amfi->TrustCache.version < 3) {
		/* AMFI change hasn't landed in the build */
		printf("unable to check for loaded trust cache: interface not supported\n");
		return KERN_NOT_SUPPORTED;
	}

	/* Lock the runtime as shared */
	lck_rw_lock_shared(&trust_cache_rt_lock);

	TCReturn_t tc_ret = amfi->TrustCache.checkRuntimeForUUID(
		trust_cache_rt,
		check_uuid,
		NULL);

	/* Unlock the runtime */
	lck_rw_unlock_shared(&trust_cache_rt_lock);

	if (tc_ret.error == kTCReturnSuccess) {
		ret = KERN_SUCCESS;
	} else if (tc_ret.error == kTCReturnNotFound) {
		ret = KERN_NOT_FOUND;
	} else {
		ret = KERN_FAILURE;
		printf("trust cache UUID check failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);
	}

	return ret;
}

#endif /* CONFIG_SPTM */

kern_return_t
check_trust_cache_runtime_for_uuid(
	const uint8_t check_uuid[kUUIDSize])
{
	kern_return_t ret = KERN_DENIED;

	if (check_uuid == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_SPTM
	ret = txm_check_trust_cache_runtime_for_uuid(check_uuid);
#elif PMAP_CS_PPL_MONITOR
	ret = ppl_check_trust_cache_runtime_for_uuid(check_uuid);
#else
	ret = xnu_check_trust_cache_runtime_for_uuid(check_uuid);
#endif

	return ret;
}
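
/*
 * Usage sketch (hypothetical caller checking whether a cryptex's trust cache
 * is already resident before re-loading it):
 *
 *     const uint8_t uuid[kUUIDSize] = { ... };   // image4 UUID of the cache
 *     if (check_trust_cache_runtime_for_uuid(uuid) == KERN_SUCCESS) {
 *         // a trust cache with this UUID has already been loaded
 *     }
 */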

kern_return_t
load_trust_cache(
	const uint8_t *img4_object, const size_t img4_object_len,
	const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len)
{
	TCType_t type = kTCTypeInvalid;
	kern_return_t ret = KERN_DENIED;

	/* Start from the first valid type and attempt to validate through each */
	for (type = kTCTypeLTRS; type < kTCTypeTotal; type += 1) {
		ret = load_trust_cache_with_type(
			type,
			img4_object, img4_object_len,
			img4_ext_manifest, img4_ext_manifest_len,
			NULL, 0);

		if ((ret == KERN_SUCCESS) || (ret == KERN_ALREADY_IN_SET)) {
			return ret;
		}
	}

#if TRUST_CACHE_INCLUDE_INTERNAL_CODE
	/* Attempt to load as an engineering root */
	ret = load_trust_cache_with_type(
		kTCTypeDTRS,
		img4_object, img4_object_len,
		img4_ext_manifest, img4_ext_manifest_len,
		NULL, 0);
#endif

	return ret;
}
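
/*
 * Usage sketch (hypothetical caller that doesn't know the trust cache type up
 * front, e.g. a loader handed an img4 with an embedded manifest):
 *
 *     kern_return_t kr = load_trust_cache(
 *         object, object_len,   // img4 object with embedded manifest
 *         NULL, 0);             // no external manifest
 *     if (kr == KERN_ALREADY_IN_SET) {
 *         // a duplicate load can usually be treated as success
 *     }
 *
 * Because the type is probed linearly, a failure returns the error of the
 * last type attempted, which is not necessarily the most descriptive one.
 */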

kern_return_t
load_trust_cache_with_type(
	TCType_t type,
	const uint8_t *img4_object, const size_t img4_object_len,
	const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	kern_return_t ret = KERN_DENIED;
	uintptr_t length_check = 0;
	const uint8_t *img4_payload = NULL;
	size_t img4_payload_len = 0;
	const uint8_t *img4_manifest = NULL;
	size_t img4_manifest_len = 0;

	/* img4_object is required */
	if (!img4_object || (img4_object_len == 0)) {
		printf("unable to load trust cache (type: %u): no img4_object provided\n", type);
		return KERN_INVALID_ARGUMENT;
	} else if (os_add_overflow((uintptr_t)img4_object, img4_object_len, &length_check)) {
		panic("overflow on the img4 object: %p | %lu", img4_object, img4_object_len);
	}

	/* img4_ext_manifest is optional */
	if (img4_ext_manifest_len != 0) {
		if (!img4_ext_manifest) {
			printf("unable to load trust cache (type: %u): img4_ext_manifest expected\n", type);
			return KERN_INVALID_ARGUMENT;
		} else if (os_add_overflow((uintptr_t)img4_ext_manifest, img4_ext_manifest_len, &length_check)) {
			panic("overflow on the ext manifest: %p | %lu", img4_ext_manifest, img4_ext_manifest_len);
		}
	}

	/* img4_aux_manifest is optional */
	if (img4_aux_manifest_len != 0) {
		if (!img4_aux_manifest) {
			printf("unable to load trust cache (type: %u): img4_aux_manifest expected\n", type);
			return KERN_INVALID_ARGUMENT;
		} else if (os_add_overflow((uintptr_t)img4_aux_manifest, img4_aux_manifest_len, &length_check)) {
			panic("overflow on the aux manifest: %p | %lu", img4_aux_manifest, img4_aux_manifest_len);
		}
	}

	/*
	 * If we don't have an external manifest provided, we expect the img4_object to have
	 * the manifest embedded. In this case, we need to extract the different artifacts
	 * out of the object.
	 */
	if (img4_ext_manifest_len != 0) {
		img4_payload = img4_object;
		img4_payload_len = img4_object_len;
		img4_manifest = img4_ext_manifest;
		img4_manifest_len = img4_ext_manifest_len;
	} else {
		if (img4if->i4if_version < 15) {
			/* AppleImage4 change hasn't landed in the build */
			printf("unable to extract payload and manifest from object\n");
			return KERN_NOT_SUPPORTED;
		}
		img4_buff_t img4_buff = IMG4_BUFF_INIT;

		/* Extract the payload */
		if (img4_get_payload(img4_object, img4_object_len, &img4_buff) == NULL) {
			printf("unable to find payload within img4 object\n");
			return KERN_NOT_FOUND;
		}
		img4_payload = img4_buff.i4b_bytes;
		img4_payload_len = img4_buff.i4b_len;

		/* Extract the manifest */
		if (img4_get_manifest(img4_object, img4_object_len, &img4_buff) == NULL) {
			printf("unable to find manifest within img4 object\n");
			return KERN_NOT_FOUND;
		}
		img4_manifest = img4_buff.i4b_bytes;
		img4_manifest_len = img4_buff.i4b_len;
	}

	if ((type == kTCTypeStatic) || (type == kTCTypeEngineering) || (type == kTCTypeLegacy)) {
		printf("unable to load trust cache: invalid type: %u\n", type);
		return KERN_INVALID_ARGUMENT;
	} else if (type >= kTCTypeTotal) {
		printf("unable to load trust cache: unknown type: %u\n", type);
		return KERN_INVALID_ARGUMENT;
	}

	/* Validate entitlement for the calling process */
	if (TCTypeConfig[type].entitlementValue != NULL) {
		const bool entitlement_satisfied = IOCurrentTaskHasStringEntitlement(
			"com.apple.private.pmap.load-trust-cache",
			TCTypeConfig[type].entitlementValue);

		if (entitlement_satisfied == false) {
			printf("unable to load trust cache (type: %u): unsatisfied entitlement\n", type);
			return KERN_DENIED;
		}
	}

	if ((type == kTCTypeCryptex1BootOS) && boot_os_tc_loaded) {
		printf("disallowed from loading multiple kTCTypeCryptex1BootOS trust caches\n");
		return KERN_DENIED;
	} else if ((type == kTCTypeCryptex1BootApp) && boot_app_tc_loaded) {
		printf("disallowed from loading multiple kTCTypeCryptex1BootApp trust caches\n");
		return KERN_DENIED;
	}

#if CONFIG_SPTM
	ret = txm_load_trust_cache(
		type,
		img4_payload, img4_payload_len,
		img4_manifest, img4_manifest_len,
		img4_aux_manifest, img4_aux_manifest_len);
#elif PMAP_CS_PPL_MONITOR
	ret = ppl_load_trust_cache(
		type,
		img4_payload, img4_payload_len,
		img4_manifest, img4_manifest_len,
		img4_aux_manifest, img4_aux_manifest_len);
#else
	ret = xnu_load_trust_cache(
		type,
		img4_payload, img4_payload_len,
		img4_manifest, img4_manifest_len,
		img4_aux_manifest, img4_aux_manifest_len);
#endif

	if (ret != KERN_SUCCESS) {
		printf("unable to load trust cache (type: %u): %d\n", type, ret);
	} else {
		if (type == kTCTypeCryptex1BootOS) {
			boot_os_tc_loaded = true;
		} else if (type == kTCTypeCryptex1BootApp) {
			boot_app_tc_loaded = true;
		}
		printf("successfully loaded trust cache of type: %u\n", type);
	}

	return ret;
}
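
/*
 * Usage sketch (hypothetical caller that knows its type and carries a
 * detached manifest):
 *
 *     kern_return_t kr = load_trust_cache_with_type(
 *         kTCTypeCryptex1Generic,
 *         payload, payload_len,    // img4 payload only
 *         manifest, manifest_len,  // external (detached) manifest
 *         NULL, 0);                // no auxiliary manifest
 *
 * The calling task must satisfy the "com.apple.private.pmap.load-trust-cache"
 * entitlement with the value that TCTypeConfig maps to the requested type.
 */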

kern_return_t
load_legacy_trust_cache(
	const uint8_t *module_data, const size_t module_size)
{
	kern_return_t ret = KERN_DENIED;
	uintptr_t length_check = 0;

	/* Module is required */
	if (!module_data || (module_size == 0)) {
		printf("unable to load legacy trust cache: no module provided\n");
		return KERN_INVALID_ARGUMENT;
	} else if (os_add_overflow((uintptr_t)module_data, module_size, &length_check)) {
		panic("overflow on the module: %p | %lu", module_data, module_size);
	}

#if CONFIG_SPTM
	ret = txm_load_legacy_trust_cache(module_data, module_size);
#elif PMAP_CS_PPL_MONITOR
	ret = ppl_load_legacy_trust_cache(module_data, module_size);
#else
	ret = xnu_load_legacy_trust_cache(module_data, module_size);
#endif

	if (ret != KERN_SUCCESS) {
		printf("unable to load legacy trust cache: %d\n", ret);
	} else {
		printf("successfully loaded legacy trust cache\n");
	}

	return ret;
}

kern_return_t
query_trust_cache(
	TCQueryType_t query_type,
	const uint8_t cdhash[kTCEntryHashSize],
	TrustCacheQueryToken_t *query_token)
{
	kern_return_t ret = KERN_NOT_FOUND;

	if (cdhash == NULL) {
		printf("unable to query trust caches: no cdhash provided\n");
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_SPTM
	ret = txm_query_trust_cache(query_type, cdhash, query_token);
#elif PMAP_CS_PPL_MONITOR
	ret = ppl_query_trust_cache(query_type, cdhash, query_token);
#else
	ret = xnu_query_trust_cache(query_type, cdhash, query_token);
#endif

	return ret;
}
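
/*
 * Usage sketch (hypothetical caller validating a code signature's cdhash):
 *
 *     TrustCacheQueryToken_t token = {0};
 *     if (query_trust_cache(kTCQueryTypeAll, cdhash, &token) == KERN_SUCCESS) {
 *         // the cdhash is present in a static or loaded trust cache
 *     }
 *
 * The query type narrows which caches are searched (all, static-only, or
 * loadable-only, assuming the TCQueryType_t selectors from libTrustCache).
 */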

/*
 * The trust cache management library uses a wrapper data structure to manage each
 * of the trust cache modules. We know the exact number of static trust caches we
 * expect, so we keep around a read-only-late allocation of the data structure for
 * use.
 *
 * Since engineering trust caches are only ever allowed on development builds, they
 * are not protected through the read-only-late property, and instead allocated
 * dynamically.
 */

SECURITY_READ_ONLY_LATE(bool) trust_cache_static_init = false;
SECURITY_READ_ONLY_LATE(bool) trust_cache_static_loaded = false;
SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static0 = {0};

#if CONFIG_SECOND_STATIC_TRUST_CACHE
SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static1 = {0};
#endif

#if defined(__arm64__)

typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

#else /* x86_64 */
/*
 * We need this duplicate definition because it is hidden behind the MACH_KERNEL_PRIVATE
 * macro definition, which makes it inaccessible to this part of the code base.
 */
extern uint64_t physmap_base, physmap_max;

static inline void*
PHYSMAP_PTOV_check(void *paddr)
{
	uint64_t pvaddr = (uint64_t)paddr + physmap_base;

	if (__improbable(pvaddr >= physmap_max)) {
		panic("PHYSMAP_PTOV bounds exceeded, 0x%qx, 0x%qx, 0x%qx",
		    pvaddr, physmap_base, physmap_max);
	}

	return (void*)pvaddr;
}

#define PHYSMAP_PTOV(x) (PHYSMAP_PTOV_check((void*) (x)))
#define phystokv(x) ((vm_offset_t)(PHYSMAP_PTOV(x)))
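
/*
 * Illustration of the translation these macros provide on x86_64 (made-up
 * numbers): with physmap_base = 0xffffff8000000000, physical address 0x2000
 * maps to kernel virtual address 0xffffff8000002000, provided the result
 * stays below physmap_max. The device tree hands us physical addresses for
 * the trust cache region, hence the phystokv() call in
 * load_static_trust_cache() below.
 */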

#endif /* defined(__arm64__) */

void
load_static_trust_cache(void)
{
	DTEntry memory_map = {0};
	const DTTrustCacheRange *tc_range = NULL;
	trust_cache_offsets_t *tc_offsets = NULL;
	unsigned int tc_dt_prop_length = 0;
	size_t tc_segment_length = 0;

	/* Mark this function as having been called */
	trust_cache_static_init = true;

	/* Nothing to do when the runtime isn't set */
	if (trust_cache_rt == NULL) {
		return;
	}

	if (amfi->TrustCache.version < 1) {
		/* AMFI change hasn't landed in the build */
		printf("unable to load static trust cache: interface not supported\n");
		return;
	}

	int err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
	if (err != kSuccess) {
		printf("unable to find chosen/memory-map in the device tree: %d\n", err);
		return;
	}

	err = SecureDTGetProperty(memory_map, "TrustCache", (const void **)&tc_range, &tc_dt_prop_length);
	if (err == kSuccess) {
		if (tc_dt_prop_length != sizeof(DTTrustCacheRange)) {
			panic("unexpected size for TrustCache property: %u != %zu",
			    tc_dt_prop_length, sizeof(DTTrustCacheRange));
		}

		tc_offsets = (void*)phystokv(tc_range->paddr);
		tc_segment_length = tc_range->length;
	}

	/* x86_64 devices aren't expected to have trust caches */
	if (tc_segment_length == 0) {
		if (tc_offsets && tc_offsets->num_caches != 0) {
			panic("trust cache segment is zero length but trust caches are available: %u",
			    tc_offsets->num_caches);
		}

		printf("no external trust caches found (segment length is zero)\n");
		return;
	} else if (tc_offsets->num_caches == 0) {
		panic("trust cache segment isn't zero but no trust caches available: %lu",
		    (unsigned long)tc_segment_length);
	}

	size_t offsets_length = 0;
	size_t struct_length = 0;
	if (os_mul_overflow(tc_offsets->num_caches, sizeof(uint32_t), &offsets_length)) {
		panic("overflow on the number of trust caches provided: %u", tc_offsets->num_caches);
	} else if (os_add_overflow(offsets_length, sizeof(trust_cache_offsets_t), &struct_length)) {
		panic("overflow on length of the trust cache offsets: %lu",
		    (unsigned long)offsets_length);
	} else if (tc_segment_length < struct_length) {
		panic("trust cache segment length smaller than required: %lu | %lu",
		    (unsigned long)tc_segment_length, (unsigned long)struct_length);
	}
	const uintptr_t tc_region_end = (uintptr_t)tc_offsets + tc_segment_length;

	printf("attempting to load %u external trust cache modules\n", tc_offsets->num_caches);

	for (uint32_t i = 0; i < tc_offsets->num_caches; i++) {
		TCReturn_t tc_ret = (TCReturn_t){.error = kTCReturnError};
		TCType_t tc_type = kTCTypeEngineering;
		TrustCache_t *trust_cache = NULL;

		uintptr_t tc_module = 0;
		if (os_add_overflow((uintptr_t)tc_offsets, tc_offsets->offsets[i], &tc_module)) {
			panic("trust cache module start overflows: %u | %lu | %u",
			    i, (unsigned long)tc_offsets, tc_offsets->offsets[i]);
		} else if (tc_module >= tc_region_end) {
			panic("trust cache module begins after segment ends: %u | %lx | %lx",
			    i, (unsigned long)tc_module, tc_region_end);
		}

		/* Cannot underflow: tc_module < tc_region_end was checked above */
		const size_t buffer_length = tc_region_end - tc_module;

		/* The first module is always the static trust cache */
		if (i == 0) {
			tc_type = kTCTypeStatic;
			trust_cache = &trust_cache_static0;
		}

#if CONFIG_SECOND_STATIC_TRUST_CACHE
		if (trust_cache_rt->allowSecondStaticTC && (i == 1)) {
			tc_type = kTCTypeStatic;
			trust_cache = &trust_cache_static1;
		}
#endif

		if (tc_type == kTCTypeEngineering) {
			if (trust_cache_rt->allowEngineeringTC == false) {
				printf("skipping engineering trust cache module: %u\n", i);
				continue;
			}

			/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
			trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
			assert(trust_cache != NULL);
		}

		tc_ret = amfi->TrustCache.loadModule(
			trust_cache_rt,
			tc_type,
			trust_cache,
			tc_module, buffer_length);

		if (tc_ret.error != kTCReturnSuccess) {
			printf("unable to load trust cache module: %u (TCReturn: 0x%02X | 0x%02X | %u)\n",
			    i, tc_ret.component, tc_ret.error, tc_ret.uniqueError);

			if (tc_type == kTCTypeStatic) {
				panic("failed to load static trust cache module: %u", i);
			}
			continue;
		}
		printf("loaded external trust cache module: %u\n", i);

		/*
		 * The first module is always loaded as a static trust cache. If loading it
		 * failed, then this function would've panicked. If we reach here, it means
		 * we've loaded a static trust cache on the system.
		 */
		trust_cache_static_loaded = true;
	}

	printf("completed loading external trust cache modules\n");
}
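
/*
 * The segment parsed above is laid out roughly as follows (assumed shape; the
 * authoritative definition ships with the trust cache headers):
 *
 *     typedef struct _trust_cache_offsets {
 *         uint32_t num_caches;    // number of modules in the segment
 *         uint32_t offsets[];     // byte offset of each module from the
 *     } trust_cache_offsets_t;    // start of this structure
 *
 * Module 0 is always the static trust cache (module 1 as well, where a second
 * static cache is configured); any remaining modules are engineering caches.
 */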

kern_return_t
static_trust_cache_capabilities(
	uint32_t *num_static_trust_caches_ret,
	TCCapabilities_t *capabilities0_ret,
	TCCapabilities_t *capabilities1_ret)
{
	TCReturn_t tcRet = {.error = kTCReturnError};

	*num_static_trust_caches_ret = 0;
	*capabilities0_ret = kTCCapabilityNone;
	*capabilities1_ret = kTCCapabilityNone;

	/* Ensure static trust caches have been initialized */
	if (trust_cache_static_init == false) {
		panic("attempted to query static trust cache capabilities without init");
	}

#if CONFIG_SPTM
	if (num_static_trust_caches > 0) {
		/* Copy in the data received from TrustedExecutionMonitor */
		*num_static_trust_caches_ret = num_static_trust_caches;
		*capabilities0_ret = static_trust_cache_capabilities0;
		*capabilities1_ret = static_trust_cache_capabilities1;

		/* Return successfully */
		return KERN_SUCCESS;
	}
#endif

	if (amfi->TrustCache.version < 2) {
		/* AMFI change hasn't landed in the build */
		printf("unable to get static trust cache capabilities: interface not supported\n");
		return KERN_NOT_SUPPORTED;
	} else if (trust_cache_static_loaded == false) {
		/* Return arguments already set */
		return KERN_SUCCESS;
	}

	tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static0, capabilities0_ret);
	assert(tcRet.error == kTCReturnSuccess);
	*num_static_trust_caches_ret += 1;

#if CONFIG_SECOND_STATIC_TRUST_CACHE
	tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static1, capabilities1_ret);
	assert(tcRet.error == kTCReturnSuccess);
	*num_static_trust_caches_ret += 1;
#endif

	return KERN_SUCCESS;
}
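
/*
 * Usage sketch (hypothetical sysctl or debug consumer):
 *
 *     uint32_t count = 0;
 *     TCCapabilities_t caps0 = kTCCapabilityNone;
 *     TCCapabilities_t caps1 = kTCCapabilityNone;
 *     if (static_trust_cache_capabilities(&count, &caps0, &caps1) == KERN_SUCCESS) {
 *         // count is 0, 1, or 2; the caps are bitmasks of TCCapabilities_t flags
 *     }
 */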