/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if CONFIG_EXCLAVES

#include <kern/exclaves_debug.h>
#include <kern/exclaves_inspection.h>
#include <kern/exclaves_stackshot.h>
#include <kern/exclaves_test_stackshot.h>
#include <kern/exclaves_boot.h>
#include <kern/exclaves.tightbeam.h>
#include <mach/exclaves_l4.h>
#include <vm/pmap.h>

#define EXCLAVES_STACKSHOT_BATCH_SIZE 32

#include "exclaves_resource.h"

#define EXCLAVES_ID_STACKSHOT_SERVER_EP \
	(exclaves_service_lookup(EXCLAVES_DOMAIN_KERNEL, \
	"com.apple.service.Stackshot"))

static _Atomic bool exclaves_inspection_initialized;
static stackshot_taker_s tb_client;
static size_t exclaves_stackshot_buffer_size;
static uint8_t **exclaves_stackshot_buffer_pages;
static uint8_t *exclaves_stackshot_buffer;
static integer_t exclaves_collect_priority = MAXPRI_KERNEL;
static thread_t exclaves_collection_thread;
static uint64_t scid_list[EXCLAVES_STACKSHOT_BATCH_SIZE];
static ctid_t ctid_list[EXCLAVES_STACKSHOT_BATCH_SIZE];
static size_t scid_list_count;
bool exclaves_stackshot_raw_addresses;
bool exclaves_stackshot_all_address_spaces;
exclaves_panic_ss_status_t exclaves_panic_ss_status = EXCLAVES_PANIC_STACKSHOT_UNKNOWN;

static void *exclaves_collect_event = NULL;

static uint8_t exclaves_collect_thread_ready = 0;

queue_head_t exclaves_inspection_queue_stackshot;
queue_head_t exclaves_inspection_queue_kperf;

static LCK_GRP_DECLARE(exclaves_inspection_lck_grp, "exclaves_inspection_lock");
LCK_MTX_DECLARE(exclaves_collect_mtx, &exclaves_inspection_lck_grp);
// Guards initialization to ensure nothing tries to collect before all threads/allocations/etc. are done
LCK_MTX_DECLARE(exclaves_collect_init_mtx, &exclaves_inspection_lck_grp);

static void exclaves_collect_threads_thread(void *arg, wait_result_t __unused wr);
void exclaves_inspection_check_ast(void);

extern kern_return_t
stackshot_exclaves_process_result(kern_return_t collect_kr, const stackshot_stackshotresult_s *result, bool want_raw_addresses);

extern __attribute__((noinline))
void kperf_thread_exclaves_ast_handler(thread_t thread, const stackshot_stackshotentry_s * _Nonnull entry);

typedef kern_return_t (*exclaves_inspection_process_fn)(kern_return_t collect_kr, const stackshot_stackshotresult_s *data, bool want_raw_addresses);


/* Populate the provided buffers with the scid/ctid values of threads dequeued from the tail of the given waitlist. */
static size_t
prepare_scid_list_stackshot(queue_t wl, uint64_t *pscid_list, ctid_t *pctid_list, uint64_t max_threads)
{
	thread_t thread = NULL;
	size_t count = 0;

	lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

	for (count = 0; count < max_threads; ++count) {
		thread = qe_dequeue_tail(wl, struct thread, th_exclaves_inspection_queue_stackshot);
		if (thread == NULL) {
			break;
		}
		pscid_list[count] = thread->th_exclaves_scheduling_context_id;
		pctid_list[count] = thread_get_ctid(thread);
	}

	return count;
}

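/* As above, but dequeues threads via their kperf inspection queue linkage. */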
static size_t
prepare_scid_list_kperf(queue_t wl, uint64_t *pscid_list, ctid_t *pctid_list, uint64_t max_threads)
{
	thread_t thread = NULL;
	size_t count = 0;

	lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

	for (count = 0; count < max_threads; ++count) {
		thread = qe_dequeue_tail(wl, struct thread, th_exclaves_inspection_queue_kperf);
		if (thread == NULL) {
			break;
		}
		pscid_list[count] = thread->th_exclaves_scheduling_context_id;
		pctid_list[count] = thread_get_ctid(thread);
	}

	return count;
}

/* Clear the flag on each listed pending thread, allowing it to run. */
static void
clear_pending_threads_stackshot(ctid_t *ctids, size_t count, thread_exclaves_inspection_flags_t flag)
{
	size_t i;
	thread_t thread;

	for (i = 0; i < count; ++i) {
		thread = ctid_get_thread(ctids[i]);
		ctids[i] = 0;
		assert(thread);

		os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed);
		wakeup_all_with_inheritor((event_t)&thread->th_exclaves_inspection_queue_stackshot, THREAD_AWAKENED);
		thread_deallocate_safe(thread);
	}
}

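/*
 * kperf variant of the above: clear the inspection flag on each thread named
 * by ctid, wake waiters on its kperf queue linkage, and drop the thread
 * reference taken when the thread was queued.
 */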
static void
clear_pending_threads_kperf(ctid_t *ctids, size_t count, thread_exclaves_inspection_flags_t flag)
{
	size_t i;
	thread_t thread;

	for (i = 0; i < count; ++i) {
		thread = ctid_get_thread(ctids[i]);
		ctids[i] = 0;
		assert(thread);

		os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed);
		wakeup_all_with_inheritor((event_t)&thread->th_exclaves_inspection_queue_kperf, THREAD_AWAKENED);
		thread_deallocate_safe(thread);
	}
}

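/*
 * Drain the stackshot inspection queue (used on the error path): clear the
 * given flag on every queued thread, wake its waiters, and drop its reference.
 */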
static void
clear_stackshot_queue(thread_exclaves_inspection_flags_t flag)
{
	thread_t thread;

	lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

	while (!queue_empty(&exclaves_inspection_queue_stackshot)) {
		thread = qe_dequeue_tail(&exclaves_inspection_queue_stackshot, struct thread, th_exclaves_inspection_queue_stackshot);
		assert(thread);
		os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed);
		wakeup_all_with_inheritor((event_t)&thread->th_exclaves_inspection_queue_stackshot, THREAD_AWAKENED);
		thread_deallocate_safe(thread);
	}
}

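/* As above, for the kperf inspection queue. */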
static void
clear_kperf_queue(thread_exclaves_inspection_flags_t flag)
{
	thread_t thread;

	lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

	while (!queue_empty(&exclaves_inspection_queue_kperf)) {
		thread = qe_dequeue_tail(&exclaves_inspection_queue_kperf, struct thread, th_exclaves_inspection_queue_kperf);
		assert(thread);
		os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed);
		wakeup_all_with_inheritor((event_t)&thread->th_exclaves_inspection_queue_kperf, THREAD_AWAKENED);
		thread_deallocate_safe(thread);
	}
}

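/*
 * Unmarshal a stackshot result from the copied buffer and hand it to
 * process_fn. An output_length of zero means the exclave-side data did not
 * fit in the shared buffer; that and unmarshaling failures return
 * KERN_FAILURE.
 */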
static kern_return_t
process_exclaves_buffer(uint8_t *buffer, size_t output_length, exclaves_inspection_process_fn process_fn, bool want_raw_addresses)
{
	__block kern_return_t error = KERN_SUCCESS;
	tb_error_t tberr = TB_ERROR_SUCCESS;

	if (output_length) {
		tberr = stackshot_stackshotresult__unmarshal(buffer, output_length, ^(stackshot_stackshotresult_s result) {
			error = process_fn(KERN_SUCCESS, &result, want_raw_addresses);
			if (error != KERN_SUCCESS) {
				exclaves_debug_printf(show_errors, "exclaves stackshot: error processing stackshot result\n");
			}
		});
		if (tberr != TB_ERROR_SUCCESS) {
			exclaves_debug_printf(show_errors, "exclaves stackshot: process_exclaves_buffer could not unmarshal stackshot data 0x%x\n", tberr);
			error = KERN_FAILURE;
			goto error_exit;
		}
	} else {
		error = KERN_FAILURE;
		exclaves_debug_printf(show_errors, "exclaves stackshot: exclave stackshot data did not fit into shared memory buffer\n");
	}

error_exit:
	return error;
}

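/*
 * Take a stackshot of the scids currently staged in scid_list: call into the
 * exclaves stackshot server, copy the output from the shared pages into the
 * contiguous kernel buffer, and process it with process_fn.
 */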
static kern_return_t
collect_scid_list(exclaves_inspection_process_fn process_fn, bool want_raw_addresses, bool all_address_spaces)
{
	__block kern_return_t kr = KERN_SUCCESS;
	tb_error_t tberr = 0;
	scid_v_s scids = { 0 };

	exclaves_debug_printf(show_progress, "exclaves stackshot: starting collection, scid_list_count=%zu\n", scid_list_count);

	scid__v_assign_copy(&scids, scid_list, scid_list_count);

	tberr = stackshot_taker_takestackshot(&tb_client, &scids, want_raw_addresses, all_address_spaces, ^(stackshot_outputlength_s output_length) {
		assert3u(output_length, <=, exclaves_stackshot_buffer_size);

		size_t remaining = output_length;
		uint8_t *dst = exclaves_stackshot_buffer;
		size_t page_index = 0;

		/* TODO: rdar://115413837 (Map stackshot buffer pages to a continuous range, do not copy) */
		while (remaining >= PAGE_SIZE) {
			memcpy(dst, exclaves_stackshot_buffer_pages[page_index], PAGE_SIZE);
			dst += PAGE_SIZE;
			page_index++;
			remaining -= PAGE_SIZE;
		}
		if (remaining) {
			memcpy(dst, exclaves_stackshot_buffer_pages[page_index], remaining);
		}

		kr = process_exclaves_buffer(exclaves_stackshot_buffer, (size_t)output_length, process_fn, want_raw_addresses);
	});

	if (tberr != TB_ERROR_SUCCESS) {
		exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_taker_takestackshot error 0x%x\n", tberr);
		kr = KERN_FAILURE;
		goto error_exit;
	}

error_exit:
	exclaves_debug_printf(show_progress, "exclaves stackshot: collection done with result %d\n", kr);
	return kr;
}

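/*
 * Deliver collected stackshot entries to kperf. Entries are matched to
 * threads positionally: entry i belongs to the thread named by ctid_list[i].
 */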
static kern_return_t
complete_kperf_ast(kern_return_t collect_kr, const stackshot_stackshotresult_s *result, __unused bool want_raw_addresses)
{
	if (collect_kr != KERN_SUCCESS) {
		return collect_kr;
	}

	stackshot_stackshotentry__v_visit(&result->stackshotentries, ^(size_t i, const stackshot_stackshotentry_s * _Nonnull entry) {
		assert(i < scid_list_count);
		thread_t thread = ctid_get_thread(ctid_list[i]);
		assert(thread);
		kperf_thread_exclaves_ast_handler(thread, entry);
	});

	return KERN_SUCCESS;
}

/*
 * Kernel thread that, when signalled via exclaves_collect_event, collects
 * data on the current exclave-side activity of the threads registered on
 * its waitlists.
 */
__attribute__((noreturn))
static void
exclaves_collect_threads_thread(void __unused *arg, wait_result_t __unused wr)
{
	kern_return_t kr = KERN_SUCCESS;

	kr = exclaves_allocate_ipc_buffer(NULL);
	if (kr != KERN_SUCCESS) {
		panic("exclaves stackshot: failed to allocate collect ipcb: %d", kr);
	}

	os_atomic_store(&current_thread()->th_exclaves_inspection_state, TH_EXCLAVES_INSPECTION_NOINSPECT, relaxed);
	lck_mtx_lock(&exclaves_collect_init_mtx);
	exclaves_collect_thread_ready = true;
	wakeup_all_with_inheritor(&exclaves_collect_thread_ready, THREAD_AWAKENED);
	lck_mtx_unlock(&exclaves_collect_init_mtx);

	lck_mtx_lock(&exclaves_collect_mtx);

	for (;;) {
		while (queue_empty(&exclaves_inspection_queue_stackshot) && queue_empty(&exclaves_inspection_queue_kperf)) {
			lck_mtx_sleep(&exclaves_collect_mtx, LCK_SLEEP_DEFAULT, (event_t)&exclaves_collect_event, THREAD_UNINT);
		}

		if (!queue_empty(&exclaves_inspection_queue_stackshot)) {
			// only this thread should manipulate the scid_list
			scid_list_count = prepare_scid_list_stackshot(&exclaves_inspection_queue_stackshot, scid_list, ctid_list, EXCLAVES_STACKSHOT_BATCH_SIZE);
			while (scid_list_count) {
				lck_mtx_unlock(&exclaves_collect_mtx);

				kr = collect_scid_list(stackshot_exclaves_process_result, exclaves_stackshot_raw_addresses, exclaves_stackshot_all_address_spaces);
				lck_mtx_lock(&exclaves_collect_mtx);
				clear_pending_threads_stackshot(ctid_list, scid_list_count, TH_EXCLAVES_INSPECTION_STACKSHOT);
				if (kr != KERN_SUCCESS) {
					goto stackshot_error;
				}

				scid_list_count = prepare_scid_list_stackshot(&exclaves_inspection_queue_stackshot, scid_list, ctid_list, EXCLAVES_STACKSHOT_BATCH_SIZE);
			}

stackshot_error:
			if (!queue_empty(&exclaves_inspection_queue_stackshot)) {
				clear_stackshot_queue(TH_EXCLAVES_INSPECTION_STACKSHOT);
			}
			stackshot_exclaves_process_result(kr, NULL, true);
			wakeup_all_with_inheritor(&exclaves_inspection_queue_stackshot, THREAD_AWAKENED);
		}

		if (!queue_empty(&exclaves_inspection_queue_kperf)) {
			scid_list_count = prepare_scid_list_kperf(&exclaves_inspection_queue_kperf, scid_list, ctid_list, EXCLAVES_STACKSHOT_BATCH_SIZE);
			while (scid_list_count) {
				lck_mtx_unlock(&exclaves_collect_mtx);

				kr = collect_scid_list(complete_kperf_ast, false, false);
				lck_mtx_lock(&exclaves_collect_mtx);
				clear_pending_threads_kperf(ctid_list, scid_list_count, TH_EXCLAVES_INSPECTION_KPERF);
				if (kr != KERN_SUCCESS) {
					goto kperf_error;
				}

				scid_list_count = prepare_scid_list_kperf(&exclaves_inspection_queue_kperf, scid_list, ctid_list, EXCLAVES_STACKSHOT_BATCH_SIZE);
			}
kperf_error:
			if (!queue_empty(&exclaves_inspection_queue_kperf)) {
				clear_kperf_queue(TH_EXCLAVES_INSPECTION_KPERF);
			}
		}
	}
}

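/*
 * Wake the collection thread to process whatever is on the inspection
 * queues. Callers must hold exclaves_collect_mtx.
 */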
void
exclaves_inspection_begin_collecting(void)
{
	lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

	thread_wakeup_thread((event_t)&exclaves_collect_event, exclaves_collection_thread);
}

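/*
 * Sleep until the collection thread has drained the given inspection queue,
 * inheriting priority to it. Called with exclaves_collect_mtx held.
 */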
void
exclaves_inspection_wait_complete(queue_t queue)
{
	lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

	while (!queue_empty(queue)) {
		lck_mtx_sleep_with_inheritor(&exclaves_collect_mtx, LCK_SLEEP_DEFAULT, (event_t)queue, exclaves_collection_thread, THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
	}
}

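/*
 * Boot task: connect to the stackshot service, have it allocate the shared
 * output pages, set up a contiguous staging buffer for them, and start the
 * collection thread. Returns success without initializing if the service is
 * unavailable.
 */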
static kern_return_t
exclaves_inspection_init(void)
{
	__block kern_return_t kr = KERN_SUCCESS;
	tb_error_t tberr = 0;
	tb_endpoint_t tb_endpoint = { 0 };

	assert(!os_atomic_load(&exclaves_inspection_initialized, relaxed));

	/*
	 * If there's no stackshot service available, just return.
	 */
	if (EXCLAVES_ID_STACKSHOT_SERVER_EP == UINT64_C(~0)) {
		return KERN_SUCCESS;
	}

	queue_init(&exclaves_inspection_queue_stackshot);
	queue_init(&exclaves_inspection_queue_kperf);

	tb_endpoint = tb_endpoint_create_with_value(TB_TRANSPORT_TYPE_XNU, EXCLAVES_ID_STACKSHOT_SERVER_EP, TB_ENDPOINT_OPTIONS_NONE);

	tberr = stackshot_taker__init(&tb_client, tb_endpoint);
	if (tberr != TB_ERROR_SUCCESS) {
		exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_taker_init error 0x%x\n", tberr);
		return KERN_FAILURE;
	}

	tberr = stackshot_taker_allocsharedbuffer(&tb_client, ^(stackshot_sharedbuffer_s tbresult) {
		__block size_t page_count = 0;
		exclaves_stackshot_buffer_size = 0;
		u64__v_visit(&tbresult.physaddr, ^(size_t __unused i, const uint64_t __unused item) {
			page_count++;
		});
		if (!page_count) {
			exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_taker_allocsharedbuffer did not return any page addresses\n");
			kr = KERN_RESOURCE_SHORTAGE;
			return;
		}

		if (os_mul_overflow(page_count, PAGE_SIZE, &exclaves_stackshot_buffer_size)) {
			panic("exclaves stackshot: buffer size overflow");
			return;
		}
		exclaves_stackshot_buffer = kalloc_type(uint8_t, exclaves_stackshot_buffer_size, Z_WAITOK);
		if (!exclaves_stackshot_buffer) {
			panic("exclaves stackshot: cannot allocate buffer for exclaves shared memory");
			return;
		}

		exclaves_stackshot_buffer_pages = kalloc_type(uint8_t *, page_count, Z_WAITOK);
		if (!exclaves_stackshot_buffer_pages) {
			panic("exclaves stackshot: cannot allocate buffer for exclaves shared memory addresses");
			return;
		}

		u64__v_visit(&tbresult.physaddr, ^(size_t i, const uint64_t item) {
			exclaves_stackshot_buffer_pages[i] = (uint8_t *)phystokv((pmap_paddr_t)item);
		});
	});

	if (tberr != TB_ERROR_SUCCESS) {
		exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_taker_allocsharedbuffer error 0x%x\n", tberr);
		/*
		 * Until rdar://115836013 is resolved, this failure must be
		 * suppressed.
		 */
		return KERN_SUCCESS;
	}

	// kr may have been set by the result handler above, or reflect an invalid call
	if (kr != KERN_SUCCESS) {
		goto error_exit;
	}

	exclaves_debug_printf(show_progress, "exclaves stackshot: exclaves stackshot buffer size: %zu bytes\n", exclaves_stackshot_buffer_size);

	kr = kernel_thread_start_priority(
		exclaves_collect_threads_thread, NULL, exclaves_collect_priority, &exclaves_collection_thread);
	if (kr != KERN_SUCCESS) {
		goto error_exit;
	}
	thread_set_thread_name(exclaves_collection_thread, "exclaves-stackshot");
	thread_deallocate(exclaves_collection_thread);

	lck_mtx_lock(&exclaves_collect_init_mtx);

	while (!exclaves_collect_thread_ready) {
		lck_mtx_sleep_with_inheritor(&exclaves_collect_init_mtx, LCK_SLEEP_DEFAULT, (event_t)&exclaves_collect_thread_ready, exclaves_collection_thread, THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
	}

	os_atomic_store(&exclaves_inspection_initialized, true, release);
	lck_mtx_unlock(&exclaves_collect_init_mtx);
error_exit:
	return kr;
}

EXCLAVES_BOOT_TASK(exclaves_inspection_init, EXCLAVES_BOOT_RANK_SECOND);

bool
exclaves_inspection_is_initialized(void)
{
	return os_atomic_load(&exclaves_inspection_initialized, acquire);
}

/*
 * This function expects preemption and interrupts to be disabled, as
 * exclaves_scheduler_request does.
 *
 * TH_EXCLAVES_INSPECTION_STACKSHOT is set when a stackshot is running in
 * debug mode and adds a thread to the waiting list.
 *
 * TH_EXCLAVES_INSPECTION_STACKSHOT is cleared by the collection thread
 * while it holds exclaves_collect_mtx.
 *
 * It is guaranteed that th_exclaves_inspection_state &
 * TH_EXCLAVES_INSPECTION_STACKSHOT is clear when this function returns.
 */

void
exclaves_inspection_check_ast(void)
{
	thread_t thread = current_thread();

	assert((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_NOINSPECT) == 0);

	/* This will unblock exclaves stackshot collection */
	STACKSHOT_TESTPOINT(TP_AST);

	/* Grab the mutex to prevent cleanup just after the next check */
	lck_mtx_lock(&exclaves_collect_mtx);
	while ((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_STACKSHOT) != 0) {
		lck_mtx_sleep_with_inheritor(&exclaves_collect_mtx, LCK_SLEEP_DEFAULT,
		    (event_t)&thread->th_exclaves_inspection_queue_stackshot, exclaves_collection_thread,
		    THREAD_UNINT, TIMEOUT_WAIT_FOREVER
		    );
	}

	if ((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_KPERF) != 0) {
		exclaves_inspection_queue_add(&exclaves_inspection_queue_kperf, &thread->th_exclaves_inspection_queue_kperf);
		thread_reference(thread);
		exclaves_inspection_begin_collecting();
		lck_mtx_sleep_with_inheritor(&exclaves_collect_mtx, LCK_SLEEP_DEFAULT,
		    (event_t)&thread->th_exclaves_inspection_queue_kperf, exclaves_collection_thread,
		    THREAD_UNINT, TIMEOUT_WAIT_FOREVER
		    );
	}
	lck_mtx_unlock(&exclaves_collect_mtx);
}


/* this should come from somewhere in EP */
#define STACKSHOT_PANIC_MAGIC 0xdeadcafebeefbabe
typedef struct stackshot_panic_magic {
	uint64_t magic;
	uint64_t size;
} stackshot_panic_magic_t;
_Static_assert(sizeof(stackshot_panic_magic_t) == 16, "panic magic should be 16 bytes");

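/*
 * Called from the debugger/panic context: copy the shared buffer into the
 * contiguous staging buffer and, if the exclave side left a valid panic
 * magic footer, report the buffer and the size recorded in that footer.
 */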
void
kdp_read_panic_exclaves_stackshot(struct exclaves_panic_stackshot *eps)
{
	assert(debug_mode_active());

	*eps = (struct exclaves_panic_stackshot){ 0 };

	if (!exclaves_inspection_is_initialized()) {
		return;
	}

	/* copy the entire potential range of the buffer */
	size_t remaining = exclaves_stackshot_buffer_size;
	uint8_t *dst = exclaves_stackshot_buffer;
	size_t page_index = 0;

	while (remaining >= PAGE_SIZE) {
		memcpy(dst, exclaves_stackshot_buffer_pages[page_index], PAGE_SIZE);
		dst += PAGE_SIZE;
		page_index++;
		remaining -= PAGE_SIZE;
	}
	if (remaining) {
		memcpy(dst, exclaves_stackshot_buffer_pages[page_index], remaining);
	}

	if (exclaves_stackshot_buffer_size <= sizeof(stackshot_panic_magic_t)) {
		return;
	}

	/* check for panic magic value in xnu's copy of the region */
	stackshot_panic_magic_t *panic_magic = __IGNORE_WCASTALIGN((stackshot_panic_magic_t *)(exclaves_stackshot_buffer + (exclaves_stackshot_buffer_size - sizeof(stackshot_panic_magic_t))));
	if (panic_magic->magic != STACKSHOT_PANIC_MAGIC) {
		return;
	}

	eps->stackshot_buffer = exclaves_stackshot_buffer;
	eps->stackshot_buffer_size = panic_magic->size;
}

#endif /* CONFIG_EXCLAVES */