/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>
#include <mach_kdp.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <ptrauth.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <pexpert/pexpert.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_common.h>
#include <uuid/uuid.h>
#include <sys/codesign.h>
#include <sys/time.h>

#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOKitServer.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h> /* for btop */

#include <console/video_console.h>
#include <console/serial_protos.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>
#include <kern/ledger.h>


#if DEVELOPMENT || DEBUG
#include <kern/ext_paniclog.h>
#endif

#if CONFIG_EXCLAVES
#include <kern/exclaves_panic.h>
#include <kern/exclaves_inspection.h>
#endif

#if MACH_KDP
void kdp_trap(unsigned int, struct arm_saved_state *);
#endif

extern kern_return_t do_panic_stackshot(void *);
extern void kdp_snapshot_preflight(int pid, void * tracebuf,
    uint32_t tracebuf_size, uint64_t flags,
    kcdata_descriptor_t data_p,
    uint64_t since_timestamp, uint32_t pagetable_mask);
extern int kdp_stack_snapshot_bytes_traced(void);
extern int kdp_stack_snapshot_bytes_uncompressed(void);

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 14
static struct kcdata_descriptor kc_panic_data;

extern char iBoot_version[];
#if defined(TARGET_OS_OSX) && defined(__arm64__)
extern char iBoot_Stage_2_version[];
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t * frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
struct proc;
extern int proc_pid(struct proc *p);
extern void proc_name_kdp(struct proc *, char *, int);

/*
 * Make sure there's enough space to include the relevant bits in the format required
 * within the space allocated for the panic version string in the panic header.
 * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)'.
 */
#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"
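/* The format above renders as, e.g., "17.4 (21E230)" (hypothetical values); each component is truncated to 14 bytes. */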

extern const char version[];
extern char osversion[];
extern char osproductversion[];
extern char osreleasetype[];

#if defined(XNU_TARGET_OS_BRIDGE)
extern char macosproductversion[];
extern char macosversion[];
#endif

extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;

/*
 * gTargetTypeBuffer is sized at 16 and gModelTypeBuffer at 32, since the
 * target name and model name typically don't exceed those lengths.
 */
extern char gTargetTypeBuffer[16];
extern char gModelTypeBuffer[32];

extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern boolean_t kernelcache_uuid_valid;
extern uuid_t kernelcache_uuid;
extern uuid_string_t bootsessionuuid_string;

extern uint64_t roots_installed;

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)
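/*
 * A stack frame record is a {saved FP, saved LR} pair; FP_LR_OFFSET and
 * FP_LR_OFFSET64 are the byte offsets of the saved LR within a 32-bit and
 * 64-bit frame record, and FP_MAX_NUM_TO_EVALUATE bounds how many frames
 * print_one_backtrace() walks.
 */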

/* Timeout for all processors responding to debug crosscall */
MACHINE_TIMEOUT(debug_ack_timeout, "debug-ack", 240000, MACHINE_TIMEOUT_UNIT_TIMEBASE, NULL);

/* Forward function declarations */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);


/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
volatile unsigned int debug_cpus_spinning = 0; /* Number of signalled CPUs still spinning on mp_kdp_trap (in DebuggerXCall). */
unsigned int DebugContextCount = 0;

#if defined(__arm64__)
uint8_t PE_smc_stashed_x86_system_state = 0xFF;
uint8_t PE_smc_stashed_x86_power_state = 0xFF;
uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF;
uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX;
uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
uint64_t PE_nvram_stashed_x86_macos_slide = UINT64_MAX;
#endif


/*
 * Print a backtrace by walking frame pointers, starting at topfp.
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit, boolean_t print_kexts_in_backtrace)
{
    unsigned int i = 0;
    addr64_t lr = 0;
    addr64_t fp = topfp;
    addr64_t fp_for_ppn = 0;
    ppnum_t ppn = (ppnum_t)NULL;
    vm_offset_t raddrs[FP_MAX_NUM_TO_EVALUATE] = { 0 };
    bool dump_kernel_stack = (fp >= VM_MIN_KERNEL_ADDRESS);

#if defined(HAS_APPLE_PAC)
    fp = (addr64_t)ptrauth_strip((void *)fp, ptrauth_key_frame_pointer);
#endif
    do {
        if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
            break;
        }
        if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) {
            break;
        }
        if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
            break;
        }

        /*
         * Check whether the current address falls on a different ppn
         * than the one previously computed (to avoid recomputation),
         * via ((addr ^ fp_for_ppn) >> PAGE_SHIFT).
         */
        if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
            fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
#if defined(HAS_APPLE_PAC)
                /* return addresses on stack will be signed by arm64e ABI */
                lr = (addr64_t) ptrauth_strip((void *)lr, ptrauth_key_return_address);
#endif
            } else {
                lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
            } else {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
            }
            break;
        }
        if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp);
            fp_for_ppn = fp;
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
#if defined(HAS_APPLE_PAC)
                /* frame pointers on stack will be signed by arm64e ABI */
                fp = (addr64_t) ptrauth_strip((void *)fp, ptrauth_key_frame_pointer);
#endif
            } else {
                fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
            } else {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
            }
            break;
        }
        /*
         * Counter 'i' may == FP_MAX_NUM_TO_EVALUATE when running one
         * extra round to check whether we have all frames in order to
         * indicate (in)complete backtrace below. This happens in a case
         * where total frame count and FP_MAX_NUM_TO_EVALUATE are equal.
         * Do not capture anything.
         */
        if (i < FP_MAX_NUM_TO_EVALUATE && lr) {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
            } else {
                paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
            }
            raddrs[i] = lr;
        }
    } while ((++i <= FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));

    if (i > FP_MAX_NUM_TO_EVALUATE && fp != 0) {
        paniclog_append_noflush("Backtrace continues...\n");
    }

    if (print_kexts_in_backtrace && i > 0) {
        kmod_panic_dump(&raddrs[0], i);
    }
}

#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);

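/* Dump the thread-pointer registers (TPIDR_EL1, TPIDR_EL0, TPIDRRO_EL0) into the panic log. */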
static void
panic_display_tpidrs(void)
{
#if defined(__arm64__)
    paniclog_append_noflush("TPIDRx_ELy = {1: 0x%016llx 0: 0x%016llx 0ro: 0x%016llx }\n",
        __builtin_arm_rsr64("TPIDR_EL1"), __builtin_arm_rsr64("TPIDR_EL0"),
        __builtin_arm_rsr64("TPIDRRO_EL0"));
#endif //defined(__arm64__)
}

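/*
 * Best-effort PC sampling of the other cores via their UTTDBG registers, to
 * help diagnose panics where a hung core leaves no other evidence in the log.
 */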
static void
panic_display_hung_cpus_help(void)
{
#if defined(__arm64__)
    const uint32_t pcsr_offset = 0x90;

    /*
     * Print some info that might help in cases where nothing
     * else does.
     */
    const ml_topology_info_t *info = ml_get_topology_info();
    if (info) {
        unsigned i, retry;

        for (i = 0; i < info->num_cpus; i++) {
            if (!PE_cpu_power_check_kdp(i)) {
                paniclog_append_noflush("CORE %u is offline, skipping\n", i);
                continue;
            }
            if (info->cpus[i].cpu_UTTDBG_regs) {
                volatile uint64_t *pcsr = (volatile uint64_t*)(info->cpus[i].cpu_UTTDBG_regs + pcsr_offset);
                volatile uint32_t *pcsrTrigger = (volatile uint32_t*)pcsr;
                uint64_t pc = 0;

                // A number of retries may be needed before a sample is produced.
                for (retry = 1024; retry && !pc; retry--) {
                    // A 32-bit read is required to make a PC sample be produced, else we'll only get a zero.
                    (void)*pcsrTrigger;
                    pc = *pcsr;
                }

                // Post-processing (same as astris does)
                if (pc >> 48) {
                    pc |= 0xffff000000000000ull;
                }
                paniclog_append_noflush("CORE %u recently retired instr at 0x%016llx\n", i, pc);
            }
        }
    }
#endif //defined(__arm64__)
}


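/* Empty stubs on this configuration; do_print_all_backtraces() calls them unconditionally. */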
static void
panic_display_pvhs_locked(void)
{
}

static void
panic_display_pvh_to_lock(void)
{
}

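/*
 * Print the PC/LR/FP that each other core reported when it acknowledged the
 * panic IPI (captured into its cpu_data by DebuggerXCall below).
 */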
static void
panic_display_last_pc_lr(void)
{
#if defined(__arm64__)
    const int max_cpu = ml_get_max_cpu_number();

    for (int cpu = 0; cpu <= max_cpu; cpu++) {
        cpu_data_t *current_cpu_datap = cpu_datap(cpu);

        if (current_cpu_datap == NULL) {
            continue;
        }

        if (current_cpu_datap == getCpuDatap()) {
            /**
             * Skip printing the PC/LR if this is the CPU
             * that initiated the panic.
             */
            paniclog_append_noflush("CORE %u is the one that panicked. Check the full backtrace for details.\n", cpu);
            continue;
        }

        paniclog_append_noflush("CORE %u: PC=0x%016llx, LR=0x%016llx, FP=0x%016llx\n", cpu,
            current_cpu_datap->ipi_pc, (uint64_t)VM_KERNEL_STRIP_PTR(current_cpu_datap->ipi_lr),
            (uint64_t)VM_KERNEL_STRIP_PTR(current_cpu_datap->ipi_fp));
    }
#endif
}

#if CONFIG_EXCLAVES
static void
panic_report_exclaves_stackshot(void)
{
    if (exclaves_panic_ss_status == EXCLAVES_PANIC_STACKSHOT_FOUND) {
        paniclog_append_noflush("** Exclaves panic stackshot found\n");
    } else if (exclaves_panic_ss_status == EXCLAVES_PANIC_STACKSHOT_NOT_FOUND) {
        paniclog_append_noflush("** Exclaves panic stackshot not found\n");
    } else if (exclaves_panic_ss_status == EXCLAVES_PANIC_STACKSHOT_DECODE_FAILED) {
        paniclog_append_noflush("!! Exclaves panic stackshot decode failed !!\n");
    }
}
#endif /* CONFIG_EXCLAVES */

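/*
 * Compose the body of the panic log: device and version identifiers,
 * diagnostic helpers, the panicked thread's backtrace, and (when possible) a
 * compressed kcdata stackshot appended after the log text.
 */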
static void
do_print_all_backtraces(const char *message, uint64_t panic_options)
{
    int logversion = PANICLOG_VERSION;
    thread_t cur_thread = current_thread();
    uintptr_t cur_fp;
    task_t task;
    struct proc *proc;
    int print_vnodes = 0;
    const char *nohilite_thread_marker = "\t";

    /* end_marker_bytes set to 200 for printing END marker + stackshot summary info always */
    int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
    int bytes_uncompressed = 0;
    uint64_t bytes_used = 0ULL;
    int err = 0;
    char *stackshot_begin_loc = NULL;
    kc_format_t kc_format;
    bool filesetKC = false;
#if CONFIG_EXT_PANICLOG
    uint32_t ext_paniclog_bytes = 0;
#endif

#if defined(__arm64__)
    __asm__ volatile ("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
    if (panic_bt_depth != 0) {
        return;
    }
    panic_bt_depth++;

    __unused bool result = PE_get_primary_kc_format(&kc_format);
    assert(result == true);
    filesetKC = kc_format == KCFormatFileset;

    /* Truncate panic string to 1200 bytes */
    paniclog_append_noflush("Debugger message: %.1200s\n", message);
    if (debug_enabled) {
        paniclog_append_noflush("Device: %s\n",
            ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
        paniclog_append_noflush("Hardware Model: %s\n",
            ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
        paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
            gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
            gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
        if (last_hwaccess_thread) {
            paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
        }
        paniclog_append_noflush("Boot args: %s\n", PE_boot_args());
    }
    paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
    paniclog_append_noflush("OS release type: %.256s\n",
        ('\0' != osreleasetype[0]) ? osreleasetype : "Not set yet");
    paniclog_append_noflush("OS version: %.256s\n",
        ('\0' != osversion[0]) ? osversion : "Not set yet");
#if defined(XNU_TARGET_OS_BRIDGE)
    paniclog_append_noflush("macOS version: %.256s\n",
        ('\0' != macosversion[0]) ? macosversion : "Not set");
#endif
    paniclog_append_noflush("Kernel version: %.512s\n", version);

#if CONFIG_EXCLAVES
    exclaves_panic_append_info();
#endif

    if (kernelcache_uuid_valid) {
        if (filesetKC) {
            paniclog_append_noflush("Fileset Kernelcache UUID: ");
        } else {
            paniclog_append_noflush("KernelCache UUID: ");
        }
        for (size_t index = 0; index < sizeof(uuid_t); index++) {
            paniclog_append_noflush("%02X", kernelcache_uuid[index]);
        }
        paniclog_append_noflush("\n");
    }
    panic_display_kernel_uuid();

    if (bootsessionuuid_string[0] != '\0') {
        paniclog_append_noflush("Boot session UUID: %s\n", bootsessionuuid_string);
    } else {
        paniclog_append_noflush("Boot session UUID not yet initialized\n");
    }

    paniclog_append_noflush("iBoot version: %.128s\n", iBoot_version);
#if defined(TARGET_OS_OSX) && defined(__arm64__)
    paniclog_append_noflush("iBoot Stage 2 version: %.128s\n", iBoot_Stage_2_version);
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

    paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
    paniclog_append_noflush("roots installed: %lld\n", roots_installed);
#if defined(XNU_TARGET_OS_BRIDGE)
    paniclog_append_noflush("x86 EFI Boot State: ");
    if (PE_smc_stashed_x86_efi_boot_state != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 System State: ");
    if (PE_smc_stashed_x86_system_state != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 Power State: ");
    if (PE_smc_stashed_x86_power_state != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 Shutdown Cause: ");
    if (PE_smc_stashed_x86_shutdown_cause != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_shutdown_cause);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 Previous Power Transitions: ");
    if (PE_smc_stashed_x86_prev_power_transitions != UINT64_MAX) {
        paniclog_append_noflush("0x%llx\n", PE_smc_stashed_x86_prev_power_transitions);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("PCIeUp link state: ");
    if (PE_pcie_stashed_link_state != UINT32_MAX) {
        paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("macOS kernel slide: ");
    if (PE_nvram_stashed_x86_macos_slide != UINT64_MAX) {
        paniclog_append_noflush("%#llx\n", PE_nvram_stashed_x86_macos_slide);
    } else {
        paniclog_append_noflush("not available\n");
    }
#endif
    if (panic_data_buffers != NULL) {
        paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name);
        uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf;
        for (int i = 0; i < panic_data_buffers->len; i++) {
            paniclog_append_noflush("%02X", panic_buffer_data[i]);
        }
        paniclog_append_noflush("\n");
    }
    paniclog_append_noflush("Paniclog version: %d\n", logversion);

    panic_display_kernel_aslr();
    panic_display_times();
    panic_display_zalloc();
    panic_display_hung_cpus_help();
    panic_display_tpidrs();
    panic_display_pvhs_locked();
    panic_display_pvh_to_lock();
    panic_display_last_pc_lr();
#if CONFIG_ECC_LOGGING
    panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */
    panic_display_compressor_stats();

#if DEVELOPMENT || DEBUG
    if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) {
        paniclog_append_noflush("Unsigned code exec failures: %u\n", cs_debug_unsigned_exec_failures);
        paniclog_append_noflush("Unsigned code mmap failures: %u\n", cs_debug_unsigned_mmap_failures);
    }
#endif

    // Highlight threads that used high amounts of CPU in the panic log if requested (historically requested for watchdog panics)
    if (panic_options & DEBUGGER_OPTION_PRINT_CPU_USAGE_PANICLOG) {
        thread_t top_runnable[5] = {0};
        thread_t thread;
        int total_cpu_usage = 0;

        print_vnodes = 1;


        for (thread = (thread_t)queue_first(&threads);
            PANIC_VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
            thread = (thread_t)queue_next(&thread->threads)) {
            total_cpu_usage += thread->cpu_usage;

            // Look for the 5 runnable threads with highest priority
            if (thread->state & TH_RUN) {
                int k;
                thread_t comparison_thread = thread;

                for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
                    if (top_runnable[k] == 0) {
                        top_runnable[k] = comparison_thread;
                        break;
                    } else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
                        thread_t temp = top_runnable[k];
                        top_runnable[k] = comparison_thread;
                        comparison_thread = temp;
                    } // if comparison thread has higher priority than previously saved thread
                } // loop through highest priority runnable threads
            } // Check if thread is runnable
        } // Loop through all threads

        // Print the relevant info for each thread identified
        paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
        paniclog_append_noflush("Thread task pri cpu_usage\n");

        for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
            if (top_runnable[i] &&
                panic_get_thread_proc_task(top_runnable[i], &task, &proc) && proc) {
                char name[MAXCOMLEN + 1];
                proc_name_kdp(proc, name, sizeof(name));
                paniclog_append_noflush("%p %s %d %d\n",
                    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
            }
        } // Loop through highest priority runnable threads
        paniclog_append_noflush("\n");
    }

    // print current task info
    if (panic_get_thread_proc_task(cur_thread, &task, &proc)) {
        if (PANIC_VALIDATE_PTR(task->map) &&
            PANIC_VALIDATE_PTR(task->map->pmap)) {
            ledger_amount_t resident = 0;
            if (task != kernel_task) {
                ledger_get_balance(task->ledger, task_ledgers.phys_mem, &resident);
                resident >>= VM_MAP_PAGE_SHIFT(task->map);
            }
            paniclog_append_noflush("Panicked task %p: %lld pages, %d threads: ",
                task, resident, task->thread_count);
        } else {
            paniclog_append_noflush("Panicked task %p: %d threads: ",
                task, task->thread_count);
        }

        if (proc) {
            char name[MAXCOMLEN + 1];
            proc_name_kdp(proc, name, sizeof(name));
            paniclog_append_noflush("pid %d: %s", proc_pid(proc), name);
        } else {
            paniclog_append_noflush("unknown task");
        }

        paniclog_append_noflush("\n");
    }

    if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
        paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
            cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE, filesetKC);
#else
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE, filesetKC);
#endif
    } else {
        paniclog_append_noflush("Could not print panicked thread backtrace:"
            "frame pointer outside kernel vm.\n");
    }

    paniclog_append_noflush("\n");
    if (filesetKC) {
        kext_dump_panic_lists(&paniclog_append_noflush);
        paniclog_append_noflush("\n");
    }
    panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;
    /* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */
    if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) {
        snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR,
            osproductversion, osversion);
    }
#if defined(XNU_TARGET_OS_BRIDGE)
    if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) {
        snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR,
            macosproductversion, macosversion);
    }
#endif
    if (bootsessionuuid_string[0] != '\0') {
        memcpy(panic_info->eph_bootsessionuuid_string, bootsessionuuid_string,
            sizeof(panic_info->eph_bootsessionuuid_string));
    }
    panic_info->eph_roots_installed = roots_installed;

    if (debug_ack_timeout_count) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
    } else if (stackshot_active()) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
    } else {
        /* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
        debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
        stackshot_begin_loc = debug_buf_ptr;

        bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
        err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
            KCDATA_BUFFER_BEGIN_COMPRESSED, bytes_remaining - end_marker_bytes,
            KCFLAG_USE_MEMCOPY);
        if (err == KERN_SUCCESS) {
            uint64_t stackshot_flags = (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
                STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_DO_COMPRESS |
                STACKSHOT_DISABLE_LATENCY_INFO | STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_DQ |
                STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT);

            err = kcdata_init_compress(&kc_panic_data, KCDATA_BUFFER_BEGIN_STACKSHOT, kdp_memcpy, KCDCT_ZLIB);
            if (err != KERN_SUCCESS) {
                panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COMPRESS_FAILED;
                stackshot_flags &= ~STACKSHOT_DO_COMPRESS;
            }
            if (filesetKC) {
                stackshot_flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
            }

            kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
                stackshot_flags, &kc_panic_data, 0, 0);
            err = do_panic_stackshot(NULL);
            bytes_traced = kdp_stack_snapshot_bytes_traced();
            if (bytes_traced > 0 && !err) {
                debug_buf_ptr += bytes_traced;
                panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
                panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
                panic_info->eph_stackshot_len = bytes_traced;

                panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
#if CONFIG_EXCLAVES
                panic_report_exclaves_stackshot();
#endif /* CONFIG_EXCLAVES */
                if (stackshot_flags & STACKSHOT_DO_COMPRESS) {
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED;
                    bytes_uncompressed = kdp_stack_snapshot_bytes_uncompressed();
                    paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d (Uncompressed %d) **\n", bytes_traced, bytes_uncompressed);
                } else {
                    paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
                }
            } else {
                bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
#if CONFIG_EXCLAVES
                panic_report_exclaves_stackshot();
#endif /* CONFIG_EXCLAVES */
                if (bytes_used > 0) {
                    /* Zero out the stackshot data */
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
                } else {
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
                }
            }
        } else {
            panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
            panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
            paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
        }
    }

#if CONFIG_EXT_PANICLOG
    // Write the ext paniclog at the end of the paniclog region.
    ext_paniclog_bytes = ext_paniclog_write_panicdata();
    panic_info->eph_ext_paniclog_offset = (ext_paniclog_bytes != 0) ?
        PE_get_offset_into_panic_region((debug_buf_base + debug_buf_size) - ext_paniclog_bytes) :
        0;
    panic_info->eph_ext_paniclog_len = ext_paniclog_bytes;
#endif

    assert(panic_info->eph_other_log_offset != 0);

    if (print_vnodes != 0) {
        panic_print_vnodes();
    }

    panic_bt_depth--;
}

/*
 * Entry to print_all_backtraces is serialized by the debugger lock.
 */
static void
print_all_backtraces(const char *message, uint64_t panic_options)
{
    unsigned int initial_not_in_kdp = not_in_kdp;

    cpu_data_t *cpu_data_ptr = getCpuDatap();

    assert(cpu_data_ptr->PAB_active == FALSE);
    cpu_data_ptr->PAB_active = TRUE;

    /*
     * Because print_all_backtraces uses the pmap routines, it needs to
     * avoid taking pmap locks. Right now, this is conditionalized on
     * not_in_kdp.
     */
    not_in_kdp = 0;
    do_print_all_backtraces(message, panic_options);

    not_in_kdp = initial_not_in_kdp;

    cpu_data_ptr->PAB_active = FALSE;
}

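/* Log mach_absolute_time and the boot/sleep/wake/calendar epoch times, if the clock can be sampled safely. */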
void
panic_display_times()
{
    if (kdp_clock_is_locked()) {
        paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
        return;
    }

    extern lck_ticket_t clock_lock;
    extern lck_grp_t clock_lock_grp;

    if ((is_clock_configured) && (lck_ticket_lock_try(&clock_lock, &clock_lock_grp))) {
        clock_sec_t secs, boot_secs;
        clock_usec_t usecs, boot_usecs;

        lck_ticket_unlock(&clock_lock);

        clock_get_calendar_microtime(&secs, &usecs);
        clock_get_boottime_microtime(&boot_secs, &boot_usecs);

        paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time());
        paniclog_append_noflush("Epoch Time: sec usec\n");
        paniclog_append_noflush(" Boot : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
        paniclog_append_noflush(" Sleep : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
        paniclog_append_noflush(" Wake : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
        paniclog_append_noflush(" Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
    }
}

void
panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
    // empty stub. Really only used on x86_64.
    return;
}

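/*
 * Top-level entry point for writing the panic log on arm/arm64: spins if the
 * panic buffer is not yet initialized, records panic flags, composes the log
 * and stackshot via print_all_backtraces(), then flushes the buffers.
 */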
void
SavePanicInfo(
    const char *message, __unused void *panic_data, uint64_t panic_options)
{
    /*
     * This should be initialized by the time we get here, but
     * if it is not, asserting about it will be of no use (it will
     * come right back to here), so just loop right here and now.
     * This prevents early-boot panics from becoming recursive and
     * thus makes them easier to debug. If you attached to a device
     * and see your PC here, look down a few frames to see your
     * early-boot panic there.
     */
    while (!panic_info || panic_info->eph_panic_log_offset == 0) {
        // rdar://87170225 (PanicHardening: audit panic code for naked spinloops)
        // rdar://88094367 (Add test hooks for panic at different stages in XNU)
        ;
    }

    if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
    }

    if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
    }

#if defined(XNU_TARGET_OS_BRIDGE)
    panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
    panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
    panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
#endif

    /*
     * On newer targets, panic data is stored directly into the iBoot panic region.
     * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
     * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
     */
    if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
        unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
        PE_update_panic_crc((unsigned char*)gPanicBase, &pi_size);
        PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
    }

    if (PanicInfoSaved || (debug_buf_size == 0)) {
        return;
    }

    PanicInfoSaved = TRUE;


    print_all_backtraces(message, panic_options);

    assert(panic_info->eph_panic_log_len != 0);
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    PEHaltRestart(kPEPanicSync);

    /*
     * Notifies registered IOPlatformPanicAction callbacks
     * (which includes one to disable the memcache) and flushes
     * the buffer contents from the cache
     */
    paniclog_flush();
}

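/*
 * Finalize the panic log: update the trailing log length, recompute the panic
 * region CRC, and sync the panic buffers out of the cache.
 */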
void
paniclog_flush()
{
    unsigned int panicbuf_length = 0;

    panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
    if (!debug_buf_ptr || !panicbuf_length) {
        return;
    }

    /*
     * Updates the log length of the last part of the panic log.
     */
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    /*
     * Updates the metadata at the beginning of the panic buffer,
     * updates the CRC.
     */
    PE_update_panic_crc((unsigned char *)gPanicBase, &panicbuf_length);

    /*
     * This is currently unused by platform KEXTs on embedded but is
     * kept for compatibility with the published IOKit interfaces.
     */
    PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

    PE_sync_panic_buffers();
}

954
955/*
956 * @function DebuggerXCallEnter
957 *
958 * @abstract IPI other cores so this core can run in a single-threaded context.
959 *
960 * @discussion This function should be called with the debugger lock held. It
961 * signals the other cores to go into a busy loop so this core can run in a
962 * single-threaded context and inspect kernel memory.
963 *
964 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
965 * if we can't synch with the other cores. This is inherently unsafe and should
966 * only be used if the kernel is going down in flames anyway.
967 *
968 * @param is_stackshot If true, this is a stackshot request.
969 *
970 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
971 * proceed_on_sync_failure is false.
972 */
973kern_return_t
974DebuggerXCallEnter(
975 boolean_t proceed_on_sync_failure, bool is_stackshot)
976{
977 uint64_t max_mabs_time, current_mabs_time;
978 int cpu;
979 int max_cpu;
980 cpu_data_t *target_cpu_datap;
981 cpu_data_t *cpu_data_ptr = getCpuDatap();
982
983 /* Check for nested debugger entry. */
984 cpu_data_ptr->debugger_active++;
985 if (cpu_data_ptr->debugger_active != 1) {
986 return KERN_SUCCESS;
987 }
988
989 /*
990 * If debugger_sync is not 0, someone responded excessively late to the last
991 * debug request (we zero the sync variable in the return function). Zero it
992 * again here. This should prevent us from getting out of sync (heh) and
993 * timing out on every entry to the debugger if we timeout once.
994 */
995
996 debugger_sync = 0;
997 mp_kdp_trap = 1;
998 debug_cpus_spinning = 0;
999
1000#pragma unused(is_stackshot)
1001
1002 /*
1003 * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to
1004 * synchronize with every CPU that we appeared to signal successfully (cpu_signal
1005 * is not synchronous).
1006 */
1007 max_cpu = ml_get_max_cpu_number();
1008
1009 boolean_t immediate_halt = FALSE;
1010 if (proceed_on_sync_failure && force_immediate_debug_halt) {
1011 immediate_halt = TRUE;
1012 }
1013
1014 if (!immediate_halt) {
1015 for (cpu = 0; cpu <= max_cpu; cpu++) {
1016 target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1017
1018 if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
1019 continue;
1020 }
1021
1022 kern_return_t ret = cpu_signal(target: target_cpu_datap, SIGPdebug, p0: (void *)NULL, NULL);
1023 if (ret == KERN_SUCCESS) {
1024 os_atomic_inc(&debugger_sync, relaxed);
1025 os_atomic_inc(&debug_cpus_spinning, relaxed);
1026 } else if (proceed_on_sync_failure) {
1027 kprintf(fmt: "cpu_signal failed in DebuggerXCallEnter\n");
1028 }
1029 }
1030
1031 max_mabs_time = os_atomic_load(&debug_ack_timeout, relaxed);
1032
1033 if (max_mabs_time > 0) {
1034 current_mabs_time = mach_absolute_time();
1035 max_mabs_time += current_mabs_time;
1036 assert(max_mabs_time > current_mabs_time);
1037 }
1038
1039 /*
1040 * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we
1041 * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
1042 * uninterruptibly spinning on someone else. The best we can hope for is that
1043 * all other CPUs have either responded or are spinning in a context that is
1044 * debugger safe.
1045 */
1046 while ((debugger_sync != 0) && (max_mabs_time == 0 || current_mabs_time < max_mabs_time)) {
1047 current_mabs_time = mach_absolute_time();
1048 }
1049 }
1050
1051 if (!proceed_on_sync_failure && (max_mabs_time > 0 && current_mabs_time >= max_mabs_time)) {
1052 __builtin_arm_dmb(DMB_ISH);
1053 for (cpu = 0; cpu <= max_cpu; cpu++) {
1054 target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1055
1056 if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
1057 continue;
1058 }
1059 if (!(target_cpu_datap->cpu_signal & SIGPdebug)) {
1060 continue;
1061 }
1062 if (processor_array[cpu]->state <= PROCESSOR_PENDING_OFFLINE) {
1063 /*
1064 * This is a processor that was successfully sent a SIGPdebug signal
1065 * but which hasn't acknowledged it because it went offline with
1066 * interrupts disabled before the IPI was delivered, so count it
1067 * here.
1068 */
1069 os_atomic_dec(&debugger_sync, relaxed);
1070 kprintf(fmt: "%s>found CPU %d offline, debugger_sync=%d\n", __FUNCTION__, cpu, debugger_sync);
1071 continue;
1072 }
1073
1074 kprintf(fmt: "%s>Debugger synch pending on cpu %d\n", __FUNCTION__, cpu);
1075 }
1076
1077 if (debugger_sync == 0) {
1078 return KERN_SUCCESS;
1079 } else {
1080 DebuggerXCallReturn();
1081 kprintf(fmt: "%s>returning KERN_OPERATION_TIMED_OUT\n", __FUNCTION__);
1082 return KERN_OPERATION_TIMED_OUT;
1083 }
1084 } else if (immediate_halt || (max_mabs_time > 0 && current_mabs_time >= max_mabs_time)) {
1085 /*
1086 * For the moment, we're aiming for a timeout that the user shouldn't notice,
1087 * but will be sufficient to let the other core respond.
1088 */
1089 __builtin_arm_dmb(DMB_ISH);
1090 for (cpu = 0; cpu <= max_cpu; cpu++) {
1091 target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1092
1093 if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
1094 continue;
1095 }
1096 if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) {
1097 continue;
1098 }
1099 if (proceed_on_sync_failure) {
1100 paniclog_append_noflush(format: "Attempting to forcibly halt cpu %d\n", cpu);
1101 dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu_index: cpu, timeout_ns: 0);
1102 if (halt_status < 0) {
1103 paniclog_append_noflush(format: "cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(status: halt_status));
1104 } else {
1105 if (halt_status > 0) {
1106 paniclog_append_noflush(format: "cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(status: halt_status));
1107 }
1108 target_cpu_datap->halt_status = CPU_HALTED;
1109 }
1110 } else {
1111 kprintf(fmt: "Debugger synch pending on cpu %d\n", cpu);
1112 }
1113 }
1114 if (proceed_on_sync_failure) {
1115 for (cpu = 0; cpu <= max_cpu; cpu++) {
1116 target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1117
1118 if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
1119 (target_cpu_datap->halt_status == CPU_NOT_HALTED)) {
1120 continue;
1121 }
1122 dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu_index: cpu,
1123 NSEC_PER_SEC, state: &target_cpu_datap->halt_state);
1124 if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) {
1125 paniclog_append_noflush(format: "Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(status: halt_status));
1126 } else {
1127 paniclog_append_noflush(format: "cpu %d successfully halted\n", cpu);
1128 target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
1129 }
1130 }
1131 if (immediate_halt) {
1132 paniclog_append_noflush(format: "Immediate halt requested on all cores\n");
1133 } else {
1134 paniclog_append_noflush(format: "Debugger synchronization timed out; waited %llu nanoseconds\n",
1135 os_atomic_load(&debug_ack_timeout, relaxed));
1136 }
1137 debug_ack_timeout_count++;
1138 return KERN_SUCCESS;
1139 } else {
1140 DebuggerXCallReturn();
1141 return KERN_OPERATION_TIMED_OUT;
1142 }
1143 } else {
1144 return KERN_SUCCESS;
1145 }
1146}

/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter().
 *
 * @discussion This function should be called with the debugger lock held.
 */
void
DebuggerXCallReturn(
    void)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();
    uint64_t max_mabs_time, current_mabs_time;

    cpu_data_ptr->debugger_active--;
    if (cpu_data_ptr->debugger_active != 0) {
        return;
    }

    mp_kdp_trap = 0;
    debugger_sync = 0;

    max_mabs_time = os_atomic_load(&debug_ack_timeout, relaxed);

    if (max_mabs_time > 0) {
        current_mabs_time = mach_absolute_time();
        max_mabs_time += current_mabs_time;
        assert(max_mabs_time > current_mabs_time);
    }

    /*
     * Wait for other CPUs to stop spinning on mp_kdp_trap (see DebuggerXCall).
     * It's possible for one or more CPUs to not decrement debug_cpus_spinning,
     * since they may be stuck somewhere else with interrupts disabled.
     * Wait for DEBUG_ACK_TIMEOUT ns for a response and move on if we don't get it.
     *
     * Note that the same is done in DebuggerXCallEnter, when we wait for other
     * CPUs to update debugger_sync. If we time out, let's hope for all CPUs to be
     * spinning in a debugger-safe context.
     */
    while ((os_atomic_load_exclusive(&debug_cpus_spinning, relaxed) != 0) &&
        (max_mabs_time == 0 || current_mabs_time < max_mabs_time)) {
        __builtin_arm_wfe();
        current_mabs_time = mach_absolute_time();
    }
    os_atomic_clear_exclusive();
}

extern void wait_while_mp_kdp_trap(bool check_SIGPdebug);
/*
 * Spin while mp_kdp_trap is set.
 *
 * processor_offline() calls this with check_SIGPdebug=true
 * to break out of the spin loop if the cpu has SIGPdebug
 * pending.
 */
void
wait_while_mp_kdp_trap(bool check_SIGPdebug)
{
    bool found_mp_kdp_trap = false;
    bool found_SIGPdebug = false;

    while (os_atomic_load_exclusive(&mp_kdp_trap, relaxed) != 0) {
        found_mp_kdp_trap = true;
        if (check_SIGPdebug && cpu_has_SIGPdebug_pending()) {
            found_SIGPdebug = true;
            break;
        }
        __builtin_arm_wfe();
    }
    os_atomic_clear_exclusive();

    if (check_SIGPdebug && found_mp_kdp_trap) {
        kprintf("%s>found_mp_kdp_trap=true found_SIGPdebug=%s\n", __FUNCTION__, found_SIGPdebug ? "true" : "false");
    }
}

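/*
 * IPI handler run on each signalled CPU: record its PC/LR/FP and interrupted
 * context for the panic log, acknowledge the cross call by decrementing
 * debugger_sync, then spin on mp_kdp_trap until the debug CPU releases it.
 */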
void
DebuggerXCall(
    void *ctx)
{
    boolean_t save_context = FALSE;
    vm_offset_t kstackptr = 0;
    arm_saved_state_t *regs = (arm_saved_state_t *) ctx;

    if (regs != NULL) {
#if defined(__arm64__)
        current_cpu_datap()->ipi_pc = (uint64_t)get_saved_state_pc(regs);
        current_cpu_datap()->ipi_lr = (uint64_t)get_saved_state_lr(regs);
        current_cpu_datap()->ipi_fp = (uint64_t)get_saved_state_fp(regs);
        save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#endif
    }

    kstackptr = (vm_offset_t)current_thread()->machine.kstackptr;

#if defined(__arm64__)
    arm_kernel_saved_state_t *state = (arm_kernel_saved_state_t *)kstackptr;

    if (save_context) {
        /* Save the interrupted context before acknowledging the signal */
        current_thread()->machine.kpcb = regs;
    } else if (regs) {
        /* zero old state so machine_trace_thread knows not to backtrace it */
        state->fp = 0;
        state->pc_was_in_userspace = true;
        state->lr = 0;
        state->sp = 0;
        state->ssbs = 0;
        state->uao = 0;
        state->dit = 0;
    }
#endif

    /*
     * When running in serial mode, the core capturing the dump may hold interrupts disabled
     * for a time longer than the timeout. That path includes logic to reset the timestamp
     * so that we do not eventually trigger the interrupt timeout assert().
     *
     * Here we check whether other cores have already gone over the timeout at this point
     * before spinning, so we at least cover the IPI reception path. After spinning, however,
     * we reset the timestamp so as to avoid hitting the interrupt timeout assert().
     */
    if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
        INTERRUPT_MASKED_DEBUG_END();
    }

    os_atomic_dec(&debugger_sync, relaxed);


    wait_while_mp_kdp_trap(false);

    /**
     * Alert the triggering CPU that this CPU is done spinning. The CPU that
     * signalled all of the other CPUs will wait (in DebuggerXCallReturn) for
     * all of the CPUs to exit the above loop before continuing.
     */
    os_atomic_dec(&debug_cpus_spinning, relaxed);

#if SCHED_HYGIENE_DEBUG
    /*
     * We also abandon the measurement for preemption disable
     * timeouts, if any. Normally, time in interrupt handlers would be
     * subtracted from preemption disable time, and this will happen
     * up to this point here, but since we here "end" the interrupt
     * handler prematurely (from the point of view of interrupt masked
     * debugging), the time spinning would otherwise still be
     * attributed to preemption disable time, and potentially trigger
     * an event, which could be a panic.
     */
    abandon_preemption_disable_measurement();
#endif /* SCHED_HYGIENE_DEBUG */

    if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
        INTERRUPT_MASKED_DEBUG_START(current_thread()->machine.int_handler_addr, current_thread()->machine.int_type);
    }

#if defined(__arm64__)
    current_thread()->machine.kpcb = NULL;
#endif /* defined(__arm64__) */

    /* Any cleanup for our pushed context should go here */
}

void
DebuggerCall(
    unsigned int reason,
    void *ctx)
{
#if !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
    alternate_debugger_enter();
#endif

#if MACH_KDP
    kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
    /* TODO: decide what to do if no debugger config */
#endif
}

boolean_t
bootloader_valid_page(ppnum_t ppn)
{
    return pmap_bootloader_page(ppn);
}