1 | /* |
2 | * Copyright (c) 2007-2013 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <debug.h> |
30 | #include <mach_kdp.h> |
31 | |
32 | #include <kern/thread.h> |
33 | #include <machine/pmap.h> |
34 | #include <device/device_types.h> |
35 | |
36 | #include <mach/vm_param.h> |
37 | #include <mach/clock_types.h> |
38 | #include <mach/machine.h> |
39 | #include <mach/kmod.h> |
40 | #include <pexpert/boot.h> |
41 | #include <pexpert/pexpert.h> |
42 | |
43 | |
44 | #include <kern/misc_protos.h> |
45 | #include <kern/startup.h> |
46 | #include <kern/clock.h> |
47 | #include <kern/debug.h> |
48 | #include <kern/processor.h> |
49 | #include <kdp/kdp_core.h> |
50 | #if ALTERNATE_DEBUGGER |
51 | #include <arm64/alternate_debugger.h> |
52 | #endif |
53 | #include <machine/atomic.h> |
54 | #include <machine/trap.h> |
55 | #include <kern/spl.h> |
57 | #include <kdp/kdp_callout.h> |
58 | #include <kdp/kdp_dyld.h> |
59 | #include <kdp/kdp_internal.h> |
60 | #include <uuid/uuid.h> |
61 | #include <sys/codesign.h> |
62 | #include <sys/time.h> |
63 | |
64 | #include <IOKit/IOPlatformExpert.h> |
65 | |
66 | #include <mach/vm_prot.h> |
67 | #include <vm/vm_map.h> |
68 | #include <vm/pmap.h> |
69 | #include <vm/vm_shared_region.h> |
70 | #include <mach/time_value.h> |
71 | #include <machine/machparam.h> /* for btop */ |
72 | |
73 | #include <console/video_console.h> |
74 | #include <arm/cpu_data.h> |
75 | #include <arm/cpu_data_internal.h> |
76 | #include <arm/cpu_internal.h> |
77 | #include <arm/misc_protos.h> |
78 | #include <libkern/OSKextLibPrivate.h> |
79 | #include <vm/vm_kern.h> |
80 | #include <kern/kern_cdata.h> |
81 | |
82 | #if MACH_KDP |
83 | void kdp_trap(unsigned int, struct arm_saved_state *); |
84 | #endif |
85 | |
86 | extern kern_return_t do_stackshot(void *); |
87 | extern void kdp_snapshot_preflight(int pid, void *tracebuf, |
88 | uint32_t tracebuf_size, uint32_t flags, |
89 | kcdata_descriptor_t data_p, |
90 | boolean_t enable_faulting); |
91 | extern int kdp_stack_snapshot_bytes_traced(void); |
92 | |
93 | /* |
94 | * Increment the PANICLOG_VERSION if you change the format of the panic |
95 | * log in any way. |
96 | */ |
97 | #define PANICLOG_VERSION 11 |
98 | static struct kcdata_descriptor kc_panic_data; |
99 | |
100 | extern char firmware_version[]; |
101 | extern volatile uint32_t debug_enabled; |
102 | extern unsigned int not_in_kdp; |
103 | |
104 | extern int copyinframe(vm_address_t fp, uint32_t * frame); |
105 | extern void kdp_callouts(kdp_event_t event); |
106 | |
107 | /* #include <sys/proc.h> */ |
108 | #define MAXCOMLEN 16 |
109 | extern int proc_pid(void *p); |
110 | extern void proc_name_kdp(task_t, char *, int); |
111 | |
112 | extern const char version[]; |
113 | extern char osversion[]; |
114 | extern uint8_t gPlatformECID[8]; |
115 | extern uint32_t gPlatformMemoryID; |
116 | |
117 | extern uint64_t last_hwaccess_thread; |
118 | |
/* gTargetTypeBuffer is sized at 8 and gModelTypeBuffer at 32 because the
 * target name and model name typically do not exceed those lengths. */
121 | extern char gTargetTypeBuffer[8]; |
122 | extern char gModelTypeBuffer[32]; |
123 | |
decl_simple_lock_data(extern, clock_lock)
125 | extern struct timeval gIOLastSleepTime; |
126 | extern struct timeval gIOLastWakeTime; |
127 | extern boolean_t is_clock_configured; |
128 | extern boolean_t kernelcache_uuid_valid; |
129 | extern uuid_t kernelcache_uuid; |
130 | |
131 | /* Definitions for frame pointers */ |
132 | #define FP_ALIGNMENT_MASK ((uint32_t)(0x3)) |
133 | #define FP_LR_OFFSET ((uint32_t)4) |
134 | #define FP_LR_OFFSET64 ((uint32_t)8) |
135 | #define FP_MAX_NUM_TO_EVALUATE (50) |
136 | |
137 | /* Timeout (in nanoseconds) for all processors responding to debug crosscall */ |
138 | #define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000) |
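/*
 * DEBUG_ACK_TIMEOUT is consumed in DebuggerXCallEnter(), which converts it
 * with nanoseconds_to_absolutetime() and compares the result against
 * mach_absolute_time() while waiting for the signalled CPUs to check in.
 */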
139 | |
/* Forward function declarations */
void panic_display_times(void);
142 | void panic_print_symbol_name(vm_address_t search); |
143 | |
144 | |
145 | /* Global variables */ |
146 | static uint32_t panic_bt_depth; |
147 | boolean_t PanicInfoSaved = FALSE; |
148 | boolean_t force_immediate_debug_halt = FALSE; |
149 | unsigned int debug_ack_timeout_count = 0; |
150 | volatile unsigned int debugger_sync = 0; |
151 | volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */ |
152 | unsigned int DebugContextCount = 0; |
153 | |
154 | #if defined(__arm64__) |
155 | uint8_t PE_smc_stashed_x86_system_state = 0xFF; |
156 | uint8_t PE_smc_stashed_x86_power_state = 0xFF; |
157 | uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF; |
158 | uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF; |
159 | uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX; |
160 | uint32_t PE_pcie_stashed_link_state = UINT32_MAX; |
161 | #endif |
162 | |
163 | |
// Convenience macros to validate one or more pointers whose complete
// types are known (so sizeof(*(ptr)) is meaningful)
166 | #define VALIDATE_PTR(ptr) \ |
167 | validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr) |
168 | |
#define VALIDATE_PTR_2(ptr0, ptr1) \
	(VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1))

#define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \
	(VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2))

#define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \
	(VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3))
177 | |
178 | #define GET_MACRO(_1,_2,_3,_4,NAME,...) NAME |
179 | |
180 | #define VALIDATE_PTR_LIST(...) GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__) |
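
/*
 * Dispatch is by argument count: VALIDATE_PTR_LIST(a, b, c), for example,
 * expands to GET_MACRO(a, b, c, VALIDATE_PTR_4, VALIDATE_PTR_3,
 * VALIDATE_PTR_2, VALIDATE_PTR)(a, b, c); VALIDATE_PTR_3 lands in the
 * NAME slot, so the list expands to VALIDATE_PTR_3(a, b, c).
 */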
181 | |
182 | /* |
183 | * Evaluate if a pointer is valid |
184 | * Print a message if pointer is invalid |
185 | */ |
186 | static boolean_t validate_ptr( |
187 | vm_offset_t ptr, vm_size_t size, const char * ptr_name) |
188 | { |
189 | if (ptr) { |
190 | if (ml_validate_nofault(ptr, size)) { |
191 | return TRUE; |
192 | } else { |
193 | paniclog_append_noflush("Invalid %s pointer: %p size: %d\n" , |
194 | ptr_name, (void *)ptr, (int)size); |
195 | return FALSE; |
196 | } |
197 | } else { |
198 | paniclog_append_noflush("NULL %s pointer\n" , ptr_name); |
199 | return FALSE; |
200 | } |
201 | } |
202 | |
203 | /* |
204 | * Backtrace a single frame. |
205 | */ |
206 | static void |
207 | print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, |
208 | boolean_t is_64_bit) |
209 | { |
210 | int i = 0; |
211 | addr64_t lr; |
212 | addr64_t fp; |
213 | addr64_t fp_for_ppn; |
214 | ppnum_t ppn; |
215 | boolean_t dump_kernel_stack; |
216 | |
217 | fp = topfp; |
218 | fp_for_ppn = 0; |
219 | ppn = (ppnum_t)NULL; |
220 | |
221 | if (fp >= VM_MIN_KERNEL_ADDRESS) |
222 | dump_kernel_stack = TRUE; |
223 | else |
224 | dump_kernel_stack = FALSE; |
225 | |
226 | do { |
227 | if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) |
228 | break; |
229 | if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) |
230 | break; |
		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS))
232 | break; |
233 | |
234 | /* |
235 | * Check to see if current address will result in a different |
236 | * ppn than previously computed (to avoid recomputation) via |
237 | * (addr) ^ fp_for_ppn) >> PAGE_SHIFT) |
238 | */ |
239 | if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) { |
240 | ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET); |
241 | fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET); |
242 | } |
243 | if (ppn != (ppnum_t)NULL) { |
244 | if (is_64_bit) { |
245 | lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK)); |
246 | } else { |
247 | lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK)); |
248 | } |
249 | } else { |
250 | if (is_64_bit) { |
251 | paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n" , cur_marker, fp + FP_LR_OFFSET64); |
252 | } else { |
253 | paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n" , cur_marker, (uint32_t)(fp + FP_LR_OFFSET)); |
254 | } |
255 | break; |
256 | } |
257 | if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) { |
258 | ppn = pmap_find_phys(pmap, fp); |
259 | fp_for_ppn = fp; |
260 | } |
261 | if (ppn != (ppnum_t)NULL) { |
262 | if (is_64_bit) { |
263 | fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK)); |
264 | } else { |
265 | fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK)); |
266 | } |
267 | } else { |
268 | if (is_64_bit) { |
269 | paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n" , cur_marker, fp); |
270 | } else { |
271 | paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n" , cur_marker, (uint32_t)fp); |
272 | } |
273 | break; |
274 | } |
275 | |
276 | if (lr) { |
277 | if (is_64_bit) { |
278 | paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n" , cur_marker, lr, fp); |
279 | } else { |
280 | paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n" , cur_marker, (uint32_t)lr, (uint32_t)fp); |
281 | } |
282 | } |
283 | } while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp)); |
284 | } |
285 | |
286 | #define SANE_TASK_LIMIT 256 |
287 | #define TOP_RUNNABLE_LIMIT 5 |
288 | #define PANICLOG_UUID_BUF_SIZE 256 |
289 | |
290 | extern void panic_print_vnodes(void); |
291 | |
292 | static void |
293 | do_print_all_backtraces( |
294 | const char *message) |
295 | { |
296 | int logversion = PANICLOG_VERSION; |
297 | thread_t cur_thread = current_thread(); |
298 | uintptr_t cur_fp; |
299 | task_t task; |
300 | int i; |
301 | size_t index; |
302 | int print_vnodes = 0; |
	const char *nohilite_thread_marker = "\t";
304 | |
305 | /* end_marker_bytes set to 200 for printing END marker + stackshot summary info always */ |
306 | int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200; |
307 | uint64_t bytes_used = 0ULL; |
308 | int err = 0; |
309 | char *stackshot_begin_loc = NULL; |
310 | |
311 | #if defined(__arm__) |
312 | __asm__ volatile("mov %0, r7" :"=r" (cur_fp)); |
313 | #elif defined(__arm64__) |
314 | __asm__ volatile("add %0, xzr, fp" :"=r" (cur_fp)); |
315 | #else |
316 | #error Unknown architecture. |
317 | #endif |
318 | if (panic_bt_depth != 0) |
319 | return; |
320 | panic_bt_depth++; |
321 | |
322 | /* Truncate panic string to 1200 bytes -- WDT log can be ~1100 bytes */ |
323 | paniclog_append_noflush("Debugger message: %.1200s\n" , message); |
324 | if (debug_enabled) { |
325 | paniclog_append_noflush("Device: %s\n" , |
326 | ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet" ); |
327 | paniclog_append_noflush("Hardware Model: %s\n" , |
328 | ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer:"Not set yet" ); |
329 | paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n" , gPlatformECID[7], |
330 | gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3], |
331 | gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]); |
332 | if (last_hwaccess_thread) { |
333 | paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n" , last_hwaccess_thread); |
334 | } |
335 | paniclog_append_noflush("Boot args: %s\n" , PE_boot_args()); |
336 | } |
337 | paniclog_append_noflush("Memory ID: 0x%x\n" , gPlatformMemoryID); |
338 | paniclog_append_noflush("OS version: %.256s\n" , |
339 | ('\0' != osversion[0]) ? osversion : "Not set yet" ); |
340 | paniclog_append_noflush("Kernel version: %.512s\n" , version); |
341 | |
342 | if (kernelcache_uuid_valid) { |
343 | paniclog_append_noflush("KernelCache UUID: " ); |
344 | for (index = 0; index < sizeof(uuid_t); index++) { |
345 | paniclog_append_noflush("%02X" , kernelcache_uuid[index]); |
346 | } |
347 | paniclog_append_noflush("\n" ); |
348 | } |
349 | panic_display_kernel_uuid(); |
350 | |
351 | paniclog_append_noflush("iBoot version: %.128s\n" , firmware_version); |
352 | paniclog_append_noflush("secure boot?: %s\n" , debug_enabled ? "NO" : "YES" ); |
353 | #if defined(XNU_TARGET_OS_BRIDGE) |
354 | paniclog_append_noflush("x86 EFI Boot State: " ); |
355 | if (PE_smc_stashed_x86_efi_boot_state != 0xFF) { |
356 | paniclog_append_noflush("0x%x\n" , PE_smc_stashed_x86_efi_boot_state); |
357 | } else { |
358 | paniclog_append_noflush("not available\n" ); |
359 | } |
360 | paniclog_append_noflush("x86 System State: " ); |
361 | if (PE_smc_stashed_x86_system_state != 0xFF) { |
362 | paniclog_append_noflush("0x%x\n" , PE_smc_stashed_x86_system_state); |
363 | } else { |
364 | paniclog_append_noflush("not available\n" ); |
365 | } |
366 | paniclog_append_noflush("x86 Power State: " ); |
367 | if (PE_smc_stashed_x86_power_state != 0xFF) { |
368 | paniclog_append_noflush("0x%x\n" , PE_smc_stashed_x86_power_state); |
369 | } else { |
370 | paniclog_append_noflush("not available\n" ); |
371 | } |
372 | paniclog_append_noflush("x86 Shutdown Cause: " ); |
373 | if (PE_smc_stashed_x86_shutdown_cause != 0xFF) { |
374 | paniclog_append_noflush("0x%x\n" , PE_smc_stashed_x86_shutdown_cause); |
375 | } else { |
376 | paniclog_append_noflush("not available\n" ); |
377 | } |
378 | paniclog_append_noflush("x86 Previous Power Transitions: " ); |
379 | if (PE_smc_stashed_x86_prev_power_transitions != UINT64_MAX) { |
380 | paniclog_append_noflush("0x%llx\n" , PE_smc_stashed_x86_prev_power_transitions); |
381 | } else { |
382 | paniclog_append_noflush("not available\n" ); |
383 | } |
384 | paniclog_append_noflush("PCIeUp link state: " ); |
385 | if (PE_pcie_stashed_link_state != UINT32_MAX) { |
386 | paniclog_append_noflush("0x%x\n" , PE_pcie_stashed_link_state); |
387 | } else { |
388 | paniclog_append_noflush("not available\n" ); |
389 | } |
390 | #endif |
391 | paniclog_append_noflush("Paniclog version: %d\n" , logversion); |
392 | |
393 | panic_display_kernel_aslr(); |
394 | panic_display_times(); |
395 | panic_display_zprint(); |
396 | #if CONFIG_ZLEAKS |
397 | panic_display_ztrace(); |
398 | #endif /* CONFIG_ZLEAKS */ |
399 | #if CONFIG_ECC_LOGGING |
400 | panic_display_ecc_errors(); |
401 | #endif /* CONFIG_ECC_LOGGING */ |
402 | |
403 | #if DEVELOPMENT || DEBUG |
404 | if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) { |
405 | paniclog_append_noflush("Unsigned code exec failures: %u\n" , cs_debug_unsigned_exec_failures); |
406 | paniclog_append_noflush("Unsigned code mmap failures: %u\n" , cs_debug_unsigned_mmap_failures); |
407 | } |
408 | #endif |
409 | |
	// For WDT timeouts, just print the threads with the highest CPU usage
	if (strncmp(message, "WDT timeout", 11) == 0) {
		thread_t top_runnable[TOP_RUNNABLE_LIMIT] = {0};
413 | thread_t thread; |
414 | int total_cpu_usage = 0; |
415 | |
416 | print_vnodes = 1; |
417 | |
418 | |
419 | for (thread = (thread_t)queue_first(&threads); |
420 | VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread); |
421 | thread = (thread_t)queue_next(&thread->threads)) { |
422 | |
423 | total_cpu_usage += thread->cpu_usage; |
424 | |
425 | // Look for the 5 runnable threads with highest priority |
426 | if (thread->state & TH_RUN) { |
427 | int k; |
428 | thread_t comparison_thread = thread; |
429 | |
430 | for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) { |
431 | if (top_runnable[k] == 0) { |
432 | top_runnable[k] = comparison_thread; |
433 | break; |
434 | } else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) { |
435 | thread_t temp = top_runnable[k]; |
436 | top_runnable[k] = comparison_thread; |
437 | comparison_thread = temp; |
438 | } // if comparison thread has higher priority than previously saved thread |
439 | } // loop through highest priority runnable threads |
440 | } // Check if thread is runnable |
441 | } // Loop through all threads |
442 | |
443 | // Print the relevant info for each thread identified |
444 | paniclog_append_noflush("Total cpu_usage: %d\n" , total_cpu_usage); |
445 | paniclog_append_noflush("Thread task pri cpu_usage\n" ); |
446 | |
447 | for (i = 0; i < TOP_RUNNABLE_LIMIT; i++) { |
448 | |
			if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) &&
			    validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) {

				char name[MAXCOMLEN + 1];
				proc_name_kdp(top_runnable[i]->task, name, sizeof(name));
				paniclog_append_noflush("%p %s %d %d\n",
					top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
			}
		} // Loop through highest priority runnable threads
		paniclog_append_noflush("\n");
459 | } // Check if message is "WDT timeout" |
460 | |
461 | // print current task info |
462 | if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) { |
463 | |
464 | task = cur_thread->task; |
465 | |
		if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) {
			paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ",
				task, task->map->pmap->stats.resident_count, task->thread_count);
		} else {
			paniclog_append_noflush("Panicked task %p: %d threads: ",
				task, task->thread_count);
		}

		if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) {
			char name[MAXCOMLEN + 1];
			int pid = proc_pid(task->bsd_info);
			proc_name_kdp(task, name, sizeof(name));
			paniclog_append_noflush("pid %d: %s", pid, name);
		} else {
			paniclog_append_noflush("unknown task");
		}

		paniclog_append_noflush("\n");
484 | } |
485 | |
486 | if (cur_fp < VM_MAX_KERNEL_ADDRESS) { |
487 | paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n" , |
488 | cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread)); |
489 | #if __LP64__ |
490 | print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE); |
491 | #else |
492 | print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE); |
493 | #endif |
494 | } else { |
495 | paniclog_append_noflush("Could not print panicked thread backtrace:" |
496 | "frame pointer outside kernel vm.\n" ); |
497 | } |
498 | |
499 | paniclog_append_noflush("\n" ); |
500 | panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset; |
501 | |
502 | if (debug_ack_timeout_count) { |
503 | panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC; |
504 | panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); |
505 | paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n" ); |
506 | } else if (stackshot_active()) { |
507 | panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED; |
508 | panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); |
509 | paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n" ); |
510 | } else { |
		/* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
		debug_buf_ptr += ((8 - ((uintptr_t)debug_buf_ptr % 8)) % 8);
513 | stackshot_begin_loc = debug_buf_ptr; |
514 | |
515 | bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base); |
516 | err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr, |
517 | KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining - end_marker_bytes, |
518 | KCFLAG_USE_MEMCOPY); |
519 | if (err == KERN_SUCCESS) { |
520 | kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes, |
521 | (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | |
522 | STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | |
523 | STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0); |
524 | err = do_stackshot(NULL); |
525 | bytes_traced = kdp_stack_snapshot_bytes_traced(); |
526 | if (bytes_traced > 0 && !err) { |
527 | debug_buf_ptr += bytes_traced; |
528 | panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED; |
529 | panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc); |
530 | panic_info->eph_stackshot_len = bytes_traced; |
531 | |
532 | panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); |
533 | paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n" , bytes_traced); |
534 | } else { |
535 | bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data); |
536 | if (bytes_used > 0) { |
537 | /* Zero out the stackshot data */ |
538 | bzero(stackshot_begin_loc, bytes_used); |
539 | panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE; |
540 | |
541 | panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); |
542 | paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n" , bytes_used); |
543 | } else { |
544 | bzero(stackshot_begin_loc, bytes_used); |
545 | panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR; |
546 | |
547 | panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); |
548 | paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n" , bytes_traced, err); |
549 | } |
550 | } |
551 | } else { |
552 | panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR; |
553 | panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); |
554 | paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d" , err); |
555 | } |
556 | } |
557 | |
558 | assert(panic_info->eph_other_log_offset != 0); |
559 | |
560 | if (print_vnodes != 0) |
561 | panic_print_vnodes(); |
562 | |
563 | panic_bt_depth--; |
564 | } |
565 | |
566 | /* |
567 | * Entry to print_all_backtraces is serialized by the debugger lock |
568 | */ |
569 | static void |
570 | print_all_backtraces(const char *message) |
571 | { |
572 | unsigned int initial_not_in_kdp = not_in_kdp; |
573 | |
574 | cpu_data_t * cpu_data_ptr = getCpuDatap(); |
575 | |
576 | assert(cpu_data_ptr->PAB_active == FALSE); |
577 | cpu_data_ptr->PAB_active = TRUE; |
578 | |
579 | /* |
580 | * Because print all backtraces uses the pmap routines, it needs to |
581 | * avoid taking pmap locks. Right now, this is conditionalized on |
582 | * not_in_kdp. |
583 | */ |
584 | not_in_kdp = 0; |
585 | do_print_all_backtraces(message); |
586 | |
587 | not_in_kdp = initial_not_in_kdp; |
588 | |
589 | cpu_data_ptr->PAB_active = FALSE; |
590 | } |
591 | |
592 | void |
panic_display_times(void)
594 | { |
595 | if (kdp_clock_is_locked()) { |
596 | paniclog_append_noflush("Warning: clock is locked. Can't get time\n" ); |
597 | return; |
598 | } |
599 | |
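	/*
	 * Try-acquire and immediately drop clock_lock as a probe: if the lock
	 * is contended, skip the time display entirely rather than risk the
	 * clock_get_* calls below blocking in panic context.
	 */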
600 | if ((is_clock_configured) && (simple_lock_try(&clock_lock))) { |
601 | clock_sec_t secs, boot_secs; |
602 | clock_usec_t usecs, boot_usecs; |
603 | |
604 | simple_unlock(&clock_lock); |
605 | |
606 | clock_get_calendar_microtime(&secs, &usecs); |
607 | clock_get_boottime_microtime(&boot_secs, &boot_usecs); |
608 | |
609 | paniclog_append_noflush("Epoch Time: sec usec\n" ); |
610 | paniclog_append_noflush(" Boot : 0x%08x 0x%08x\n" , (unsigned int)boot_secs, (unsigned int)boot_usecs); |
611 | paniclog_append_noflush(" Sleep : 0x%08x 0x%08x\n" , (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec); |
612 | paniclog_append_noflush(" Wake : 0x%08x 0x%08x\n" , (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec); |
613 | paniclog_append_noflush(" Calendar: 0x%08x 0x%08x\n\n" , (unsigned int)secs, (unsigned int)usecs); |
614 | } |
615 | } |
616 | |
617 | void panic_print_symbol_name(vm_address_t search) |
618 | { |
619 | #pragma unused(search) |
620 | // empty stub. Really only used on x86_64. |
621 | return; |
622 | } |
623 | |
624 | void |
625 | SavePanicInfo( |
626 | const char *message, __unused void *panic_data, __unused uint64_t panic_options) |
627 | { |
628 | |
629 | /* This should be initialized by the time we get here */ |
630 | assert(panic_info->eph_panic_log_offset != 0); |
631 | |
632 | if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) { |
633 | panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC; |
634 | } |
635 | |
636 | if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) { |
637 | panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC; |
638 | } |
639 | |
640 | #if defined(XNU_TARGET_OS_BRIDGE) |
641 | panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state; |
642 | panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state; |
643 | panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state; |
644 | #endif |
645 | |
646 | /* |
647 | * On newer targets, panic data is stored directly into the iBoot panic region. |
648 | * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the |
649 | * panic CRC so that iBoot can hopefully find *something* useful in the panic region. |
650 | */ |
651 | if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) { |
652 | unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase); |
653 | PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size); |
654 | PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here |
655 | } |
656 | |
657 | if (PanicInfoSaved || (debug_buf_size == 0)) |
658 | return; |
659 | |
660 | PanicInfoSaved = TRUE; |
661 | |
662 | print_all_backtraces(message); |
663 | |
664 | assert(panic_info->eph_panic_log_len != 0); |
665 | panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset; |
666 | |
667 | PEHaltRestart(kPEPanicSync); |
668 | |
669 | /* |
670 | * Notifies registered IOPlatformPanicAction callbacks |
671 | * (which includes one to disable the memcache) and flushes |
672 | * the buffer contents from the cache |
673 | */ |
674 | paniclog_flush(); |
675 | } |
676 | |
677 | void |
678 | paniclog_flush() |
679 | { |
680 | unsigned int panicbuf_length = 0; |
681 | |
682 | panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase); |
683 | if (!panicbuf_length) |
684 | return; |
685 | |
686 | /* |
687 | * Updates the log length of the last part of the panic log. |
688 | */ |
689 | panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset; |
690 | |
691 | /* |
692 | * Updates the metadata at the beginning of the panic buffer, |
693 | * updates the CRC. |
694 | */ |
695 | PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length); |
696 | |
697 | /* |
698 | * This is currently unused by platform KEXTs on embedded but is |
699 | * kept for compatibility with the published IOKit interfaces. |
700 | */ |
701 | PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length); |
702 | |
703 | PE_sync_panic_buffers(); |
704 | } |
705 | |
706 | /* |
707 | * @function DebuggerXCallEnter |
708 | * |
709 | * @abstract IPI other cores so this core can run in a single-threaded context. |
710 | * |
711 | * @discussion This function should be called with the debugger lock held. It |
712 | * signals the other cores to go into a busy loop so this core can run in a |
713 | * single-threaded context and inspect kernel memory. |
714 | * |
715 | * @param proceed_on_sync_failure If true, then go ahead and try to debug even |
716 | * if we can't synch with the other cores. This is inherently unsafe and should |
717 | * only be used if the kernel is going down in flames anyway. |
718 | * |
719 | * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and |
720 | * proceed_on_sync_failure is false. |
721 | */ |
722 | kern_return_t |
723 | DebuggerXCallEnter( |
724 | boolean_t proceed_on_sync_failure) |
725 | { |
726 | uint64_t max_mabs_time, current_mabs_time; |
727 | int cpu; |
728 | int max_cpu; |
729 | cpu_data_t *target_cpu_datap; |
730 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
731 | |
732 | /* Check for nested debugger entry. */ |
733 | cpu_data_ptr->debugger_active++; |
734 | if (cpu_data_ptr->debugger_active != 1) |
735 | return KERN_SUCCESS; |
736 | |
737 | /* |
738 | * If debugger_sync is not 0, someone responded excessively late to the last |
739 | * debug request (we zero the sync variable in the return function). Zero it |
740 | * again here. This should prevent us from getting out of sync (heh) and |
741 | * timing out on every entry to the debugger if we timeout once. |
742 | */ |
743 | |
744 | debugger_sync = 0; |
745 | mp_kdp_trap = 1; |
746 | |
747 | /* |
748 | * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding |
749 | * to the signal. |
750 | */ |
751 | __builtin_arm_dmb(DMB_ISH); |
752 | |
753 | /* |
754 | * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to |
755 | * synchronize with every CPU that we appeared to signal successfully (cpu_signal |
756 | * is not synchronous). |
757 | */ |
758 | bool cpu_signal_failed = false; |
759 | max_cpu = ml_get_max_cpu_number(); |
760 | |
761 | boolean_t immediate_halt = FALSE; |
762 | if (proceed_on_sync_failure && force_immediate_debug_halt) |
763 | immediate_halt = TRUE; |
764 | |
765 | if (!immediate_halt) { |
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
				continue;

			if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
				(void)hw_atomic_add(&debugger_sync, 1);
			} else {
				cpu_signal_failed = true;
				kprintf("cpu_signal failed in DebuggerXCallEnter\n");
			}
		}
779 | |
780 | nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time); |
781 | current_mabs_time = mach_absolute_time(); |
782 | max_mabs_time += current_mabs_time; |
783 | assert(max_mabs_time > current_mabs_time); |
784 | |
785 | /* |
786 | * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we |
787 | * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be |
788 | * uninterruptibly spinning on someone else. The best we can hope for is that |
789 | * all other CPUs have either responded or are spinning in a context that is |
790 | * debugger safe. |
791 | */ |
792 | while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time)) |
793 | current_mabs_time = mach_absolute_time(); |
794 | |
795 | } |
796 | |
797 | if (cpu_signal_failed && !proceed_on_sync_failure) { |
798 | DebuggerXCallReturn(); |
799 | return KERN_FAILURE; |
800 | } else if (immediate_halt || (current_mabs_time >= max_mabs_time)) { |
801 | /* |
802 | * For the moment, we're aiming for a timeout that the user shouldn't notice, |
803 | * but will be sufficient to let the other core respond. |
804 | */ |
805 | __builtin_arm_dmb(DMB_ISH); |
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
				continue;
			if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt)
				continue;
			if (proceed_on_sync_failure) {
				paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
				if (halt_status < 0)
					paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				else {
					if (halt_status > 0)
						paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
					else
						paniclog_append_noflush("cpu %d successfully halted\n", cpu);
					target_cpu_datap->halt_status = CPU_HALTED;
				}
			} else
				kprintf("Debugger synch pending on cpu %d\n", cpu);
		}
828 | if (proceed_on_sync_failure) { |
829 | for (cpu = 0; cpu <= max_cpu; cpu++) { |
830 | target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; |
831 | |
832 | if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) || |
833 | (target_cpu_datap->halt_status == CPU_NOT_HALTED)) |
834 | continue; |
835 | dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu, |
836 | NSEC_PER_SEC, &target_cpu_datap->halt_state); |
837 | if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) |
838 | paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n" , cpu, halt_status, ml_dbgwrap_strerror(halt_status)); |
839 | else |
840 | target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE; |
841 | } |
842 | if (immediate_halt) |
843 | paniclog_append_noflush("Immediate halt requested on all cores\n" ); |
844 | else |
845 | paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n" , DEBUG_ACK_TIMEOUT); |
846 | debug_ack_timeout_count++; |
847 | return KERN_SUCCESS; |
848 | } else { |
849 | DebuggerXCallReturn(); |
850 | return KERN_OPERATION_TIMED_OUT; |
851 | } |
852 | } else { |
853 | return KERN_SUCCESS; |
854 | } |
855 | } |
856 | |
857 | /* |
858 | * @function DebuggerXCallReturn |
859 | * |
860 | * @abstract Resume normal multicore operation after DebuggerXCallEnter() |
861 | * |
862 | * @discussion This function should be called with debugger lock held. |
863 | */ |
864 | void |
865 | DebuggerXCallReturn( |
866 | void) |
867 | { |
868 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
869 | |
870 | cpu_data_ptr->debugger_active--; |
871 | if (cpu_data_ptr->debugger_active != 0) |
872 | return; |
873 | |
874 | mp_kdp_trap = 0; |
875 | debugger_sync = 0; |
876 | |
877 | /* Do we need a barrier here? */ |
878 | __builtin_arm_dmb(DMB_ISH); |
879 | } |
880 | |
881 | void |
882 | DebuggerXCall( |
883 | void *ctx) |
884 | { |
885 | boolean_t save_context = FALSE; |
886 | vm_offset_t kstackptr = 0; |
887 | arm_saved_state_t *regs = (arm_saved_state_t *) ctx; |
888 | |
889 | if (regs != NULL) { |
890 | #if defined(__arm64__) |
891 | save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs)); |
892 | #else |
893 | save_context = PSR_IS_KERNEL(regs->cpsr); |
894 | #endif |
895 | } |
896 | |
897 | kstackptr = current_thread()->machine.kstackptr; |
898 | arm_saved_state_t *state = (arm_saved_state_t *)kstackptr; |
899 | |
900 | if (save_context) { |
901 | /* Save the interrupted context before acknowledging the signal */ |
902 | *state = *regs; |
903 | |
904 | } else if (regs) { |
905 | /* zero old state so machine_trace_thread knows not to backtrace it */ |
906 | set_saved_state_fp(state, 0); |
907 | set_saved_state_pc(state, 0); |
908 | set_saved_state_lr(state, 0); |
909 | set_saved_state_sp(state, 0); |
910 | } |
911 | |
912 | (void)hw_atomic_sub(&debugger_sync, 1); |
913 | __builtin_arm_dmb(DMB_ISH); |
	/* Spin until the debug CPU clears mp_kdp_trap in DebuggerXCallReturn() */
	while (mp_kdp_trap)
		;
915 | |
916 | /* Any cleanup for our pushed context should go here */ |
917 | } |
918 | |
919 | |
920 | void |
921 | DebuggerCall( |
922 | unsigned int reason, |
923 | void *ctx) |
924 | { |
925 | #if !MACH_KDP |
926 | #pragma unused(reason,ctx) |
927 | #endif /* !MACH_KDP */ |
928 | |
929 | #if ALTERNATE_DEBUGGER |
930 | alternate_debugger_enter(); |
931 | #endif |
932 | |
933 | #if MACH_KDP |
934 | kdp_trap(reason, (struct arm_saved_state *)ctx); |
935 | #else |
936 | /* TODO: decide what to do if no debugger config */ |
937 | #endif |
938 | } |
939 | |
940 | |
941 | |