/*
 * Copyright (c) 2007-2023 Apple Inc. All rights reserved.
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 */


/* Required to know if we must compile the file. */
#include <pexpert/arm64/board_config.h>

/* Generic headers. */
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <machine/machine_routines.h>
#include <sys/sysctl.h>
#include <kern/clock.h>

/* Dev headers. */
#if DEVELOPMENT || DEBUG
#include <kern/simple_lock.h>
#include <os/hash.h>
#endif /* DEVELOPMENT || DEBUG */

/* Trace-specific headers. */

/********
 * Logs *
 ********/

#define PANIC_TRACE_LOG 1
#define panic_trace_error(msg, args...) { \
	if (panic_trace_debug == 1) { \
		kprintf("panic_trace: " msg "\n", ##args); \
	} else if (panic_trace_debug == 2) { \
		printf("panic_trace: " msg "\n", ##args); \
	} \
}
#if PANIC_TRACE_LOG
#define panic_trace_log(msg, args...) { \
	if (panic_trace_debug) { \
		panic_trace_error(msg, ##args); \
	} \
}
#else
#define panic_trace_log(msg, args...)
#endif /* PANIC_TRACE_LOG */
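
/*
 * Example (illustrative, not from the original source): with
 * panic_trace_debug == 1,
 *     panic_trace_log("mapped %u bytes", 16u);
 * emits "panic_trace: mapped 16 bytes\n" via kprintf; with
 * panic_trace_debug == 2 the same line goes to printf (the system log);
 * with panic_trace_debug == 0 it prints nothing.
 */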

/*************
 * Externals *
 *************/

/*
 * SoC base physical address.
 * Set by pe_identify_machine.c:pe_arm_map_interrupt_controller during
 * early boot; zero before that.
 */
extern vm_offset_t gSocPhys;

/********
 * Logs *
 ********/

#if DEVELOPMENT || DEBUG
#ifndef CT_DFT_LOGS_ON
#define CT_DFT_LOGS_ON 0
#endif /* CT_DFT_LOGS_ON */
#endif /* DEVELOPMENT || DEBUG */

/*****************
 * Default state *
 *****************/

#if DEVELOPMENT || DEBUG

/*
 * When supported, panic-trace is enabled by default on some platforms.
 * This section defines the platforms on which it is enabled.
 */

/* Open source builds -> disabled. */
#define DEFAULT_PANIC_TRACE_MODE panic_trace_disabled

#endif /* DEVELOPMENT || DEBUG */

/***********
 * Globals *
 ***********/

#if DEVELOPMENT || DEBUG
boolean_t panic_trace_disabled_for_rdar107003520 = FALSE;
#endif /* DEVELOPMENT || DEBUG */

static boolean_t debug_and_trace_initialized = false;

/*************
 * Boot-args *
 *************/

#if DEVELOPMENT || DEBUG
/*
 * Panic trace state.
 * Has a double meaning:
 * - at system init, it gives the expected tracing state.
 *   -> init code uses that to enable tracing.
 * - after system init, it is used to report the tracing state.
 */
TUNABLE_DT_WRITEABLE(panic_trace_t, panic_trace, "/arm-io/cpu-debug-interface",
    "panic-trace-mode", "panic_trace", DEFAULT_PANIC_TRACE_MODE, TUNABLE_DT_NONE);
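
/*
 * Illustrative usage (mode values inferred from the panic message in
 * pe_arm_init_debug() below, which names panic_trace={0x01, 0x10}): booting
 * with the boot-arg panic_trace=0x01 presumably requests panic_trace_enabled,
 * and panic_trace=0x10 panic_trace_alt_enabled. The same value can come from
 * the "panic-trace-mode" property of /arm-io/cpu-debug-interface in the
 * device tree; per the stress-rack policy below, a user-specified boot-arg
 * is preferred.
 */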

/*
 * Panic trace debug state. See 'Logs' section above.
 */
TUNABLE_WRITEABLE(boolean_t, panic_trace_debug, "panic_trace_debug", CT_DFT_LOGS_ON);

#endif /* DEVELOPMENT || DEBUG */

/*********
 * Locks *
 *********/

/* Panic trace lock. */

/*****************
 * Debug command *
 *****************/

#if DEVELOPMENT || DEBUG

decl_simple_lock_data(, panic_hook_lock);

TUNABLE(unsigned int, bootarg_stop_clocks, "stop_clocks", 0);

// The command buffer contains the converted commands from the device tree for commanding cpu_halt, enable_trace, etc.
#define DEBUG_COMMAND_BUFFER_SIZE 256
typedef struct command_buffer_element {
	uintptr_t address;
	uintptr_t address_pa;
	uintptr_t value;
	union cpu_selector {
		uint16_t mask;
		struct cpu_range {
			uint8_t min_cpu;
			uint8_t max_cpu;
		} range;
	} destination_cpu_selector;
	uint16_t delay_us;
	bool cpu_selector_is_range;
	bool is_32bit;
} command_buffer_element_t;
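
/*
 * A command buffer is a sequence of these entries terminated by an entry
 * whose address field is zero; pe_init_debug_command() writes the
 * terminator and pe_run_debug_command() stops walking when it sees it.
 */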

#define CPU_SELECTOR_SHIFT              (16)
#define CPU_SELECTOR_MASK               (0xFFFF << CPU_SELECTOR_SHIFT)
#define REGISTER_OFFSET_MASK            ((1 << CPU_SELECTOR_SHIFT) - 1)
#define REGISTER_OFFSET(register_prop)  (register_prop & REGISTER_OFFSET_MASK)
#define CPU_SELECTOR(register_offset)   ((register_offset & CPU_SELECTOR_MASK) >> CPU_SELECTOR_SHIFT) // Upper 16 bits hold the cpu selector
#define MAX_WINDOW_SIZE                 0xFFFF
#define DELAY_SHIFT                     (32)
#define DELAY_MASK                      (0xFFFFULL << DELAY_SHIFT)
#define DELAY_US(register_offset)       ((register_offset & DELAY_MASK) >> DELAY_SHIFT)
#define CPU_SELECTOR_ISRANGE_MASK       (1ULL << 62)
#define REGISTER_32BIT_MASK             (1ULL << 63)
#define ALL_CPUS                        0x0000
#define RESET_VIRTUAL_ADDRESS_WINDOW    0xFFFFFFFF

#define REGISTER_IS_32BIT(register_offset)     ((register_offset & REGISTER_32BIT_MASK) != 0)
#define REGISTER_SIZE(register_offset)         (REGISTER_IS_32BIT(register_offset) ? sizeof(uint32_t) : sizeof(uintptr_t))
#define CPU_SELECTOR_IS_RANGE(register_offset) ((register_offset & CPU_SELECTOR_ISRANGE_MASK) != 0)
#define CPU_SELECTOR_MIN_CPU(register_offset)  ((CPU_SELECTOR(register_offset) & 0xff00) >> 8)
#define CPU_SELECTOR_MAX_CPU(register_offset)  (CPU_SELECTOR(register_offset) & 0x00ff)
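
/*
 * Layout of each 64-bit control word, as decoded by the macros above:
 *
 *   bit 63       REGISTER_32BIT_MASK        write is 32-bit rather than pointer-width
 *   bit 62       CPU_SELECTOR_ISRANGE_MASK  cpu selector is a {min, max} range
 *   bits 47:32   DELAY_MASK                 post-write delay in microseconds
 *   bits 31:16   CPU_SELECTOR_MASK          cpu bitmask (0 == ALL_CPUS), or
 *                                           (min_cpu << 8 | max_cpu) when the range bit is set
 *   bits 15:0    REGISTER_OFFSET_MASK       register offset within the mapped window
 *
 * For example, a bitmask selector of 0x0005 targets CPUs 0 and 2, and a
 * range selector of 0x0103 targets CPUs 1 through 3 (see
 * is_running_cpu_selected() below).
 */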

// Record which CPU is currently running one of our debug commands, so we can trap panic reentrancy to PE_arm_debug_panic_hook.
static int running_debug_command_on_cpu_number = -1;


// Determine whether the current debug command is intended for this CPU.
static inline bool
is_running_cpu_selected(command_buffer_element_t *command)
{
	assert(running_debug_command_on_cpu_number >= 0);
	if (command->cpu_selector_is_range) {
		return running_debug_command_on_cpu_number >= command->destination_cpu_selector.range.min_cpu
		       && running_debug_command_on_cpu_number <= command->destination_cpu_selector.range.max_cpu;
	} else if (command->destination_cpu_selector.mask == ALL_CPUS) {
		return true;
	} else {
		return !!(command->destination_cpu_selector.mask & (1 << running_debug_command_on_cpu_number));
	}
}


// Pointers into debug_command_buffer for each operation. Assumes runtime will init them to zero.
static command_buffer_element_t *cpu_halt;
static command_buffer_element_t *enable_trace;
static command_buffer_element_t *enable_alt_trace;
static command_buffer_element_t *trace_halt;
static command_buffer_element_t *enable_stop_clocks;
static command_buffer_element_t *stop_clocks;

boolean_t
PE_arm_debug_and_trace_initialized(void)
{
	return debug_and_trace_initialized;
}
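
/*
 * Each command below is parsed from pairs of (control word, value) in the
 * corresponding device tree property. A hypothetical "enable_trace"
 * property might look like this (offsets and values invented for
 * illustration only):
 *
 *     0x00210000 0x4000      first pair: map a 0x4000-byte window at
 *                            physical address gSocPhys + 0x210000
 *     0x00000040 0x1         write 0x1 to offset 0x40 for ALL_CPUS
 *     0xFFFFFFFF 0x0         RESET_VIRTUAL_ADDRESS_WINDOW: the next pair
 *                            maps a fresh window
 */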

static void
pe_init_debug_command(DTEntry entryP, command_buffer_element_t **command_buffer, const char* entry_name)
{
	// statically allocate to prevent needing alloc at runtime
	static command_buffer_element_t debug_command_buffer[DEBUG_COMMAND_BUFFER_SIZE];
	static command_buffer_element_t *next_command_buffer_entry = debug_command_buffer;

	// record this pointer but don't assign it to *command_buffer yet, in case we panic while half-initialized
	command_buffer_element_t *command_starting_index = next_command_buffer_entry;

	uintptr_t const *reg_prop;
	uint32_t prop_size, reg_window_size = 0;
	uintptr_t base_address_pa = 0, debug_reg_window = 0;

	if (command_buffer == 0) {
		panic_trace_log("%s: %s: no hook to assign this command to\n", __func__, entry_name);
		return;
	}

	if (SecureDTGetProperty(entryP, entry_name, (void const **)&reg_prop, &prop_size) != kSuccess) {
		panic("%s: %s: failed to read property from device tree", __func__, entry_name);
	}

	if (prop_size % (2 * sizeof(*reg_prop))) {
		panic("%s: %s: property size %u bytes is not a multiple of %lu",
		    __func__, entry_name, prop_size, 2 * sizeof(*reg_prop));
	}

	// convert to real virt addresses and stuff commands into debug_command_buffer
	for (; prop_size; reg_prop += 2, prop_size -= 2 * sizeof(*reg_prop)) {
		if (*reg_prop == RESET_VIRTUAL_ADDRESS_WINDOW) {
			debug_reg_window = 0; // Create a new window
		} else if (debug_reg_window == 0) {
			// create a window from virtual address to the specified physical address
			base_address_pa = gSocPhys + *reg_prop;
			reg_window_size = ((uint32_t)*(reg_prop + 1));
			if (reg_window_size > MAX_WINDOW_SIZE) {
				panic("%s: %s: %#x-byte window at %#lx exceeds maximum size of %#x",
				    __func__, entry_name, reg_window_size, base_address_pa, MAX_WINDOW_SIZE);
			}
			debug_reg_window = ml_io_map(base_address_pa, reg_window_size);
			assert(debug_reg_window);
			panic_trace_log("%s: %s: %#x bytes at %#lx mapped to %#lx\n",
			    __func__, entry_name, reg_window_size, base_address_pa, debug_reg_window);
		} else {
			if ((REGISTER_OFFSET(*reg_prop) + REGISTER_SIZE(*reg_prop)) > reg_window_size) {
				panic("%s: %s[%ld]: %#lx(+%lu)-byte offset from %#lx exceeds allocated size of %#x",
				    __func__, entry_name, next_command_buffer_entry - command_starting_index,
				    REGISTER_OFFSET(*reg_prop), REGISTER_SIZE(*reg_prop), base_address_pa, reg_window_size);
			}

			if (next_command_buffer_entry - debug_command_buffer >= DEBUG_COMMAND_BUFFER_SIZE - 1) {
				// can't use the very last entry, since we need it to terminate the command
				panic("%s: %s[%ld]: out of space in command buffer",
				    __func__, entry_name, next_command_buffer_entry - command_starting_index);
			}

			next_command_buffer_entry->address = debug_reg_window + REGISTER_OFFSET(*reg_prop);
			next_command_buffer_entry->address_pa = base_address_pa + REGISTER_OFFSET(*reg_prop);
			next_command_buffer_entry->value = *(reg_prop + 1);
#if defined(__arm64__)
			next_command_buffer_entry->delay_us = DELAY_US(*reg_prop);
			next_command_buffer_entry->is_32bit = REGISTER_IS_32BIT(*reg_prop);
#else
			next_command_buffer_entry->delay_us = 0;
			next_command_buffer_entry->is_32bit = false;
#endif
			if ((next_command_buffer_entry->cpu_selector_is_range = CPU_SELECTOR_IS_RANGE(*reg_prop))) {
				next_command_buffer_entry->destination_cpu_selector.range.min_cpu = (uint8_t)CPU_SELECTOR_MIN_CPU(*reg_prop);
				next_command_buffer_entry->destination_cpu_selector.range.max_cpu = (uint8_t)CPU_SELECTOR_MAX_CPU(*reg_prop);
			} else {
				next_command_buffer_entry->destination_cpu_selector.mask = (uint16_t)CPU_SELECTOR(*reg_prop);
			}
			next_command_buffer_entry++;
		}
	}

	// null terminate the address field of the command to end it
	(next_command_buffer_entry++)->address = 0;

	// save pointer into table for this command
	*command_buffer = command_starting_index;
}

static void
pe_run_debug_command(command_buffer_element_t *command_buffer)
{
	if (!PE_arm_debug_and_trace_initialized()) {
		/*
		 * In practice this can only happen if we panicked very early,
		 * when only the boot CPU is online and before it has finished
		 * initializing the debug and trace infrastructure. Avoid an
		 * unhelpful nested panic() here and instead resume execution
		 * to handle_debugger_trap(), which logs a user-friendly error
		 * message before spinning forever.
		 */
		return;
	}
	// If more than one CPU panics, the later ones spin on this lock and are halted when the first CPU executes the cpu_halt debug command.
	simple_lock(&panic_hook_lock, LCK_GRP_NULL);

	running_debug_command_on_cpu_number = cpu_number();

	while (command_buffer && command_buffer->address) {
		if (is_running_cpu_selected(command_buffer)) {
			panic_trace_log("%s: cpu %d: reg write 0x%lx (VA 0x%lx) := 0x%lx",
			    __func__, running_debug_command_on_cpu_number, command_buffer->address_pa,
			    command_buffer->address, command_buffer->value);
			if (command_buffer->is_32bit) {
				*((volatile uint32_t*)(command_buffer->address)) = (uint32_t)(command_buffer->value);
			} else {
				*((volatile uintptr_t*)(command_buffer->address)) = command_buffer->value; // register = value;
			}
			if (command_buffer->delay_us != 0) {
				uint64_t deadline;
				nanoseconds_to_absolutetime(command_buffer->delay_us * NSEC_PER_USEC, &deadline);
				deadline += ml_get_timebase();
				while (ml_get_timebase() < deadline) {
					os_compiler_barrier();
				}
			}
		}
		command_buffer++;
	}

	running_debug_command_on_cpu_number = -1;
	simple_unlock(&panic_hook_lock);
}

#endif /* DEVELOPMENT || DEBUG */

/******************
 * Partial policy *
 ******************/

/* Debug-only section. */
#if DEVELOPMENT || DEBUG

/* Util. */
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif /* MIN */

/*
 * The % of devices which will have panic_trace enabled when using a partial
 * enablement policy.
 */
static TUNABLE_DT(uint32_t, panic_trace_partial_percent,
    "/arm-io/cpu-debug-interface", "panic-trace-partial-percent",
    "panic_trace_partial_percent", 50, TUNABLE_DT_NONE);

/*
 * Stress racks opt out of panic_trace, unless overridden by the panic_trace boot-arg.
 */
static void
panic_trace_apply_stress_rack_policy(void)
{
	DTEntry ent = NULL;
	const void *propP = NULL;
	unsigned int size = 0;

	if (SecureDTLookupEntry(NULL, "/chosen", &ent) == kSuccess &&
	    SecureDTGetProperty(ent, "stress-rack", &propP, &size) == kSuccess) {
		if (PE_parse_boot_argn("panic_trace", NULL, 0)) {
			// Prefer the user-specified boot-arg even when running on stress racks.
			// Make an exception for devices with broken single-stepping.
		} else {
			panic_trace = 0;
		}
	}
}

/*
 * When the `panic_trace_partial_policy` flag is set, not all devices will have
 * the panic_trace settings applied. The actual % is determined by
 * `panic_trace_partial_percent`.
 * By using the ECID instead of a random number the process is made
 * deterministic for any given device.
 * This function disables panic trace if the device falls into the disabled %
 * range. It otherwise leaves the panic_trace value unmodified.
 * Called on the boot path, thus does not lock panic_trace_lock.
 */
static void
panic_trace_apply_partial_policy(void)
{
	assert3u((panic_trace & panic_trace_partial_policy), !=, 0);

	DTEntry ent = NULL;
	unsigned int size = 0;
	const void *ecid = NULL;

	/* Grab the ECID. */
	if (SecureDTLookupEntry(NULL, "/chosen", &ent) != kSuccess ||
	    SecureDTGetProperty(ent, "unique-chip-id", &ecid, &size) != kSuccess) {
		panic_trace = panic_trace_disabled;
		return;
	}

	/*
	 * Use os_hash_jenkins to convert the decidedly non-random ECID into
	 * something resembling a random number. Better (cryptographic) hash
	 * functions are not available at this point in boot.
	 */
	const uint32_t rand = os_hash_jenkins(ecid, size);

	/* Sanitize the percent value. */
	const uint32_t percent = MIN(100, panic_trace_partial_percent);
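
	/*
	 * Worked example (illustrative values): with the default percent of
	 * 50, a device whose ECID hashes to 142 keeps its panic_trace setting
	 * (142 % 100 == 42, and 42 < 50), while a device hashing to 73 is
	 * disabled (73 % 100 == 73, and 73 >= 50).
	 */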

	/*
	 * Apply the ECID percent value. The bias here should be so tiny as to not
	 * matter for this purpose.
	 */
	if ((rand % 100) >= percent) {
		panic_trace = panic_trace_disabled;
	}
}

#endif /* DEVELOPMENT || DEBUG */

/****************
 * External API *
 ****************/

#if DEVELOPMENT || DEBUG
void
PE_arm_debug_enable_trace(bool should_log)
{
	if (should_log) {
		panic_trace_log("%s enter", __FUNCTION__);
	}
	if (panic_trace & panic_trace_enabled) {
		pe_run_debug_command(enable_trace);
	} else if (panic_trace & panic_trace_alt_enabled) {
		pe_run_debug_command(enable_alt_trace);
	}
	if (should_log) {
		panic_trace_log("%s exit", __FUNCTION__);
	}
}
#endif /* DEVELOPMENT || DEBUG */

#if DEVELOPMENT || DEBUG
static void
PE_arm_panic_hook(const char *str __unused)
{
	(void)str; // not used
#if defined(__arm64__) && !APPLEVIRTUALPLATFORM
	/*
	 * For Fastsim support--inform the simulator that it can dump a
	 * panic trace now (so we don't capture all the panic handling).
	 * This constant is randomly chosen by agreement between xnu and
	 * Fastsim.
	 */
	__asm__ volatile ("hint #0x4f");
#endif /* defined(__arm64__) && !APPLEVIRTUALPLATFORM */
	if (bootarg_stop_clocks) {
		pe_run_debug_command(stop_clocks);
	}
	// disable panic trace to snapshot its ringbuffer
	// note: not taking panic_trace_lock to avoid delaying cpu halt.
	// This is known to be racy.
	if (panic_trace) {
		if (running_debug_command_on_cpu_number == cpu_number()) {
			// This is going to end badly if we don't trap, since we'd be panicking during our own code
			kprintf("## Panic Trace code caused the panic ##\n");
			return; // allow the normal panic operation to occur.
		}

		// Stop tracing to freeze the buffer and return to normal panic processing.
		pe_run_debug_command(trace_halt);
	}
}
#endif /* DEVELOPMENT || DEBUG */


#if DEVELOPMENT || DEBUG
void (*PE_arm_debug_panic_hook)(const char *str) = PE_arm_panic_hook;
#else
void (*const PE_arm_debug_panic_hook)(const char *str) = NULL;
#endif // DEVELOPMENT || DEBUG

void
PE_init_cpu(void)
{
#if DEVELOPMENT || DEBUG
	if (bootarg_stop_clocks) {
		pe_run_debug_command(enable_stop_clocks);
	}
#endif // DEVELOPMENT || DEBUG

	pe_init_fiq();
}


void
PE_singlestep_hook(void)
{
}

void
PE_panic_hook(const char *str __unused)
{
	if (PE_arm_debug_panic_hook != NULL) {
		PE_arm_debug_panic_hook(str);
	}
}

/*
 * Initialize the trace infrastructure.
 */
void
pe_arm_init_debug(void *args)
{
	DTEntry entryP;
	uintptr_t const *reg_prop;
	uint32_t prop_size;

	/*
	 * When args != NULL, this means we're being called from arm_init() on the
	 * boot CPU; this controls one-time init of the panic trace infrastructure.
	 * During one-time init, panic_trace_lock does not need to be held.
	 */
	const bool is_boot_cpu = (args != NULL);

	if (gSocPhys == 0) {
		kprintf("pe_arm_init_debug: failed to initialize, gSocPhys == 0\n");
		return;
	}

#if DEVELOPMENT || DEBUG
	if (is_boot_cpu) {
		if (panic_trace != 0) {
			panic_trace_apply_stress_rack_policy();
		}

		if ((panic_trace & panic_trace_partial_policy) != 0) {
			panic_trace_apply_partial_policy();
		}
	}
#endif /* DEVELOPMENT || DEBUG */

	if (SecureDTFindEntry("device_type", "cpu-debug-interface", &entryP) == kSuccess) {
		if (is_boot_cpu) {
			if (SecureDTGetProperty(entryP, "reg", (void const **)&reg_prop, &prop_size) == kSuccess) {
				ml_init_arm_debug_interface(args, ml_io_map(gSocPhys + *reg_prop, *(reg_prop + 1)));
			}
#if DEVELOPMENT || DEBUG
			simple_lock_init(&panic_hook_lock, 0); // assuming single-threaded mode

			if (panic_trace) {
				kprintf("pe_arm_init_debug: panic_trace=%d\n", panic_trace);

				// Prepare debug command buffers.
				pe_init_debug_command(entryP, &cpu_halt, "cpu_halt");
				pe_init_debug_command(entryP, &enable_trace, "enable_trace");
				pe_init_debug_command(entryP, &enable_alt_trace, "enable_alt_trace");
				pe_init_debug_command(entryP, &trace_halt, "trace_halt");

				// start tracing now
				PE_arm_debug_enable_trace(true);
			}
			if (bootarg_stop_clocks) {
				pe_init_debug_command(entryP, &enable_stop_clocks, "enable_stop_clocks");
				pe_init_debug_command(entryP, &stop_clocks, "stop_clocks");
			}
#endif
		}
	} else {
#if DEVELOPMENT || DEBUG
		const uint32_t dependent_modes = (panic_trace_enabled | panic_trace_alt_enabled);
		if (is_boot_cpu && (bootarg_stop_clocks || (panic_trace & dependent_modes))) {
			panic("failed to find cpu-debug-interface node in the EDT! "
			    "(required by `panic_trace={0x01, 0x10}` or `stop_clocks=1`)");
		} else
#endif
		{
			kprintf("pe_arm_init_debug: failed to find cpu-debug-interface\n");
		}
	}

	debug_and_trace_initialized = true;
}

/**********************
 * Panic-trace sysctl *
 **********************/
