1/*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * arm platform expert initialization.
5 */
6#include <sys/types.h>
7#include <sys/kdebug.h>
8#include <mach/vm_param.h>
9#include <pexpert/protos.h>
10#include <pexpert/pexpert.h>
11#include <pexpert/boot.h>
12#include <pexpert/device_tree.h>
13#include <pexpert/pe_images.h>
14#include <kern/sched_prim.h>
15#include <kern/socd_client.h>
16#include <machine/atomic.h>
17#include <machine/machine_routines.h>
18#include <arm/caches_internal.h>
19#include <kern/debug.h>
20#include <libkern/section_keywords.h>
21#include <os/overflow.h>
22
23#include <pexpert/arm64/board_config.h>
24
25#if CONFIG_SPTM
26#include <arm64/sptm/sptm.h>
27#endif
28
/* extern references */
extern void pe_identify_machine(boot_args *bootArgs);

/* static references */
static void pe_prepare_images(void);

/* private globals */

/* Global platform-expert state: boot-args, device tree, video configuration. */
SECURITY_READ_ONLY_LATE(PE_state_t) PE_state;

/* Security-research-device fusing state, read from /chosen in the device tree. */
TUNABLE_DT(uint32_t, PE_srd_fused, "/chosen", "research-enabled",
    "srd_fusing", 0, TUNABLE_DT_NONE);

#define FW_VERS_LEN 128

/* iBoot firmware version string, copied from /chosen in PE_init_platform(). */
char iBoot_version[FW_VERS_LEN];
#if defined(TARGET_OS_OSX) && defined(__arm64__)
/* Stage-2 (system) firmware version string, also copied from /chosen. */
char iBoot_Stage_2_version[FW_VERS_LEN];
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

/*
 * This variable is only modified once, when the BSP starts executing. We put it in __DATA_CONST
 * as page protections on kernel text early in startup are read-write. The kernel is
 * locked down later in start-up, said mappings become RO and thus this
 * variable becomes immutable.
 *
 * See osfmk/arm/arm_vm_init.c for more information.
 */
SECURITY_READ_ONLY_LATE(volatile uint32_t) debug_enabled = FALSE;

/*
 * This variable indicates the page protection security policy used by the system.
 * It is intended mostly for debugging purposes.
 */
SECURITY_READ_ONLY_LATE(ml_page_protection_t) page_protection_type;

uint8_t gPlatformECID[8];           /* unique chip id, from /chosen "unique-chip-id" */
uint32_t gPlatformMemoryID;         /* DRAM vendor id, from /chosen "dram-vendor-id" */
static boolean_t vc_progress_initialized = FALSE;  /* progress indicator set up yet? */
uint64_t last_hwaccess_thread = 0;  /* updated by PE_mark_hwaccess() */
char gTargetTypeBuffer[16];         /* device tree "target-type" string */
char gModelTypeBuffer[32];          /* device tree "model" string */

/* Clock Frequency Info */
clock_frequency_info_t gPEClockFrequencyInfo;

/* iBoot panic-save region mapping; populated by check_for_panic_log(). */
vm_offset_t gPanicBase = 0;
unsigned int gPanicSize;
struct embedded_panic_header *panic_info = NULL;

#if (DEVELOPMENT || DEBUG) && defined(XNU_TARGET_OS_BRIDGE)
/*
 * On DEVELOPMENT bridgeOS, we map the x86 panic region
 * so we can include this data in bridgeOS corefiles
 */
uint64_t macos_panic_base = 0;
unsigned int macos_panic_size = 0;

struct macos_panic_header *mac_panic_header = NULL;
#endif

/* Maximum size of panic log excluding headers, in bytes */
static unsigned int panic_text_len;

/* Whether a console is standing by for panic logging */
static boolean_t panic_console_available = FALSE;

/* socd trace ram attributes */
static SECURITY_READ_ONLY_LATE(vm_offset_t) socd_trace_ram_base = 0;
static SECURITY_READ_ONLY_LATE(vm_size_t) socd_trace_ram_size = 0;

extern uint32_t crc32(uint32_t crc, const void *buf, size_t size);

void PE_slide_devicetree(vm_offset_t);
101
102static void
103check_for_panic_log(void)
104{
105#ifdef PLATFORM_PANIC_LOG_PADDR
106 gPanicBase = ml_io_map_wcomb(PLATFORM_PANIC_LOG_PADDR, PLATFORM_PANIC_LOG_SIZE);
107 panic_text_len = PLATFORM_PANIC_LOG_SIZE - sizeof(struct embedded_panic_header);
108 gPanicSize = PLATFORM_PANIC_LOG_SIZE;
109#else
110 DTEntry entry, chosen;
111 unsigned int size;
112 uintptr_t const *reg_prop;
113 uint32_t const *panic_region_length;
114
115 /*
116 * DT properties for the panic region are populated by UpdateDeviceTree() in iBoot:
117 *
118 * chosen {
119 * embedded-panic-log-size = <0x00080000>;
120 * [a bunch of other stuff]
121 * };
122 *
123 * pram {
124 * reg = <0x00000008_fbc48000 0x00000000_000b4000>;
125 * };
126 *
127 * reg[0] is the physical address
128 * reg[1] is the size of iBoot's kMemoryRegion_Panic (not used)
129 * embedded-panic-log-size is the maximum amount of data to store in the buffer
130 */
131 if (kSuccess != SecureDTLookupEntry(searchPoint: 0, pathName: "pram", foundEntry: &entry)) {
132 return;
133 }
134
135 if (kSuccess != SecureDTGetProperty(entry, propertyName: "reg", propertyValue: (void const **)&reg_prop, propertySize: &size)) {
136 return;
137 }
138
139 if (kSuccess != SecureDTLookupEntry(searchPoint: 0, pathName: "/chosen", foundEntry: &chosen)) {
140 return;
141 }
142
143 if (kSuccess != SecureDTGetProperty(entry: chosen, propertyName: "embedded-panic-log-size", propertyValue: (void const **) &panic_region_length, propertySize: &size)) {
144 return;
145 }
146
147 gPanicBase = ml_io_map_wcomb(phys_addr: reg_prop[0], size: panic_region_length[0]);
148
149 /* Deduct the size of the panic header from the panic region size */
150 panic_text_len = panic_region_length[0] - sizeof(struct embedded_panic_header);
151 gPanicSize = panic_region_length[0];
152
153#if DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE)
154 if (PE_consistent_debug_enabled()) {
155 uint64_t macos_panic_physbase = 0;
156 uint64_t macos_panic_physlen = 0;
157 /* Populate the macOS panic region data if it's present in consistent debug */
158 if (PE_consistent_debug_lookup_entry(kDbgIdMacOSPanicRegion, &macos_panic_physbase, &macos_panic_physlen)) {
159 macos_panic_base = ml_io_map_with_prot(macos_panic_physbase, macos_panic_physlen, VM_PROT_READ);
160 mac_panic_header = (struct macos_panic_header *) ((void *) macos_panic_base);
161 macos_panic_size = macos_panic_physlen;
162 }
163 }
164#endif /* DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE) */
165
166#endif
167 panic_info = (struct embedded_panic_header *)gPanicBase;
168
169 /* Check if a shared memory console is running in the panic buffer */
170 if (panic_info->eph_magic == 'SHMC') {
171 panic_console_available = TRUE;
172 return;
173 }
174
175 /* Check if there's a boot profile in the panic buffer */
176 if (panic_info->eph_magic == 'BTRC') {
177 return;
178 }
179
180 /*
181 * Check to see if a panic (FUNK) is in VRAM from the last time
182 */
183 if (panic_info->eph_magic == EMBEDDED_PANIC_MAGIC) {
184 printf(fmt: "iBoot didn't extract panic log from previous session crash, this is bad\n");
185 }
186
187 /* Clear panic region */
188 bzero(s: (void *)gPanicBase, n: gPanicSize);
189}
190
191int
192PE_initialize_console(PE_Video * info, int op)
193{
194 static int last_console = -1;
195
196 if (info && (info != &PE_state.video)) {
197 info->v_scale = PE_state.video.v_scale;
198 }
199
200 switch (op) {
201 case kPEDisableScreen:
202 initialize_screen(info, op);
203 last_console = switch_to_serial_console();
204 kprintf(fmt: "kPEDisableScreen %d\n", last_console);
205 break;
206
207 case kPEEnableScreen:
208 initialize_screen(info, op);
209 if (info) {
210 PE_state.video = *info;
211 }
212 kprintf(fmt: "kPEEnableScreen %d\n", last_console);
213 if (last_console != -1) {
214 switch_to_old_console(last_console);
215 }
216 break;
217
218 case kPEReleaseScreen:
219 /*
220 * we don't show the progress indicator on boot, but want to
221 * show it afterwards.
222 */
223 if (!vc_progress_initialized) {
224 default_progress.dx = 0;
225 default_progress.dy = 0;
226 vc_progress_initialize(desc: &default_progress,
227 data1x: default_progress_data1x,
228 data2x: default_progress_data2x,
229 data3x: default_progress_data3x,
230 clut: (unsigned char *) appleClut8);
231 vc_progress_initialized = TRUE;
232 }
233 initialize_screen(info, op);
234 break;
235
236 default:
237 initialize_screen(info, op);
238 break;
239 }
240
241 return 0;
242}
243
244void
245PE_init_iokit(void)
246{
247 DTEntry entry;
248 unsigned int size, scale;
249 unsigned long display_size;
250 void const * const *map;
251 unsigned int show_progress;
252 int *delta, image_size, flip;
253 uint32_t start_time_value = 0;
254 uint32_t debug_wait_start_value = 0;
255 uint32_t load_kernel_start_value = 0;
256 uint32_t populate_registry_time_value = 0;
257
258 PE_init_printf(TRUE);
259
260 printf(fmt: "iBoot version: %s\n", iBoot_version);
261#if defined(TARGET_OS_OSX) && defined(__arm64__)
262 printf("iBoot Stage 2 version: %s\n", iBoot_Stage_2_version);
263#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */
264
265 if (kSuccess == SecureDTLookupEntry(searchPoint: 0, pathName: "/chosen/memory-map", foundEntry: &entry)) {
266 boot_progress_element const *bootPict;
267
268 if (kSuccess == SecureDTGetProperty(entry, propertyName: "BootCLUT", propertyValue: (void const **) &map, propertySize: &size)) {
269 bcopy(src: map[0], dst: appleClut8, n: sizeof(appleClut8));
270 }
271
272 if (kSuccess == SecureDTGetProperty(entry, propertyName: "Pict-FailedBoot", propertyValue: (void const **) &map, propertySize: &size)) {
273 bootPict = (boot_progress_element const *) map[0];
274 default_noroot.width = bootPict->width;
275 default_noroot.height = bootPict->height;
276 default_noroot.dx = 0;
277 default_noroot.dy = bootPict->yOffset;
278 default_noroot_data = &bootPict->data[0];
279 }
280 }
281
282 pe_prepare_images();
283
284 scale = PE_state.video.v_scale;
285 flip = 1;
286
287#if defined(XNU_TARGET_OS_OSX)
288 int notused;
289 show_progress = TRUE;
290 if (PE_parse_boot_argn(arg_string: "-restore", arg_ptr: &notused, max_arg: sizeof(notused))) {
291 show_progress = FALSE;
292 }
293 if (PE_parse_boot_argn(arg_string: "-noprogress", arg_ptr: &notused, max_arg: sizeof(notused))) {
294 show_progress = FALSE;
295 }
296#else
297 show_progress = FALSE;
298 PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress));
299#endif /* XNU_TARGET_OS_OSX */
300 if (show_progress) {
301 /* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */
302 switch (PE_state.video.v_rotate) {
303 case 2:
304 flip = -1;
305 OS_FALLTHROUGH;
306 case 0:
307 display_size = PE_state.video.v_height;
308 image_size = default_progress.height;
309 delta = &default_progress.dy;
310 break;
311 case 1:
312 flip = -1;
313 OS_FALLTHROUGH;
314 case 3:
315 default:
316 display_size = PE_state.video.v_width;
317 image_size = default_progress.width;
318 delta = &default_progress.dx;
319 }
320 assert(*delta >= 0);
321 while (((unsigned)(*delta + image_size)) >= (display_size / 2)) {
322 *delta -= 50 * scale;
323 assert(*delta >= 0);
324 }
325 *delta *= flip;
326
327 /* Check for DT-defined progress y delta */
328 PE_get_default(property_name: "progress-dy", property_ptr: &default_progress.dy, max_property: sizeof(default_progress.dy));
329
330 vc_progress_initialize(desc: &default_progress,
331 data1x: default_progress_data1x,
332 data2x: default_progress_data2x,
333 data3x: default_progress_data3x,
334 clut: (unsigned char *) appleClut8);
335 vc_progress_initialized = TRUE;
336 }
337
338 if (kdebug_enable && kdebug_debugid_enabled(IOKDBG_CODE(DBG_BOOTER, 0))) {
339 /* Trace iBoot-provided timing information. */
340 if (kSuccess == SecureDTLookupEntry(searchPoint: 0, pathName: "/chosen/iBoot", foundEntry: &entry)) {
341 uint32_t const * value_ptr;
342
343 if (kSuccess == SecureDTGetProperty(entry, propertyName: "start-time", propertyValue: (void const **)&value_ptr, propertySize: &size)) {
344 if (size == sizeof(start_time_value)) {
345 start_time_value = *value_ptr;
346 }
347 }
348
349 if (kSuccess == SecureDTGetProperty(entry, propertyName: "debug-wait-start", propertyValue: (void const **)&value_ptr, propertySize: &size)) {
350 if (size == sizeof(debug_wait_start_value)) {
351 debug_wait_start_value = *value_ptr;
352 }
353 }
354
355 if (kSuccess == SecureDTGetProperty(entry, propertyName: "load-kernel-start", propertyValue: (void const **)&value_ptr, propertySize: &size)) {
356 if (size == sizeof(load_kernel_start_value)) {
357 load_kernel_start_value = *value_ptr;
358 }
359 }
360
361 if (kSuccess == SecureDTGetProperty(entry, propertyName: "populate-registry-time", propertyValue: (void const **)&value_ptr, propertySize: &size)) {
362 if (size == sizeof(populate_registry_time_value)) {
363 populate_registry_time_value = *value_ptr;
364 }
365 }
366 }
367
368 KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 0), start_time_value, debug_wait_start_value, load_kernel_start_value, populate_registry_time_value);
369#if CONFIG_SPTM
370 KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 1), SPTMArgs->timestamp_sk_bootstrap, SPTMArgs->timestamp_xnu_bootstrap);
371#endif
372 }
373
374 InitIOKit(dtTop: PE_state.deviceTreeHead);
375 ConfigureIOKit();
376}
377
void
PE_lockdown_iokit(void)
{
	/*
	 * KTRR/CTRR-capable arm/arm64 systems treat machine_lockdown() as a hard
	 * security boundary: only trusted kernel code and specially-entitled
	 * kexts may run before it. General-purpose IOKit matching can execute
	 * arbitrary non-entitled kext code -- potentially against attacker-
	 * controlled data such as network packets -- which would dramatically
	 * widen the attack surface of a kernel that has not yet enabled its full
	 * set of hardware protections. Matching therefore only begins here,
	 * after lockdown, rather than during IOKit initialization.
	 */
	zalloc_iokit_lockdown();
	StartIOKitMatching();
}
396
397void
398PE_slide_devicetree(vm_offset_t slide)
399{
400 assert(PE_state.initialized);
401 PE_state.deviceTreeHead = (void *)((uintptr_t)PE_state.deviceTreeHead + slide);
402 SecureDTInit(base: PE_state.deviceTreeHead, size: PE_state.deviceTreeSize);
403}
404
405void
406PE_init_platform(boolean_t vm_initialized, void *args)
407{
408 DTEntry entry;
409 unsigned int size;
410 void * const *prop;
411 boot_args *boot_args_ptr = (boot_args *) args;
412
413 if (PE_state.initialized == FALSE) {
414 page_protection_type = ml_page_protection_type();
415 PE_state.initialized = TRUE;
416 PE_state.bootArgs = boot_args_ptr;
417 PE_state.deviceTreeHead = boot_args_ptr->deviceTreeP;
418 PE_state.deviceTreeSize = boot_args_ptr->deviceTreeLength;
419 PE_state.video.v_baseAddr = boot_args_ptr->Video.v_baseAddr;
420 PE_state.video.v_rowBytes = boot_args_ptr->Video.v_rowBytes;
421 PE_state.video.v_width = boot_args_ptr->Video.v_width;
422 PE_state.video.v_height = boot_args_ptr->Video.v_height;
423 PE_state.video.v_depth = (boot_args_ptr->Video.v_depth >> kBootVideoDepthDepthShift) & kBootVideoDepthMask;
424 PE_state.video.v_rotate = (
425 ((boot_args_ptr->Video.v_depth >> kBootVideoDepthRotateShift) & kBootVideoDepthMask) + // rotation
426 ((boot_args_ptr->Video.v_depth >> kBootVideoDepthBootRotateShift) & kBootVideoDepthMask) // add extra boot rotation
427 ) % 4;
428 PE_state.video.v_scale = ((boot_args_ptr->Video.v_depth >> kBootVideoDepthScaleShift) & kBootVideoDepthMask) + 1;
429 PE_state.video.v_display = boot_args_ptr->Video.v_display;
430 strlcpy(dst: PE_state.video.v_pixelFormat, src: "BBBBBBBBGGGGGGGGRRRRRRRR", n: sizeof(PE_state.video.v_pixelFormat));
431 }
432 if (!vm_initialized) {
433 /*
434 * Setup the Device Tree routines
435 * so the console can be found and the right I/O space
436 * can be used..
437 */
438 SecureDTInit(base: PE_state.deviceTreeHead, size: PE_state.deviceTreeSize);
439 pe_identify_machine(bootArgs: boot_args_ptr);
440 } else {
441 pe_arm_init_interrupts(args);
442 pe_arm_init_debug(args);
443 }
444
445 if (!vm_initialized) {
446 if (kSuccess == (SecureDTFindEntry(propName: "name", propValue: "device-tree", entryH: &entry))) {
447 if (kSuccess == SecureDTGetProperty(entry, propertyName: "target-type",
448 propertyValue: (void const **)&prop, propertySize: &size)) {
449 if (size > sizeof(gTargetTypeBuffer)) {
450 size = sizeof(gTargetTypeBuffer);
451 }
452 bcopy(src: prop, dst: gTargetTypeBuffer, n: size);
453 gTargetTypeBuffer[size - 1] = '\0';
454 }
455 }
456 if (kSuccess == (SecureDTFindEntry(propName: "name", propValue: "device-tree", entryH: &entry))) {
457 if (kSuccess == SecureDTGetProperty(entry, propertyName: "model",
458 propertyValue: (void const **)&prop, propertySize: &size)) {
459 if (size > sizeof(gModelTypeBuffer)) {
460 size = sizeof(gModelTypeBuffer);
461 }
462 bcopy(src: prop, dst: gModelTypeBuffer, n: size);
463 gModelTypeBuffer[size - 1] = '\0';
464 }
465 }
466 if (kSuccess == SecureDTLookupEntry(NULL, pathName: "/chosen", foundEntry: &entry)) {
467 if (kSuccess == SecureDTGetProperty(entry, propertyName: "debug-enabled",
468 propertyValue: (void const **) &prop, propertySize: &size)) {
469 /*
470 * We purposefully modify a constified variable as
471 * it will get locked down by a trusted monitor or
472 * via page table mappings. We don't want people easily
473 * modifying this variable...
474 */
475#pragma clang diagnostic push
476#pragma clang diagnostic ignored "-Wcast-qual"
477 boolean_t *modify_debug_enabled = (boolean_t *) &debug_enabled;
478 if (size > sizeof(uint32_t)) {
479 size = sizeof(uint32_t);
480 }
481 bcopy(src: prop, dst: modify_debug_enabled, n: size);
482#pragma clang diagnostic pop
483 }
484 if (kSuccess == SecureDTGetProperty(entry, propertyName: "firmware-version", propertyValue: (void const **) &prop, propertySize: &size)) {
485 if (size > sizeof(iBoot_version)) {
486 size = sizeof(iBoot_version);
487 }
488 bcopy(src: prop, dst: iBoot_version, n: size);
489 iBoot_version[size - 1] = '\0';
490 }
491#if defined(TARGET_OS_OSX) && defined(__arm64__)
492 if (kSuccess == SecureDTGetProperty(entry, "system-firmware-version", (void const **) &prop, &size)) {
493 if (size > sizeof(iBoot_Stage_2_version)) {
494 size = sizeof(iBoot_Stage_2_version);
495 }
496 bcopy(prop, iBoot_Stage_2_version, size);
497 iBoot_Stage_2_version[size - 1] = '\0';
498 }
499#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */
500 if (kSuccess == SecureDTGetProperty(entry, propertyName: "unique-chip-id",
501 propertyValue: (void const **) &prop, propertySize: &size)) {
502 if (size > sizeof(gPlatformECID)) {
503 size = sizeof(gPlatformECID);
504 }
505 bcopy(src: prop, dst: gPlatformECID, n: size);
506 }
507 if (kSuccess == SecureDTGetProperty(entry, propertyName: "dram-vendor-id",
508 propertyValue: (void const **) &prop, propertySize: &size)) {
509 if (size > sizeof(gPlatformMemoryID)) {
510 size = sizeof(gPlatformMemoryID);
511 }
512 bcopy(src: prop, dst: &gPlatformMemoryID, n: size);
513 }
514 }
515 pe_init_debug();
516 }
517}
518
519void
520PE_create_console(void)
521{
522 /*
523 * Check the head of VRAM for a panic log saved on last panic.
524 * Do this before the VRAM is trashed.
525 */
526 check_for_panic_log();
527
528 if (PE_state.video.v_display) {
529 PE_initialize_console(info: &PE_state.video, kPEGraphicsMode);
530 } else {
531 PE_initialize_console(info: &PE_state.video, kPETextMode);
532 }
533}
534
535int
536PE_current_console(PE_Video * info)
537{
538 *info = PE_state.video;
539 return 0;
540}
541
542void
543PE_display_icon(__unused unsigned int flags, __unused const char *name)
544{
545 if (default_noroot_data) {
546 vc_display_icon(desc: &default_noroot, data: default_noroot_data);
547 }
548}
549
550extern boolean_t
551PE_get_hotkey(__unused unsigned char key)
552{
553 return FALSE;
554}
555
/* Consumer registered via PE_register_timebase_callback(); may be NULL. */
static timebase_callback_func gTimebaseCallback;
557
558void
559PE_register_timebase_callback(timebase_callback_func callback)
560{
561 gTimebaseCallback = callback;
562
563 PE_call_timebase_callback();
564}
565
566void
567PE_call_timebase_callback(void)
568{
569 struct timebase_freq_t timebase_freq;
570
571 timebase_freq.timebase_num = gPEClockFrequencyInfo.timebase_frequency_hz;
572 timebase_freq.timebase_den = 1;
573
574 if (gTimebaseCallback) {
575 gTimebaseCallback(&timebase_freq);
576 }
577}
578
579/*
580 * The default PE_poll_input handler.
581 */
582int
583PE_stub_poll_input(__unused unsigned int options, char *c)
584{
585 *c = (char)uart_getc();
586 return 0; /* 0 for success, 1 for unsupported */
587}
588
589/*
590 * This routine will return 1 if you are running on a device with a variant
591 * of iBoot that allows debugging. This is typically not the case on production
592 * fused parts (even when running development variants of iBoot).
593 *
594 * The routine takes an optional argument of the flags passed to debug="" so
595 * kexts don't have to parse the boot arg themselves.
596 */
597uint32_t
598PE_i_can_has_debugger(uint32_t *debug_flags)
599{
600 if (debug_flags) {
601#if DEVELOPMENT || DEBUG
602 assert(startup_phase >= STARTUP_SUB_TUNABLES);
603#endif
604 if (debug_enabled) {
605 *debug_flags = debug_boot_arg;
606 } else {
607 *debug_flags = 0;
608 }
609 }
610 return debug_enabled;
611}
612
613/*
614 * This routine returns TRUE if the device is configured
615 * with panic debugging enabled.
616 */
617boolean_t
618PE_panic_debugging_enabled()
619{
620 return panicDebugging;
621}
622
623void
624PE_update_panic_crc(unsigned char *buf, unsigned int *size)
625{
626 if (!panic_info || !size) {
627 return;
628 }
629
630 if (!buf) {
631 *size = panic_text_len;
632 return;
633 }
634
635 if (*size == 0) {
636 return;
637 }
638
639 *size = *size > panic_text_len ? panic_text_len : *size;
640 if (panic_info->eph_magic != EMBEDDED_PANIC_MAGIC) {
641 // rdar://88696402 (PanicTest: test case for MAGIC check in PE_update_panic_crc)
642 printf(fmt: "Error!! Current Magic 0x%X, expected value 0x%x", panic_info->eph_magic, EMBEDDED_PANIC_MAGIC);
643 }
644
645 /* CRC everything after the CRC itself - starting with the panic header version */
646 panic_info->eph_crc = crc32(crc: 0L, buf: &panic_info->eph_version, size: (panic_text_len +
647 sizeof(struct embedded_panic_header) - offsetof(struct embedded_panic_header, eph_version)));
648}
649
650uint32_t
651PE_get_offset_into_panic_region(char *location)
652{
653 assert(gPanicBase != 0);
654 assert(location >= (char *) gPanicBase);
655 assert((unsigned int)(location - gPanicBase) < gPanicSize);
656
657 return (uint32_t)(uintptr_t)(location - gPanicBase);
658}
659
660void
661PE_init_panicheader()
662{
663 if (!panic_info) {
664 return;
665 }
666
667 bzero(s: panic_info, n: sizeof(struct embedded_panic_header));
668
669 /*
670 * The panic log begins immediately after the panic header -- debugger synchronization and other functions
671 * may log into this region before we've become the exclusive panicking CPU and initialize the header here.
672 */
673 panic_info->eph_panic_log_offset = debug_buf_base ? PE_get_offset_into_panic_region(location: debug_buf_base) : 0;
674
675 panic_info->eph_magic = EMBEDDED_PANIC_MAGIC;
676 panic_info->eph_version = EMBEDDED_PANIC_HEADER_CURRENT_VERSION;
677
678 return;
679}
680
681/*
682 * Tries to update the panic header to keep it consistent on nested panics.
683 *
684 * NOTE: The purpose of this function is NOT to detect/correct corruption in the panic region,
685 * it is to update the panic header to make it consistent when we nest panics.
686 */
687void
688PE_update_panicheader_nestedpanic()
689{
690 /*
691 * if the panic header pointer is bogus (e.g. someone stomped on it) then bail.
692 */
693 if (!panic_info) {
694 /* if this happens in development then blow up bigly */
695 assert(panic_info);
696 return;
697 }
698
699 /*
700 * If the panic log offset is not set, re-init the panic header
701 *
702 * note that this should not be possible unless someone stomped on the panic header to zero it out, since by the time
703 * we reach this location *someone* should have appended something to the log..
704 */
705 if (panic_info->eph_panic_log_offset == 0) {
706 PE_init_panicheader();
707 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
708 return;
709 }
710
711 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
712
713 /*
714 * If the panic log length is not set, set the end to
715 * the current location of the debug_buf_ptr to close it.
716 */
717 if (panic_info->eph_panic_log_len == 0) {
718 panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(location: debug_buf_ptr);
719
720 /* indicative of corruption in the panic region, consumer beware */
721 if ((panic_info->eph_other_log_offset == 0) &&
722 (panic_info->eph_other_log_len == 0)) {
723 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
724 }
725 }
726
727 /* likely indicative of corruption in the panic region, consumer beware */
728 if (((panic_info->eph_stackshot_offset == 0) && (panic_info->eph_stackshot_len == 0)) || ((panic_info->eph_stackshot_offset != 0) && (panic_info->eph_stackshot_len != 0))) {
729 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
730 }
731
732 /*
733 * If we haven't set up the other log yet, set the beginning of the other log
734 * to the current location of the debug_buf_ptr
735 */
736 if (panic_info->eph_other_log_offset == 0) {
737 panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(location: debug_buf_ptr);
738
739 /* indicative of corruption in the panic region, consumer beware */
740 if (panic_info->eph_other_log_len == 0) {
741 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
742 }
743 }
744
745 return;
746}
747
748boolean_t
749PE_reboot_on_panic(void)
750{
751 uint32_t debug_flags;
752
753 if (PE_i_can_has_debugger(debug_flags: &debug_flags)
754 && (debug_flags & DB_NMI)) {
755 /* kernel debugging is active */
756 return FALSE;
757 } else {
758 return TRUE;
759 }
760}
761
762void
763PE_sync_panic_buffers(void)
764{
765 /*
766 * rdar://problem/26453070:
767 * The iBoot panic region is write-combined on arm64. We must flush dirty lines
768 * from L1/L2 as late as possible before reset, with no further reads of the panic
769 * region between the flush and the reset. Some targets have an additional memcache (L3),
770 * and a read may bring dirty lines out of L3 and back into L1/L2, causing the lines to
771 * be discarded on reset. If we can make sure the lines are flushed to L3/DRAM,
772 * the platform reset handler will flush any L3.
773 */
774 if (gPanicBase) {
775 CleanPoC_DcacheRegion_Force(va: gPanicBase, length: gPanicSize);
776 }
777}
778
779static void
780pe_prepare_images(void)
781{
782 if ((1 & PE_state.video.v_rotate) != 0) {
783 // Only square square images with radial symmetry are supported
784 // No need to actually rotate the data
785
786 // Swap the dx and dy offsets
787 uint32_t tmp = default_progress.dx;
788 default_progress.dx = default_progress.dy;
789 default_progress.dy = tmp;
790 }
791#if 0
792 uint32_t cnt, cnt2, cnt3, cnt4;
793 uint32_t tmp, width, height;
794 uint8_t data, *new_data;
795 const uint8_t *old_data;
796
797 width = default_progress.width;
798 height = default_progress.height * default_progress.count;
799
800 // Scale images if the UI is being scaled
801 if (PE_state.video.v_scale > 1) {
802 new_data = kalloc(width * height * scale * scale);
803 if (new_data != 0) {
804 old_data = default_progress_data;
805 default_progress_data = new_data;
806 for (cnt = 0; cnt < height; cnt++) {
807 for (cnt2 = 0; cnt2 < width; cnt2++) {
808 data = *(old_data++);
809 for (cnt3 = 0; cnt3 < scale; cnt3++) {
810 for (cnt4 = 0; cnt4 < scale; cnt4++) {
811 new_data[width * scale * cnt3 + cnt4] = data;
812 }
813 }
814 new_data += scale;
815 }
816 new_data += width * scale * (scale - 1);
817 }
818 default_progress.width *= scale;
819 default_progress.height *= scale;
820 default_progress.dx *= scale;
821 default_progress.dy *= scale;
822 }
823 }
824#endif
825}
826
827void
828PE_mark_hwaccess(uint64_t thread)
829{
830 last_hwaccess_thread = thread;
831 __builtin_arm_dmb(DMB_ISH);
832}
833
834__startup_func
835vm_size_t
836PE_init_socd_client(void)
837{
838 DTEntry entry;
839 uintptr_t const *reg_prop;
840 unsigned int size;
841
842 if (kSuccess != SecureDTLookupEntry(searchPoint: 0, pathName: "socd-trace-ram", foundEntry: &entry)) {
843 return 0;
844 }
845
846 if (kSuccess != SecureDTGetProperty(entry, propertyName: "reg", propertyValue: (void const **)&reg_prop, propertySize: &size)) {
847 return 0;
848 }
849
850 socd_trace_ram_base = ml_io_map(phys_addr: reg_prop[0], size: (vm_size_t)reg_prop[1]);
851 socd_trace_ram_size = (vm_size_t)reg_prop[1];
852
853 return socd_trace_ram_size;
854}
855
856/*
857 * PE_write_socd_client_buffer solves two problems:
858 * 1. Prevents accidentally trusting a value read from socd client buffer. socd client buffer is considered untrusted.
859 * 2. Ensures only 4 byte store instructions are used. On some platforms, socd client buffer is backed up
860 * by a SRAM that must be written to only 4 bytes at a time.
861 */
862void
863PE_write_socd_client_buffer(vm_offset_t offset, const void *buff, vm_size_t size)
864{
865 volatile uint32_t *dst = (volatile uint32_t *)(socd_trace_ram_base + offset);
866 vm_size_t len = size / sizeof(dst[0]);
867
868 assert(offset + size <= socd_trace_ram_size);
869
870 /* Perform 4 byte aligned accesses */
871 if ((offset % 4 != 0) || (size % 4 != 0)) {
872 panic("unaligned acccess to socd trace ram");
873 }
874
875 for (vm_size_t i = 0; i < len; i++) {
876 dst[i] = ((const uint32_t *)buff)[i];
877 }
878}
879