/*
 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <debug.h>
#include <mach_ldebug.h>
#include <mach_kdp.h>

#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <kern/processor.h>
#include <kern/startup.h>
#include <kern/debug.h>
#include <kern/monotonic.h>
#include <prng/random.h>
#include <kern/ecc.h>
#include <machine/machine_routines.h>
#include <machine/commpage.h>
#include <machine/config.h>
#if HIBERNATION
#include <machine/pal_hibernate.h>
#endif /* HIBERNATION */
/* ARM64_TODO unify boot.h */
#if __arm64__
#include <pexpert/arm64/boot.h>
#include <arm64/amcc_rorgn.h>
#else
#error Unsupported arch
#endif
#include <pexpert/arm/consistent_debug.h>
#include <pexpert/device_tree.h>
#include <arm64/proc_reg.h>
#include <arm/pmap.h>
#include <arm/caches_internal.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpuid_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <vm/vm_map.h>

#include <libkern/kernel_mach_header.h>
#include <libkern/stack_protector.h>
#include <libkern/section_keywords.h>
#include <san/kasan.h>
#include <sys/kdebug.h>

#include <pexpert/pexpert.h>

#include <console/serial_protos.h>

#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if KPERF
#include <kperf/kptimer.h>
#endif /* KPERF */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#endif /* HIBERNATION */

extern void patch_low_glo(void);
extern int serial_init(void);
extern void sleep_token_buffer_init(void);

extern vm_offset_t intstack_top;
#if __arm64__
extern vm_offset_t excepstack_top;
#endif

extern const char version[];
extern const char version_variant[];
extern int disableConsoleOutput;

int pc_trace_buf[PC_TRACE_BUF_SIZE] = {0};
int pc_trace_cnt = PC_TRACE_BUF_SIZE;
int debug_task;

SECURITY_READ_ONLY_LATE(bool) static_kernelcache = false;

#if HAS_BP_RET
/* Enable both branch target retention (0x2) and branch direction retention (0x1) across sleep */
uint32_t bp_ret = 3;
extern void set_bp_ret(void);
#endif

#if SCHED_HYGIENE_DEBUG
boolean_t sched_hygiene_debug_pmc = 1;
#endif

#if SCHED_HYGIENE_DEBUG

#if XNU_PLATFORM_iPhoneOS
#define DEFAULT_INTERRUPT_MASKED_TIMEOUT 12000 /* 500us */
#else
#define DEFAULT_INTERRUPT_MASKED_TIMEOUT 0xd0000 /* 35.499ms */
#endif /* XNU_PLATFORM_iPhoneOS */

TUNABLE_DT_WRITEABLE(sched_hygiene_mode_t, interrupt_masked_debug_mode,
    "machine-timeouts", "interrupt-masked-debug-mode",
    "interrupt-masked-debug-mode",
    SCHED_HYGIENE_MODE_PANIC,
    TUNABLE_DT_CHECK_CHOSEN);

MACHINE_TIMEOUT_DEV_WRITEABLE(interrupt_masked_timeout, "interrupt-masked",
    DEFAULT_INTERRUPT_MASKED_TIMEOUT, MACHINE_TIMEOUT_UNIT_TIMEBASE,
    NULL);
#if __arm64__
#define SSHOT_INTERRUPT_MASKED_TIMEOUT 0xf9999 /* 64-bit: 42.599ms */
#endif
MACHINE_TIMEOUT_DEV_WRITEABLE(stackshot_interrupt_masked_timeout, "sshot-interrupt-masked",
    SSHOT_INTERRUPT_MASKED_TIMEOUT, MACHINE_TIMEOUT_UNIT_TIMEBASE,
    NULL);
#undef SSHOT_INTERRUPT_MASKED_TIMEOUT
#endif

/*
 * A 6-second timeout will give the watchdog code a chance to run
 * before a panic is triggered by the xcall routine.
 */
#define XCALL_ACK_TIMEOUT_NS ((uint64_t) 6000000000)
uint64_t xcall_ack_timeout_abstime;

boot_args const_boot_args __attribute__((section("__DATA, __const")));
boot_args *BootArgs __attribute__((section("__DATA, __const")));

TUNABLE(uint32_t, arm_diag, "diag", 0);
#ifdef APPLETYPHOON
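/* "cpus_defeatures" boot-arg layout: one nibble per CPU; bits [4n+3:4n] hold the defeature bits for CPU n. */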
static unsigned cpus_defeatures = 0x0;
extern void cpu_defeatures_set(unsigned int);
#endif

#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
extern volatile boolean_t arm64_stall_sleep;
#endif

extern boolean_t force_immediate_debug_halt;

#if HAS_APPLE_PAC
SECURITY_READ_ONLY_LATE(boolean_t) diversify_user_jop = TRUE;
#endif

SECURITY_READ_ONLY_LATE(uint64_t) gDramBase;
SECURITY_READ_ONLY_LATE(uint64_t) gDramSize;

SECURITY_READ_ONLY_LATE(bool) serial_console_enabled = false;
#ifdef XNU_ENABLE_PROCESSOR_EXIT
SECURITY_READ_ONLY_LATE(bool) enable_processor_exit = true;
#else
SECURITY_READ_ONLY_LATE(bool) enable_processor_exit = false;
#endif


/*
 * Forward declaration
 */
void arm_init(boot_args * args);

#if __arm64__
unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */

extern void configure_misc_apple_boot_args(void);
extern void configure_misc_apple_regs(bool is_boot_cpu);
extern void configure_timer_apple_regs(void);
extern void configure_late_apple_regs(bool cold_boot);
#endif /* __arm64__ */


/*
 * JOP rebasing
 */

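/* Define dyldLogFunc as a no-op to silence logging from the dyld fixup code included below. */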
#define dyldLogFunc(msg, ...)
#include <mach/dyld_kernel_fixups.h>

extern uint32_t __thread_starts_sect_start[] __asm("section$start$__TEXT$__thread_starts");
extern uint32_t __thread_starts_sect_end[] __asm("section$end$__TEXT$__thread_starts");
#if defined(HAS_APPLE_PAC)
extern void OSRuntimeSignStructors(kernel_mach_header_t *);
extern void OSRuntimeSignStructorsInFileset(kernel_mach_header_t *);
#endif /* defined(HAS_APPLE_PAC) */

extern vm_offset_t vm_kernel_slide;
extern vm_offset_t segLOWESTKC, segHIGHESTKC, segLOWESTROKC, segHIGHESTROKC;
extern vm_offset_t segLOWESTAuxKC, segHIGHESTAuxKC, segLOWESTROAuxKC, segHIGHESTROAuxKC;
extern vm_offset_t segLOWESTRXAuxKC, segHIGHESTRXAuxKC, segHIGHESTNLEAuxKC;

static void
arm_slide_rebase_and_sign_image(void)
{
	kernel_mach_header_t *k_mh, *kc_mh = NULL;
	kernel_segment_command_t *seg;
	uintptr_t slide;

	k_mh = &_mh_execute_header;
	if (kernel_mach_header_is_in_fileset(k_mh)) {
		/*
		 * The kernel is part of a MH_FILESET kernel collection, determine slide
		 * based on first segment's mach-o vmaddr (requires first kernel load
		 * command to be LC_SEGMENT_64 of the __TEXT segment)
		 */
		seg = (kernel_segment_command_t *)((uintptr_t)k_mh + sizeof(*k_mh));
		assert(seg->cmd == LC_SEGMENT_KERNEL);
		slide = (uintptr_t)k_mh - seg->vmaddr;

		/*
		 * The kernel collection linker guarantees that the boot collection mach
		 * header vmaddr is the hardcoded kernel link address (as specified to
		 * ld64 when linking the kernel).
		 */
		kc_mh = (kernel_mach_header_t*)(VM_KERNEL_LINK_ADDRESS + slide);
		assert(kc_mh->filetype == MH_FILESET);

		/*
		 * rebase and sign jops
		 * Note that we can't call any functions before this point, so
		 * we have to hard-code the knowledge that the base of the KC
		 * is the KC's mach-o header. This would change if any
		 * segment's VA started *before* the text segment
		 * (as the HIB segment does on x86).
		 */
		const void *collection_base_pointers[KCNumKinds] = {[0] = kc_mh, };
		kernel_collection_slide((struct mach_header_64 *)kc_mh, collection_base_pointers);

		PE_set_kc_header(KCKindPrimary, kc_mh, slide);

		/*
		 * iBoot doesn't slide load command vmaddrs in an MH_FILESET kernel
		 * collection, so adjust them now, and determine the vmaddr range
		 * covered by read-only segments for the CTRR rorgn.
		 */
		kernel_collection_adjust_mh_addrs((struct mach_header_64 *)kc_mh, slide, false,
		    (uintptr_t *)&segLOWESTKC, (uintptr_t *)&segHIGHESTKC,
		    (uintptr_t *)&segLOWESTROKC, (uintptr_t *)&segHIGHESTROKC,
		    NULL, NULL, NULL);
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructorsInFileset(kc_mh);
#endif /* defined(HAS_APPLE_PAC) */
	} else {
		/*
		 * Static kernelcache: iBoot slid kernel MachO vmaddrs, determine slide
		 * using hardcoded kernel link address
		 */
		slide = (uintptr_t)k_mh - VM_KERNEL_LINK_ADDRESS;

		/* rebase and sign jops */
		static_kernelcache = &__thread_starts_sect_end[0] != &__thread_starts_sect_start[0];
		if (static_kernelcache) {
			rebase_threaded_starts(&__thread_starts_sect_start[0],
			    &__thread_starts_sect_end[0],
			    (uintptr_t)k_mh, (uintptr_t)k_mh - slide, slide);
		}
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructors(&_mh_execute_header);
#endif /* defined(HAS_APPLE_PAC) */
	}


	/*
	 * Initialize slide global here to avoid duplicating this logic in
	 * arm_vm_init()
	 */
	vm_kernel_slide = slide;
}

void
arm_auxkc_init(void *mh, void *base)
{
	/*
	 * The kernel collection linker guarantees that the lowest vmaddr in an
	 * AuxKC collection is 0 (but note that the mach header is higher up since
	 * RW segments precede RO segments in the AuxKC).
	 */
	uintptr_t slide = (uintptr_t)base;
	kernel_mach_header_t *akc_mh = (kernel_mach_header_t*)mh;

	assert(akc_mh->filetype == MH_FILESET);
	PE_set_kc_header_and_base(KCKindAuxiliary, akc_mh, base, slide);

	/* rebase and sign jops */
	const void *collection_base_pointers[KCNumKinds];
	memcpy(collection_base_pointers, PE_get_kc_base_pointers(), sizeof(collection_base_pointers));
	kernel_collection_slide((struct mach_header_64 *)akc_mh, collection_base_pointers);

	kernel_collection_adjust_mh_addrs((struct mach_header_64 *)akc_mh, slide, false,
	    (uintptr_t *)&segLOWESTAuxKC, (uintptr_t *)&segHIGHESTAuxKC, (uintptr_t *)&segLOWESTROAuxKC,
	    (uintptr_t *)&segHIGHESTROAuxKC, (uintptr_t *)&segLOWESTRXAuxKC, (uintptr_t *)&segHIGHESTRXAuxKC,
	    (uintptr_t *)&segHIGHESTNLEAuxKC);
#if defined(HAS_APPLE_PAC)
	OSRuntimeSignStructorsInFileset(akc_mh);
#endif /* defined(HAS_APPLE_PAC) */
}

/*
 * Routine: arm_setup_pre_sign
 * Function: Perform HW initialization that must happen ahead of the first PAC sign
 * operation.
 */
static void
arm_setup_pre_sign(void)
{
#if __arm64__
	/* DATA TBI, if enabled, affects the number of VA bits that contain the signature */
	arm_set_kernel_tbi();
#endif /* __arm64__ */
}

/*
 * Routine: arm_init
 * Function: Runs on the boot CPU, once, on entry from iBoot.
 */

__startup_func
void
arm_init(
	boot_args *args)
{
	unsigned int maxmem;
	uint32_t memsize;
	uint64_t xmaxmem;
	thread_t thread;
	DTEntry chosen = NULL;
	unsigned int dt_entry_size = 0;

	arm_setup_pre_sign();

	arm_slide_rebase_and_sign_image();

	/* If kernel integrity is supported, use a constant copy of the boot args. */
	const_boot_args = *args;
	BootArgs = args = &const_boot_args;

	cpu_data_init(&BootCpuData);
#if defined(HAS_APPLE_PAC)
	/* The bootstrap CPU's process-dependent keys for the kernel were loaded by start.s. */
	BootCpuData.rop_key = ml_default_rop_pid();
	BootCpuData.jop_key = ml_default_jop_pid();
#endif /* defined(HAS_APPLE_PAC) */

	PE_init_platform(FALSE, args); /* Get platform expert set up */

#if __arm64__
	configure_timer_apple_regs();
	wfe_timeout_configure();
	wfe_timeout_init();

	configure_misc_apple_boot_args();
	configure_misc_apple_regs(true);

#if (DEVELOPMENT || DEBUG)
	unsigned long const *platform_stall_ptr = NULL;

	if (SecureDTLookupEntry(NULL, "/chosen", &chosen) != kSuccess) {
		panic("%s: Unable to find 'chosen' DT node", __FUNCTION__);
	}

	// Cannot use a TUNABLE here because TUNABLEs are parsed at a later point.
	if (SecureDTGetProperty(chosen, "xnu_platform_stall", (void const **)&platform_stall_ptr,
	    &dt_entry_size) == kSuccess) {
		xnu_platform_stall_value = *platform_stall_ptr;
	}

	platform_stall_panic_or_spin(PLATFORM_STALL_XNU_LOCATION_ARM_INIT);

	chosen = NULL; // Force a re-lookup later on since VM addresses are not final at this point
	dt_entry_size = 0;
#endif


	{
		/*
		 * Select the advertised kernel page size.
		 */
		if (args->memSize > 1ULL * 1024 * 1024 * 1024) {
			/*
			 * arm64 device with > 1GB of RAM:
			 * kernel uses 16KB pages.
			 */
			PAGE_SHIFT_CONST = PAGE_MAX_SHIFT;
		} else {
			/*
			 * arm64 device with <= 1GB of RAM:
			 * kernel uses hardware page size
			 * (4KB for H6/H7, 16KB for H8+).
			 */
			PAGE_SHIFT_CONST = ARM_PGSHIFT;
		}

		/* 32-bit apps always see 16KB page size */
		page_shift_user32 = PAGE_MAX_SHIFT;
#ifdef APPLETYPHOON
		if (PE_parse_boot_argn("cpus_defeatures", &cpus_defeatures, sizeof(cpus_defeatures))) {
			if ((cpus_defeatures & 0xF) != 0) {
				cpu_defeatures_set(cpus_defeatures & 0xF);
			}
		}
#endif
	}
#endif

	ml_parse_cpu_topology();


	master_cpu = ml_get_boot_cpu_number();
	assert(master_cpu >= 0 && master_cpu <= ml_get_max_cpu_number());

	BootCpuData.cpu_number = (unsigned short)master_cpu;
	BootCpuData.intstack_top = (vm_offset_t) &intstack_top;
	BootCpuData.istackptr = &intstack_top;
#if __arm64__
	BootCpuData.excepstack_top = (vm_offset_t) &excepstack_top;
#endif
	CpuDataEntries[master_cpu].cpu_data_vaddr = &BootCpuData;
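	/* Derive the physical address of BootCpuData from its VA using the phys/virt bases iBoot supplied in the boot args. */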
	CpuDataEntries[master_cpu].cpu_data_paddr = (void *)((uintptr_t)(args->physBase)
	    + ((uintptr_t)&BootCpuData
	    - (uintptr_t)(args->virtBase)));

	thread = thread_bootstrap();
	thread->machine.CpuDatap = &BootCpuData;
	thread->machine.pcpu_data_base = (vm_offset_t)0;
	machine_set_current_thread(thread);

	/*
	 * Preemption is enabled for this thread so that it can lock mutexes without
	 * tripping the preemption check. In reality scheduling is not enabled until
	 * this thread completes, and there are no other threads to switch to, so
	 * preemption level is not really meaningful for the bootstrap thread.
	 */
	thread->machine.preemption_count = 0;
	cpu_bootstrap();

	rtclock_early_init();

	kernel_debug_string_early("kernel_startup_bootstrap");
	kernel_startup_bootstrap();

	/*
	 * Initialize the timer callout world
	 */
	timer_call_init();

	cpu_init();

	processor_bootstrap();

	if (PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) {
		xmaxmem = (uint64_t) maxmem * (1024 * 1024);
	} else if (PE_get_default("hw.memsize", &memsize, sizeof(memsize))) {
		xmaxmem = (uint64_t) memsize;
	} else {
		xmaxmem = 0;
	}

#if SCHED_HYGIENE_DEBUG
	{
		int wdt_boot_arg = 0;
		bool const wdt_disabled = (PE_parse_boot_argn("wdt", &wdt_boot_arg, sizeof(wdt_boot_arg)) && (wdt_boot_arg == -1));

		/* Disable if WDT is disabled */
		if (wdt_disabled || kern_feature_override(KF_INTERRUPT_MASKED_DEBUG_OVRD)) {
			interrupt_masked_debug_mode = SCHED_HYGIENE_MODE_OFF;
		} else if (kern_feature_override(KF_SCHED_HYGIENE_DEBUG_PMC_OVRD)) {
			/*
			 * The sched hygiene facility can, in addition to checking time, capture
			 * metrics provided by the cycle and instruction counters available in some
			 * systems. Check if we should enable this feature based on the validation
			 * overrides.
			 */
			sched_hygiene_debug_pmc = 0;
		}

		if (wdt_disabled || kern_feature_override(KF_PREEMPTION_DISABLED_DEBUG_OVRD)) {
			sched_preemption_disable_debug_mode = SCHED_HYGIENE_MODE_OFF;
		}
	}
#endif /* SCHED_HYGIENE_DEBUG */

	nanoseconds_to_absolutetime(XCALL_ACK_TIMEOUT_NS, &xcall_ack_timeout_abstime);

#if HAS_BP_RET
	PE_parse_boot_argn("bpret", &bp_ret, sizeof(bp_ret));
	set_bp_ret(); // Apply branch predictor retention settings to boot CPU
#endif

	PE_parse_boot_argn("immediate_NMI", &force_immediate_debug_halt, sizeof(force_immediate_debug_halt));

#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */

	arm_vm_init(xmaxmem, args);

	if (debug_boot_arg) {
		patch_low_glo();
	}

#if __arm64__ && WITH_CLASSIC_S2R
	sleep_token_buffer_init();
#endif

	PE_consistent_debug_inherit();

	/* Setup debugging output. */
	const unsigned int serial_exists = serial_init();
	kernel_startup_initialize_upto(STARTUP_SUB_KPRINTF);
	kprintf("kprintf initialized\n");

	serialmode = 0;
	if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {
		/* Do we want a serial keyboard and/or console? */
		kprintf("Serial mode specified: %08X\n", serialmode);
		disable_iolog_serial_output = (serialmode & SERIALMODE_NO_IOLOG) != 0;
		enable_dklog_serial_output = (serialmode & SERIALMODE_DKLOG) != 0;
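		/* Synchronous draining can be requested via the serial bitmask or the standalone "drain_uart_sync" boot-arg. */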
		int force_sync = serialmode & SERIALMODE_SYNCDRAIN;
		if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) {
			if (force_sync) {
				serialmode |= SERIALMODE_SYNCDRAIN;
				kprintf(
				    "WARNING: Forcing uart driver to output synchronously."
				    "printf()s/IOLogs will impact kernel performance.\n"
				    "You are advised to avoid using 'drain_uart_sync' boot-arg.\n");
			}
		}
		/* If on-demand is selected, disable serials until reception. */
		bool on_demand = !!(serialmode & SERIALMODE_ON_DEMAND);
		if (on_demand && !(serialmode & SERIALMODE_INPUT)) {
			kprintf(
			    "WARNING: invalid serial boot-args : ON_DEMAND (0x%x) flag "
			    "requires INPUT(0x%x). Ignoring ON_DEMAND.\n",
			    SERIALMODE_ON_DEMAND, SERIALMODE_INPUT
			    );
			on_demand = 0;
		}
		serial_set_on_demand(on_demand);
	}
	if (kern_feature_override(KF_SERIAL_OVRD)) {
		serialmode = 0;
	}

	/* Start serial if requested and a serial device was enumerated in serial_init(). */
	if ((serialmode & SERIALMODE_OUTPUT) && serial_exists) {
		serial_console_enabled = true;
		(void)switch_to_serial_console(); /* Switch into serial mode from video console */
		disableConsoleOutput = FALSE;     /* Allow printfs to happen */
	}
	PE_create_console();

	/* setup console output */
	PE_init_printf(FALSE);

#if __arm64__
#if DEBUG
	dump_kva_space();
#endif
#endif

	cpu_machine_idle_init(TRUE);

	PE_init_platform(TRUE, &BootCpuData);

#if __arm64__
	extern bool cpu_config_correct;
	if (!cpu_config_correct) {
		panic("The cpumask=N boot arg cannot be used together with cpus=N, and the boot CPU must be enabled");
	}

	ml_map_cpu_pio();

#if APPLE_ARM64_ARCH_FAMILY
	configure_late_apple_regs(true);
#endif

#endif

	cpu_timebase_init(TRUE);

#if KPERF
	/* kptimer_curcpu_up() must be called after cpu_timebase_init */
	kptimer_curcpu_up();
#endif /* KPERF */

	PE_init_cpu();
	fiq_context_init(TRUE);


#if HIBERNATION
	pal_hib_init();
#endif /* HIBERNATION */

	/*
	 * gPhysBase/Size only represent kernel-managed memory. These globals represent
	 * the actual DRAM base address and size as reported by iBoot through the
	 * device tree.
	 */
	unsigned long const *dram_base;
	unsigned long const *dram_size;

	if (SecureDTLookupEntry(NULL, "/chosen", &chosen) != kSuccess) {
		panic("%s: Unable to find 'chosen' DT node", __FUNCTION__);
	}

	if (SecureDTGetProperty(chosen, "dram-base", (void const **)&dram_base, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-base' entry in the 'chosen' DT node", __FUNCTION__);
	}

	if (SecureDTGetProperty(chosen, "dram-size", (void const **)&dram_size, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-size' entry in the 'chosen' DT node", __FUNCTION__);
	}

	gDramBase = *dram_base;
	gDramSize = *dram_size;

	/*
	 * Initialize the stack protector for all future calls
	 * to C code. Since kernel_bootstrap() eventually
	 * switches stack context without returning through this
	 * function, we do not risk failing the check even though
	 * we mutate the guard word during execution.
	 */
	__stack_chk_guard = (unsigned long)early_random();
	/*
	 * Zero a byte of the guard so that it always contains a NUL, which
	 * keeps C-string operations from copying or forging the full value.
	 */
	__stack_chk_guard &= ~(0xFFULL << 8);
	machine_startup(args);
}

/*
 * Routine: arm_init_cpu
 * Function:
 *	Runs on S2R resume (all CPUs) and SMP boot (non-boot CPUs only).
 */

void
arm_init_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif

#ifdef __arm64__
	configure_timer_apple_regs();
	configure_misc_apple_regs(false);
#endif

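	/* This CPU is (re)starting; mark it as no longer asleep. */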
	cpu_data_ptr->cpu_flags &= ~SleepState;


	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

#if APPLE_ARM64_ARCH_FAMILY
	configure_late_apple_regs(false);
#endif

#if HIBERNATION
	if ((cpu_data_ptr == &BootCpuData) && (gIOHibernateState == kIOHibernateStateWakingFromHibernate)) {
		// the "normal" S2R code captures wake_abstime too early, so on a hibernation resume we fix it up here
		extern uint64_t wake_abstime;
		wake_abstime = gIOHibernateCurrentHeader->lastHibAbsTime;

		// since the hw clock stops ticking across hibernation, we need to apply an offset;
		// iBoot computes this offset for us and passes it via the hibernation header
		extern uint64_t hwclock_conttime_offset;
		hwclock_conttime_offset = gIOHibernateCurrentHeader->hwClockOffset;

		// during hibernation, we captured the idle thread's state from inside the PPL context, so we have to
		// fix up its preemption count
		unsigned int expected_preemption_count = (gEnforcePlatformActionSafety ? 2 : 1);
		if (get_preemption_level_for_thread(cpu_data_ptr->cpu_active_thread) !=
		    expected_preemption_count) {
			panic("unexpected preemption count %u on boot cpu thread (should be %u)",
			    get_preemption_level_for_thread(cpu_data_ptr->cpu_active_thread),
			    expected_preemption_count);
		}
		cpu_data_ptr->cpu_active_thread->machine.preemption_count--;
	}
#endif /* HIBERNATION */

#if __arm64__
	wfe_timeout_init();
	pmap_clear_user_ttb();
	flush_mmu_tlb();
#endif

	cpu_machine_idle_init(FALSE);

	cpu_init();

#ifdef APPLETYPHOON
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
#endif
	/*
	 * Initialize the timebase before serial_init, as some serial
	 * drivers use mach_absolute_time() to implement rate control.
	 */
	cpu_timebase_init(FALSE);

#if KPERF
	/* kptimer_curcpu_up() must be called after cpu_timebase_init */
	kptimer_curcpu_up();
#endif /* KPERF */

	if (cpu_data_ptr == &BootCpuData) {
#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * Prevent CPUs from going into deep sleep until all
		 * CPUs are ready to do so.
		 */
		arm64_stall_sleep = TRUE;
#endif
		serial_init();
		PE_init_platform(TRUE, NULL);
		commpage_update_timebase();
	}
	PE_init_cpu();

	fiq_context_init(TRUE);
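	/* Reset rtcPop to EndOfAllTime (i.e. no pending timer deadline) before resyncing deadlines. */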
	cpu_data_ptr->rtcPop = EndOfAllTime;
	timer_resync_deadlines();

	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);
	bool should_kprintf = processor_should_kprintf(processor, true);

#if DEVELOPMENT || DEBUG
	PE_arm_debug_enable_trace(should_kprintf);
#endif /* DEVELOPMENT || DEBUG */

#if KERNEL_INTEGRITY_KTRR || KERNEL_INTEGRITY_CTRR
	rorgn_validate_core();
#endif


	if (should_kprintf) {
		kprintf("arm_init_cpu(): cpu %d online\n", cpu_data_ptr->cpu_number);
	}

	if (cpu_data_ptr == &BootCpuData) {
		if (kdebug_enable == 0) {
			__kdebug_only uint64_t elapsed = kdebug_wake();
			KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), mach_absolute_time() - elapsed);
		}

#if CONFIG_TELEMETRY
		bootprofile_wake_from_sleep();
#endif /* CONFIG_TELEMETRY */
	}
#if CONFIG_CPU_COUNTERS
	mt_wake_per_core();
#endif /* CONFIG_CPU_COUNTERS */

#if defined(KERNEL_INTEGRITY_CTRR)
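	/* Publish that CTRR is now locked for this CPU's cluster and wake any threads waiting on that transition. */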
	if (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKED) {
		lck_spin_lock(&ctrr_cpu_start_lck);
		ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKED;
		thread_wakeup(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]);
		lck_spin_unlock(&ctrr_cpu_start_lck);
	}
#endif


	slave_main(NULL);
}

/*
 * Routine: arm_init_idle_cpu
 * Function: Resume from non-retention WFI. Called from the reset vector.
 */
void __attribute__((noreturn))
arm_init_idle_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

#if __arm64__
	wfe_timeout_init();
	pmap_clear_user_ttb();
	flush_mmu_tlb();
	/* Enable asynchronous exceptions */
	__builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF);
#endif

#ifdef APPLETYPHOON
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
#endif

	/*
	 * Update the active debug object to reflect that debug registers have been reset.
	 * This will force any thread with active debug state to resync the debug registers
	 * if it returns to userspace on this CPU.
	 */
	if (cpu_data_ptr->cpu_user_debug != NULL) {
		arm_debug_set(NULL);
	}

	fiq_context_init(FALSE);

	cpu_idle_exit(TRUE);
}