1/*
2 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31
32#include <debug.h>
33#include <mach_ldebug.h>
34#include <mach_kdp.h>
35
36#include <kern/misc_protos.h>
37#include <kern/thread.h>
38#include <kern/timer_queue.h>
39#include <kern/processor.h>
40#include <kern/startup.h>
41#include <kern/debug.h>
42#include <prng/random.h>
43#include <machine/machine_routines.h>
44#include <machine/commpage.h>
45/* ARM64_TODO unify boot.h */
46#if __arm64__
47#include <pexpert/arm64/boot.h>
48#elif __arm__
49#include <pexpert/arm/boot.h>
50#else
51#error Unsupported arch
52#endif
53#include <pexpert/arm/consistent_debug.h>
54#include <pexpert/device_tree.h>
55#include <arm/proc_reg.h>
56#include <arm/pmap.h>
57#include <arm/caches_internal.h>
58#include <arm/cpu_internal.h>
59#include <arm/cpu_data_internal.h>
60#include <arm/misc_protos.h>
61#include <arm/machine_cpu.h>
62#include <arm/rtclock.h>
63#include <vm/vm_map.h>
64
65#include <libkern/kernel_mach_header.h>
66#include <libkern/stack_protector.h>
67#include <libkern/section_keywords.h>
68#include <san/kasan.h>
69
70#include <pexpert/pexpert.h>
71
72#include <console/serial_protos.h>
73
74#if CONFIG_TELEMETRY
75#include <kern/telemetry.h>
76#endif
77#if MONOTONIC
78#include <kern/monotonic.h>
79#endif /* MONOTONIC */
80
extern void patch_low_glo(void);
extern int serial_init(void);
extern void sleep_token_buffer_init(void);

/* Boot stack tops, laid down by the start-up assembly. */
extern vm_offset_t intstack_top;
#if __arm64__
extern vm_offset_t excepstack_top;
#else
extern vm_offset_t fiqstack_top;
#endif

extern const char version[];
extern const char version_variant[];
extern int disableConsoleOutput;

/* Ring buffer of recent PC samples used by low-level debug tooling. */
int pc_trace_buf[PC_TRACE_BUF_SIZE] = {0};
int pc_trace_cnt = PC_TRACE_BUF_SIZE;
int debug_task;

boolean_t up_style_idle_exit = 0;



#if INTERRUPT_MASKED_DEBUG
boolean_t interrupt_masked_debug = 1;
/* Default interrupts-masked timeout, in timebase ticks. */
uint64_t interrupt_masked_timeout = 0xd0000;
#endif

/* Immutable copy of the boot args, kept in __DATA,__const so it can be
 * write-protected once the kernel is up; BootArgs points at it. */
boot_args const_boot_args __attribute__((section("__DATA, __const")));
boot_args *BootArgs __attribute__((section("__DATA, __const")));

unsigned int arm_diag;
#ifdef APPLETYPHOON
/* Per-CPU defeature nibbles from the "cpus_defeatures" boot-arg
 * (4 bits per CPU, indexed by cpu_number). */
static unsigned cpus_defeatures = 0x0;
extern void cpu_defeatures_set(unsigned int);
#endif

#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
extern volatile boolean_t arm64_stall_sleep;
#endif

extern boolean_t force_immediate_debug_halt;

/* Bits that must all be set in the "debug" boot-arg before the
 * low-globals page is patched (see arm_init). */
#define MIN_LOW_GLO_MASK (0x144)

/*
 * Forward declaration
 */
void arm_init(boot_args * args);

#if __arm64__
unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */
#endif /* __arm64__ */
134
135
136/*
137 * JOP rebasing
138 */
139
140
141// Note, the following should come from a header from dyld
/*
 * Walk one chain of dyld "threaded rebase" entries, sliding each rebased
 * pointer in place.  Every 64-bit slot encodes, in bits [51..61], the delta
 * (in stepMultiplier units) to the next entry; a delta of 0 ends the chain.
 * Bit 63 marks an authenticated (signed) pointer and bit 62 distinguishes
 * bind (1) from rebase (0) — only rebases are processed here.
 * NOTE: baseAddress is marked __unused but is in fact consumed by the
 * authenticated-rebase branch below.
 */
static void
rebase_chain(uintptr_t chainStartAddress, uint64_t stepMultiplier, uintptr_t baseAddress __unused, uint64_t slide)
{
	uint64_t delta = 0;
	uintptr_t address = chainStartAddress;
	do {
		uint64_t value = *(uint64_t*)address;

		bool isAuthenticated = (value & (1ULL << 63)) != 0;
		bool isRebase = (value & (1ULL << 62)) == 0;
		if (isRebase) {
			if (isAuthenticated) {
				// The new value for a rebase is the low 32-bits of the threaded value plus the slide.
				uint64_t newValue = (value & 0xFFFFFFFF) + slide;
				// Add in the offset from the mach_header
				newValue += baseAddress;
				*(uint64_t*)address = newValue;

			} else
			{
				// Regular pointer which needs to fit in 51-bits of value.
				// C++ RTTI uses the top bit, so we'll allow the whole top-byte
				// and the bottom 43-bits to be fit in to 51-bits.
				uint64_t top8Bits = value & 0x0007F80000000000ULL;
				uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
				// Sign-extend the 43-bit target (shift up, arithmetic shift
				// back down) and splice the preserved top byte back in.
				uint64_t targetValue = ( top8Bits << 13 ) | (((intptr_t)(bottom43Bits << 21) >> 21) & 0x00FFFFFFFFFFFFFF);
				targetValue = targetValue + slide;
				*(uint64_t*)address = targetValue;
			}
		}

		// The delta is bits [51..61]
		// And bit 62 is to tell us if we are a rebase (0) or bind (1)
		value &= ~(1ULL << 62);
		delta = ( value & 0x3FF8000000000000 ) >> 51;
		address += delta * stepMultiplier;
	} while ( delta != 0 );
}
180
181// Note, the following method should come from a header from dyld
// Note, the following method should come from a header from dyld
/*
 * Process the __TEXT,__thread_starts section.  The first 32-bit word is a
 * header whose low bit selects the pointer stride (8 bytes when set, else 4);
 * each subsequent word is the offset of a rebase chain from the mach header,
 * with 0xFFFFFFFF terminating the list early.  Always reports success.
 */
static bool
rebase_threaded_starts(uint32_t *threadArrayStart, uint32_t *threadArrayEnd,
		uintptr_t macho_header_addr, uintptr_t macho_header_vmaddr, size_t slide)
{
	const uint32_t sectionHeader = *threadArrayStart;
	const uint64_t stride = (sectionHeader & 1) ? 8 : 4;
	uint32_t *cursor = threadArrayStart + 1;

	/* Each entry starts a chain; stop at the end marker or array end. */
	while (cursor != threadArrayEnd && *cursor != 0xFFFFFFFF) {
		rebase_chain(macho_header_addr + *cursor, stride, macho_header_vmaddr, slide);
		++cursor;
	}
	return true;
}
195
196/*
197 * Routine: arm_init
198 * Function:
199 */
200
201extern uint32_t __thread_starts_sect_start[] __asm("section$start$__TEXT$__thread_starts");
202extern uint32_t __thread_starts_sect_end[] __asm("section$end$__TEXT$__thread_starts");
203
/*
 * Runs once on the boot CPU, straight from the bootstrap assembly:
 * rebases threaded (JOP) pointers, snapshots the boot args, selects the
 * advertised page size, sets up the boot cpu_data and bootstrap thread,
 * brings up VM, console and timebase, then hands off to machine_startup()
 * (which does not return).
 */
void
arm_init(
	boot_args *args)
{
	unsigned int maxmem;
	uint32_t memsize;
	uint64_t xmaxmem;
	thread_t thread;
	processor_t my_master_proc;

	// rebase and sign jops
	if (&__thread_starts_sect_end[0] != &__thread_starts_sect_start[0])
	{
		uintptr_t mh = (uintptr_t) &_mh_execute_header;
		uintptr_t slide = mh - VM_KERNEL_LINK_ADDRESS;
		rebase_threaded_starts( &__thread_starts_sect_start[0],
				&__thread_starts_sect_end[0],
				mh, mh - slide, slide);
	}

	/* If kernel integrity is supported, use a constant copy of the boot args. */
	const_boot_args = *args;
	BootArgs = args = &const_boot_args;

	cpu_data_init(&BootCpuData);

	PE_init_platform(FALSE, args); /* Get platform expert set up */

#if __arm64__


	{
		unsigned int tmp_16k = 0;

#ifdef XXXX
		/*
		 * Select the advertised kernel page size; without the boot-arg
		 * we default to the hardware page size for the current platform.
		 */
		if (PE_parse_boot_argn("-vm16k", &tmp_16k, sizeof(tmp_16k)))
			PAGE_SHIFT_CONST = PAGE_MAX_SHIFT;
		else
			PAGE_SHIFT_CONST = ARM_PGSHIFT;
#else
		/*
		 * Select the advertised kernel page size; boot-args can force
		 * either the old-mode policy or the hardware page size for the
		 * current platform.
		 */
		int radar_20804515 = 1; /* default: new mode */
		PE_parse_boot_argn("radar_20804515", &radar_20804515, sizeof(radar_20804515));
		if (radar_20804515) {
			if (args->memSize > 1ULL*1024*1024*1024) {
				/*
				 * arm64 device with > 1GB of RAM:
				 * kernel uses 16KB pages.
				 */
				PAGE_SHIFT_CONST = PAGE_MAX_SHIFT;
			} else {
				/*
				 * arm64 device with <= 1GB of RAM:
				 * kernel uses hardware page size
				 * (4KB for H6/H7, 16KB for H8+).
				 */
				PAGE_SHIFT_CONST = ARM_PGSHIFT;
			}
			/* 32-bit apps always see 16KB page size */
			page_shift_user32 = PAGE_MAX_SHIFT;
		} else {
			/* kernel page size: */
			if (PE_parse_boot_argn("-use_hwpagesize", &tmp_16k, sizeof(tmp_16k)))
				PAGE_SHIFT_CONST = ARM_PGSHIFT;
			else
				PAGE_SHIFT_CONST = PAGE_MAX_SHIFT;
			/* old mode: 32-bit apps see same page size as kernel */
			page_shift_user32 = PAGE_SHIFT_CONST;
		}
#endif
#ifdef APPLETYPHOON
		if (PE_parse_boot_argn("cpus_defeatures", &cpus_defeatures, sizeof(cpus_defeatures))) {
			if ((cpus_defeatures & 0xF) != 0)
				cpu_defeatures_set(cpus_defeatures & 0xF);
		}
#endif
	}
#endif

	ml_parse_cpu_topology();

	master_cpu = ml_get_boot_cpu_number();
	assert(master_cpu >= 0 && master_cpu <= ml_get_max_cpu_number());

	/* Populate the boot CPU's cpu_data and register it in CpuDataEntries. */
	BootCpuData.cpu_number = (unsigned short)master_cpu;
#if __arm__
	BootCpuData.cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable;
#endif
	BootCpuData.intstack_top = (vm_offset_t) & intstack_top;
	BootCpuData.istackptr = BootCpuData.intstack_top;
#if __arm64__
	BootCpuData.excepstack_top = (vm_offset_t) & excepstack_top;
	BootCpuData.excepstackptr = BootCpuData.excepstack_top;
#else
	BootCpuData.fiqstack_top = (vm_offset_t) & fiqstack_top;
	BootCpuData.fiqstackptr = BootCpuData.fiqstack_top;
#endif
	BootCpuData.cpu_processor = cpu_processor_alloc(TRUE);
	BootCpuData.cpu_console_buf = (void *)NULL;
	CpuDataEntries[master_cpu].cpu_data_vaddr = &BootCpuData;
	/* Physical address of the boot cpu_data, derived from the
	 * virtual-to-physical offset supplied in the boot args. */
	CpuDataEntries[master_cpu].cpu_data_paddr = (void *)((uintptr_t)(args->physBase)
	                                      + ((uintptr_t)&BootCpuData
	                                      - (uintptr_t)(args->virtBase)));

	thread_bootstrap();
	thread = current_thread();
	/*
	 * Preemption is enabled for this thread so that it can lock mutexes without
	 * tripping the preemption check. In reality scheduling is not enabled until
	 * this thread completes, and there are no other threads to switch to, so
	 * preemption level is not really meaningful for the bootstrap thread.
	 */
	thread->machine.preemption_count = 0;
	thread->machine.CpuDatap = &BootCpuData;
#if __arm__ && __ARM_USER_PROTECT__
	{
		/* Snapshot the current TTBR0/TTBR1/TTBCR so context-switch code
		 * has valid translation-table state for the bootstrap thread. */
		unsigned int ttbr0_val, ttbr1_val, ttbcr_val;
		__asm__ volatile("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
		__asm__ volatile("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		__asm__ volatile("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val));
		thread->machine.uptw_ttb = ttbr0_val;
		thread->machine.kptw_ttb = ttbr1_val;
		thread->machine.uptw_ttc = ttbcr_val;
	}
#endif
	BootCpuData.cpu_processor->processor_data.kernel_timer = &thread->system_timer;
	BootCpuData.cpu_processor->processor_data.thread_timer = &thread->system_timer;

	cpu_bootstrap();

	rtclock_early_init();

	kernel_early_bootstrap();

	cpu_init();

	/* Point the entropy accumulator at the start of its buffer. */
	EntropyData.index_ptr = EntropyData.buffer;

	processor_bootstrap();
	my_master_proc = master_processor;

	(void)PE_parse_boot_argn("diag", &arm_diag, sizeof (arm_diag));

	/* Memory limit: "maxmem" boot-arg (in MB) wins, then the platform
	 * default "hw.memsize" (in bytes), else 0 meaning no limit. */
	if (PE_parse_boot_argn("maxmem", &maxmem, sizeof (maxmem)))
		xmaxmem = (uint64_t) maxmem *(1024 * 1024);
	else if (PE_get_default("hw.memsize", &memsize, sizeof (memsize)))
		xmaxmem = (uint64_t) memsize;
	else
		xmaxmem = 0;

	if (PE_parse_boot_argn("up_style_idle_exit", &up_style_idle_exit, sizeof(up_style_idle_exit))) {
		up_style_idle_exit = 1;
	}
#if INTERRUPT_MASKED_DEBUG
	int wdt_boot_arg = 0;
	/* Disable if WDT is disabled or no_interrupt_mask_debug in boot-args */
	if (PE_parse_boot_argn("no_interrupt_masked_debug", &interrupt_masked_debug,
				sizeof(interrupt_masked_debug)) || (PE_parse_boot_argn("wdt", &wdt_boot_arg,
				sizeof(wdt_boot_arg)) && (wdt_boot_arg == -1))) {
		interrupt_masked_debug = 0;
	}

	PE_parse_boot_argn("interrupt_masked_debug_timeout", &interrupt_masked_timeout, sizeof(interrupt_masked_timeout));
#endif



	PE_parse_boot_argn("immediate_NMI", &force_immediate_debug_halt, sizeof(force_immediate_debug_halt));

#if __ARM_PAN_AVAILABLE__
	/* Turn on Privileged Access Never before any user mappings exist. */
	__builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */

	arm_vm_init(xmaxmem, args);

	uint32_t debugmode;
	/* Patch the low-globals page only when every MIN_LOW_GLO_MASK bit is
	 * set in the "debug" boot-arg. */
	if (PE_parse_boot_argn("debug", &debugmode, sizeof(debugmode)) &&
	    ((debugmode & MIN_LOW_GLO_MASK) == MIN_LOW_GLO_MASK))
		patch_low_glo();

	printf_init();
	panic_init();
#if __arm64__
	/* Enable asynchronous exceptions */
	__builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF);
#endif
#if __arm64__ && WITH_CLASSIC_S2R
	sleep_token_buffer_init();
#endif

	PE_consistent_debug_inherit();

	/* setup debugging output if one has been chosen */
	PE_init_kprintf(FALSE);

	kprintf("kprintf initialized\n");

	serialmode = 0;	/* Assume normal keyboard and console */
	if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {	/* Do we want a serial
										 * keyboard and/or
										 * console? */
		kprintf("Serial mode specified: %08X\n", serialmode);
		int force_sync = serialmode & SERIALMODE_SYNCDRAIN;
		if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) {
			if (force_sync) {
				serialmode |= SERIALMODE_SYNCDRAIN;
				kprintf(
				    "WARNING: Forcing uart driver to output synchronously."
				    "printf()s/IOLogs will impact kernel performance.\n"
				    "You are advised to avoid using 'drain_uart_sync' boot-arg.\n");
			}
		}
	}
	if (kern_feature_override(KF_SERIAL_OVRD)) {
		serialmode = 0;
	}

	if (serialmode & SERIALMODE_OUTPUT) {	/* Start serial if requested */
		(void)switch_to_serial_console();	/* Switch into serial mode */
		disableConsoleOutput = FALSE;	/* Allow printfs to happen */
	}
	PE_create_console();

	/* setup console output */
	PE_init_printf(FALSE);

#if __arm64__
#if DEBUG
	dump_kva_space();
#endif
#endif

	cpu_machine_idle_init(TRUE);

#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000)
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
#endif

	PE_init_platform(TRUE, &BootCpuData);
	cpu_timebase_init(TRUE);
	fiq_context_bootstrap(TRUE);


	/*
	 * Initialize the stack protector for all future calls
	 * to C code. Since kernel_bootstrap() eventually
	 * switches stack context without returning through this
	 * function, we do not risk failing the check even though
	 * we mutate the guard word during execution.
	 */
	__stack_chk_guard = (unsigned long)early_random();
	/* Zero a byte of the protector to guard
	 * against string vulnerabilities
	 */
	__stack_chk_guard &= ~(0xFFULL << 8);
	/* Hand off to the machine-independent bootstrap; does not return. */
	machine_startup(args);
}
469
470/*
471 * Routine: arm_init_cpu
472 * Function:
473 * Re-initialize CPU when coming out of reset
474 */
475
void
arm_init_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	/* Re-enable Privileged Access Never; register state is lost across reset. */
	__builtin_arm_wsr("pan", 1);
#endif

	/* Mark this CPU as no longer sleeping. */
	cpu_data_ptr->cpu_flags &= ~SleepState;
#if __ARM_SMP__ && defined(ARMA7)
	cpu_data_ptr->cpu_CLW_active = 1;
#endif

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

#if __arm64__
	pmap_clear_user_ttb();
	flush_mmu_tlb();
	/* Enable asynchronous exceptions */
	__builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF);
#endif

	cpu_machine_idle_init(FALSE);

	cpu_init();

#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000)
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
#endif
#ifdef APPLETYPHOON
	/* Reapply this CPU's defeature nibble from the boot-arg mask. */
	if ((cpus_defeatures & (0xF << 4*cpu_data_ptr->cpu_number)) != 0)
		cpu_defeatures_set((cpus_defeatures >> 4*cpu_data_ptr->cpu_number) & 0xF);
#endif
	/* Initialize the timebase before serial_init, as some serial
	 * drivers use mach_absolute_time() to implement rate control
	 */
	cpu_timebase_init(FALSE);

	/* Boot-CPU-only wake work: re-init serial, platform expert, commpage. */
	if (cpu_data_ptr == &BootCpuData) {
#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * Prevent CPUs from going into deep sleep until all
		 * CPUs are ready to do so.
		 */
		arm64_stall_sleep = TRUE;
#endif
		serial_init();
		PE_init_platform(TRUE, NULL);
		commpage_update_timebase();
	}

	fiq_context_init(TRUE);
	cpu_data_ptr->rtcPop = EndOfAllTime;	/* no timer deadline pending yet */
	timer_resync_deadlines();

#if DEVELOPMENT || DEBUG
	PE_arm_debug_enable_trace();
#endif

	kprintf("arm_cpu_init(): cpu %d online\n", cpu_data_ptr->cpu_processor->cpu_id);

	if (cpu_data_ptr == &BootCpuData) {
#if CONFIG_TELEMETRY
		bootprofile_wake_from_sleep();
#endif /* CONFIG_TELEMETRY */
	}
#if MONOTONIC && defined(__arm64__)
	mt_wake_per_core();
#endif /* MONOTONIC && defined(__arm64__) */


	/* Enter the scheduler on this CPU; does not return. */
	slave_main(NULL);
}
550
551/*
552 * Routine: arm_init_idle_cpu
553 * Function:
554 */
void __attribute__((noreturn))
arm_init_idle_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	/* Re-enable Privileged Access Never; register state is lost across reset. */
	__builtin_arm_wsr("pan", 1);
#endif
#if __ARM_SMP__ && defined(ARMA7)
	cpu_data_ptr->cpu_CLW_active = 1;
#endif

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

#if __arm64__
	pmap_clear_user_ttb();
	flush_mmu_tlb();
	/* Enable asynchronous exceptions */
	__builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF);
#endif

#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000)
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
#endif
#ifdef APPLETYPHOON
	/* Reapply this CPU's defeature nibble from the boot-arg mask. */
	if ((cpus_defeatures & (0xF << 4*cpu_data_ptr->cpu_number)) != 0)
		cpu_defeatures_set((cpus_defeatures >> 4*cpu_data_ptr->cpu_number) & 0xF);
#endif

	fiq_context_init(FALSE);

	/* Return to the idle loop; does not come back here. */
	cpu_idle_exit(TRUE);
}
588