/*
 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#include <kern/kpc.h>
#if CONFIG_CPU_COUNTERS
#include <kern/monotonic.h>
#endif /* CONFIG_CPU_COUNTERS */

#if KPERF
#include <kperf/kptimer.h>
#endif /* KPERF */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION */


#include <libkern/section_keywords.h>

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif

#if !CONFIG_SPTM
extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;
vm_address_t start_cpu_paddr;
#endif

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if APPLEVIRTUALPLATFORM
extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep, unsigned int cpu, uint64_t entry_pa);
#else
extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep);
#endif
extern void arm64_force_wfi_clock_gate(void);
#if defined(APPLETYPHOON)
// <rdar://problem/15827409>
extern void typhoon_prepare_for_wfi(void);
extern void typhoon_return_from_wfi(void);
#endif

#if HAS_RETENTION_STATE
extern void arm64_retention_wfi(void);
#endif

sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};

// wfi - wfi mode
// 0 : disabled
// 1 : normal
// 2 : overhead simulation (delay & flags)
TUNABLE(unsigned int, wfi, "wfi", 1);
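
/*
 * Example decoding (development/debug kernels): a boot-arg such as
 * "wfi=0x00400202" would select type 2 (overhead simulation) in bits 7..0,
 * wfi_flags 0x02 (flush TLBs) in bits 15..8, and a delay field of 0x0040 in
 * bits 31..16, as decoded by cpu_machine_idle_init() below.
 */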
#if DEVELOPMENT || DEBUG

// wfi_flags
// 1 << 0 : flush L1s
// 1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */

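/* kdebug event codes for the WFE idle-loop tracepoints emitted by cpu_idle() below. */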
#define CPUPM_IDLE_WFE 0x5310300
#define CPUPM_IDLE_TIMER_WFE 0x5310304

#define DEFAULT_EXPECTING_IPI_WFE_TIMEOUT_USEC (60ULL)
TUNABLE(uint32_t, expecting_ipi_wfe_timeout_usec,
    "expecting_ipi_wfe_timeout_usec", DEFAULT_EXPECTING_IPI_WFE_TIMEOUT_USEC);
uint64_t expecting_ipi_wfe_timeout_mt = 0x0ULL; /* initialized to a non-zero value in sched_init */

/* When recommended, issue WFE with [FI]IRQ unmasked in the idle
 * loop. The default.
 */
uint32_t idle_proximate_io_wfe_unmasked = 1;
#if DEVELOPMENT || DEBUG
uint32_t idle_proximate_timer_wfe = 1;
uint32_t idle_proximate_io_wfe_masked = 0;
#else
/* Issue WFE in lieu of WFI when awaiting a proximate timer. */
static uint32_t idle_proximate_timer_wfe = 1;
/* When recommended, issue WFE with [FI]IRQ masked in the idle loop.
 * Non-default, retained for experimentation.
 */
static uint32_t idle_proximate_io_wfe_masked = 0;
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);
void arm64_immediate_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

void
arm64_immediate_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
}

uint64_t arm64_ipi_test_data[MAX_CPUS * 2];

MACHINE_TIMEOUT(arm64_ipi_test_timeout, "arm64-ipi-test", 100, MACHINE_TIMEOUT_UNIT_MSEC, NULL);

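/*
 * XNUPOST cross-call smoke test: sends a normal and an immediate IPI to each
 * CPU and panics if either cross-call cannot be posted or the callbacks fail
 * to write the expected responses into arm64_ipi_test_data within
 * arm64_ipi_test_timeout.
 */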
void
arm64_ipi_test()
{
	volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
	uint64_t timeout_ms = os_atomic_load(&arm64_ipi_test_timeout, relaxed);
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available
	 */
	if (real_ncpus == 1) {
		return;
	}

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	for (unsigned int i = 0; i <= max_cpu_id; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
		    (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d was unable to immediate-IPI CPU %u within %lldms", current_cpu_number, i, timeout_ms);
			}
		}

		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %lldms, responses: %llx, %llx",
				    current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
			}
		}
	}
}
#endif /* defined(CONFIG_XNUPOST) */

static void
configure_coresight_registers(cpu_data_t *cdp)
{
	int i;

	assert(cdp);
	vm_offset_t coresight_regs = ml_get_topology_info()->cpus[cdp->cpu_number].coresight_regs;

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide either cpu_regmap_paddr (from the legacy "reg-private" EDT property)
	 * or coresight_regs (from the new "coresight-reg" property), assume that
	 * coresight registers are not supported.
	 */
	if (cdp->cpu_regmap_paddr || coresight_regs) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				if (coresight_regs) {
					cdp->coresight_base[i] = coresight_regs + CORESIGHT_OFFSET(i);
				} else {
					uint64_t addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
					cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}


/*
 * Routine: cpu_bootstrap
 * Function:
 */
void
cpu_bootstrap(void)
{
}

/*
 * Routine: cpu_sleep
 * Function:
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->cpu_active_thread = current_thread();
#if CONFIG_SPTM
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) VM_KERNEL_STRIP_PTR(arm_init_cpu);
#else
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
#endif
	cpu_data_ptr->cpu_flags |= SleepState;

	if (cpu_data_ptr->cpu_user_debug != NULL) {
		arm_debug_set(NULL);
	}

#if CONFIG_CPU_COUNTERS
	kpc_idle();
	mt_cpu_down(cpu_data_ptr);
#endif /* CONFIG_CPU_COUNTERS */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	CleanPoC_Dcache();

#if USE_APPLEARMSMP
	if (ml_is_quiescing()) {
		PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
	} else {
		bool deep_sleep = PE_cpu_down(cpu_data_ptr->cpu_id);
		cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
		// hang CPU on spurious wakeup
		cpu_data_ptr->cpu_reset_handler = (uintptr_t)0;
		__builtin_arm_dsb(DSB_ISH);
		CleanPoU_Dcache();
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(deep_sleep, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
#else
		arm64_prepare_for_sleep(deep_sleep);
#endif
	}
#else
	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
#endif
	/*NOTREACHED*/
}

/*
 * Routine: cpu_interrupt_is_pending
 * Function: Returns a bool signifying a non-zero ISR_EL1,
 * indicating a pending IRQ, FIQ or external abort.
 */

bool
cpu_interrupt_is_pending(void)
{
	uint64_t isr_value;
	isr_value = __builtin_arm_rsr64("ISR_EL1");
	return isr_value != 0;
}

static bool
cpu_proximate_timer(void)
{
	return !SetIdlePop();
}

#ifdef ARM64_BOARD_CONFIG_T6000
int wfe_allowed = 0;
#else
int wfe_allowed = 1;
#endif /* ARM64_BOARD_CONFIG_T6000 */

#if DEVELOPMENT || DEBUG
#define WFE_STAT(x) \
	do { \
		(x); \
	} while(0)
#else
#define WFE_STAT(x) do {} while(0)
#endif /* DEVELOPMENT || DEBUG */

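/*
 * Poll in WFE until an interrupt is pending (or, when 'unmask' is set, has
 * been handled), the cluster's WFE recommendation is withdrawn, or
 * 'wfe_deadline' passes.  Returns true if an interrupt was observed, telling
 * the caller to re-evaluate the idle loop instead of falling back to WFI.
 */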
bool
wfe_to_deadline_or_interrupt(uint32_t cid, uint64_t wfe_deadline, cpu_data_t *cdp, bool unmask, bool check_cluster_recommendation)
{
	bool ipending = false;
	uint64_t irqc = 0, nirqc = 0;

	/* The ARMv8 architecture permits a processor dwelling in WFE
	 * with F/IRQ masked to ignore a pending interrupt, i.e.
	 * not classify it as an 'event'. This is potentially
	 * problematic with AICv2's IRQ distribution model, as
	 * a transient interrupt masked interval can cause an SIQ
	 * query rejection, possibly routing the interrupt to
	 * another core/cluster in a powergated state.
	 * Hence, optionally unmask IRQs+FIQs across WFE.
	 */
	if (unmask) {
		/* Latch SW IRQ+FIQ counter prior to unmasking
		 * interrupts.
		 */
		irqc = nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
		/* Unmask IRQ+FIQ. Mirrors mask used by machine_idle()
		 * with ASYNCF omission. Consider that this could
		 * delay recognition of an async abort, including
		 * those triggered by ISRs
		 */
		__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
	}

	while ((ipending = (cpu_interrupt_is_pending())) == false) {
		if (unmask) {
			/* If WFE was issued with IRQs unmasked, an
			 * interrupt may have been processed.
			 * Consult the SW IRQ counter to determine
			 * whether the 'idle loop' must be
			 * re-evaluated.
			 */
			nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
			if (nirqc != irqc) {
				break;
			}
		}

		if (__probable(wfe_allowed)) {
			/*
			 * If IRQs are unmasked, there's a small window
			 * where an 'extra' WFE may be issued after
			 * the consultation of the SW interrupt counter
			 * and new interrupt arrival. Hence this WFE
			 * relies on the [FI]RQ interrupt handler
			 * epilogue issuing a 'SEVL', to post an
			 * event which causes the next WFE on the same
			 * PE to retire immediately.
			 */

			__builtin_arm_wfe();
		}

		WFE_STAT(cdp->wfe_count++);
		if (wfe_deadline != ~0ULL) {
			WFE_STAT(cdp->wfe_deadline_checks++);
			/* Check if the WFE recommendation has expired.
			 * We do not recompute the deadline here.
			 */
			if ((check_cluster_recommendation && ml_cluster_wfe_timeout(cid) == 0) ||
			    mach_absolute_time() >= wfe_deadline) {
				WFE_STAT(cdp->wfe_terminations++);
				break;
			}
		}
	}

	if (unmask) {
		/* Re-mask IRQ+FIQ
		 * Mirrors mask used by machine_idle(), with ASYNCF
		 * omission
		 */
		__builtin_arm_wsr64("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
		/* Refetch SW interrupt counter with IRQs masked
		 * It is important that this routine accurately flags
		 * any observed interrupts via its return value,
		 * inaccuracy may lead to an erroneous WFI fallback.
		 */
		nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
	}

	return ipending || (nirqc != irqc);
}

/*
 * Routine: cpu_idle
 * Function:
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	processor_t processor = current_processor();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
	bool idle_disallowed = false;
	/* Read and reset the next_idle_short flag */
	bool next_idle_short = processor->next_idle_short;
	processor->next_idle_short = false;

	if (__improbable((!idle_enable))) {
		idle_disallowed = true;
	} else if (__improbable(cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		idle_disallowed = true;
	}

	if (__improbable(idle_disallowed)) {
		Idle_load_context();
	}

	bool ipending = false;
	uint32_t cid = cpu_data_ptr->cpu_cluster_id;
	bool check_cluster_recommendation = true;
	uint64_t wfe_timeout = 0;

	if (idle_proximate_io_wfe_masked == 1) {
		/* Check for an active perf. controller generated
		 * WFE recommendation for this cluster.
		 */
		wfe_timeout = ml_cluster_wfe_timeout(cid);
	}

	if (next_idle_short && expecting_ipi_wfe_timeout_mt > wfe_timeout) {
		/* In this case we should WFE because a response IPI
		 * is expected soon.
		 */
		wfe_timeout = expecting_ipi_wfe_timeout_mt;
		check_cluster_recommendation = false;
	}

	if (wfe_timeout != 0) {
		uint64_t wfe_deadline = mach_absolute_time() + wfe_timeout;
		/* Poll issuing event-bounded WFEs until an interrupt
		 * arrives or the WFE recommendation expires
		 */
		KDBG(CPUPM_IDLE_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, wfe_timeout, !check_cluster_recommendation);
		ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cpu_data_ptr, false, check_cluster_recommendation);
		KDBG(CPUPM_IDLE_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, wfe_deadline);
		if (ipending == true) {
			/* Back to machine_idle() */
			Idle_load_context();
		}
	}

	if (__improbable(cpu_proximate_timer())) {
		if (idle_proximate_timer_wfe == 1) {
			/* Poll issuing WFEs until the expected
			 * timer FIQ arrives.
			 */
			KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, ~0ULL);
			ipending = wfe_to_deadline_or_interrupt(cid, ~0ULL, cpu_data_ptr, false, false);
			KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, ~0ULL);
			assert(ipending == true);
		}
		Idle_load_context();
	}

	lastPop = cpu_data_ptr->rtcPop;

	cpu_data_ptr->cpu_active_thread = current_thread();

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if CONFIG_CPU_COUNTERS
	kpc_idle();
	mt_cpu_idle(cpu_data_ptr);
#endif /* CONFIG_CPU_COUNTERS */

	if (wfi) {
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_enter();
#endif

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
#if HAS_RETENTION_STATE
		arm64_retention_wfi();
#else
		__builtin_arm_wfi();
#endif

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}

/*
 * Routine: cpu_idle_exit
 * Function:
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if CONFIG_CPU_COUNTERS
	kpc_idle_exit();
	mt_cpu_run(cpu_data_ptr);
#endif /* CONFIG_CPU_COUNTERS */

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

#if KASAN_TBI
	kasan_unpoison_curstack(false);
#endif /* KASAN_TBI */

	Idle_load_context();
}

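/*
 * Per-CPU initialization performed each time a CPU is brought up (boot and
 * wake): establishes the CPU type/subtype from CPUID on first invocation,
 * resets the per-wake interrupt statistics, and marks the CPU as running.
 */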
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}

		do_cacheid();

		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		case CPU_ARCH_ARMv8E:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64E;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
#if CONFIG_CPU_COUNTERS
	cdp->cpu_stat.pmi_cnt_wake = 0;
#endif /* CONFIG_CPU_COUNTERS */
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if CONFIG_CPU_COUNTERS
	kpc_idle_exit();
	mt_cpu_up(cdp);
#endif /* CONFIG_CPU_COUNTERS */
}

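/*
 * Allocate the per-CPU interrupt and exception stacks; each allocation
 * includes ptoa(2) of extra space for the leading and trailing guard pages.
 */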
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

	kmem_alloc(kernel_map, &irq_stack,
	    INTSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = (void *)cpu_data_ptr->intstack_top;

	kmem_alloc(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
}

void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
		return;
	}

	int cpu_number = cpu_data_ptr->cpu_number;

	if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) {
		CpuDataEntries[cpu_number].cpu_data_vaddr = NULL;
		CpuDataEntries[cpu_number].cpu_data_paddr = 0;
		__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
	}
	kmem_free(kernel_map,
	    cpu_data_ptr->intstack_top - INTSTACK_SIZE - PAGE_SIZE,
	    INTSTACK_SIZE + 2 * PAGE_SIZE);
	kmem_free(kernel_map,
	    cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE - PAGE_SIZE,
	    EXCEPSTACK_SIZE + 2 * PAGE_SIZE);
}

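/*
 * Initialize a cpu_data_t to its default (pre-startup) values.
 */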
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = NULL;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;


	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = NULL;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

#if !XNU_MONITOR
	pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
	pmap_cpu_data_ptr->pv_free.list = NULL;
	pmap_cpu_data_ptr->pv_free.count = 0;
	pmap_cpu_data_ptr->pv_free_spill_marker = NULL;
#if !CONFIG_SPTM
	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));
#endif
#endif /* !XNU_MONITOR */
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_APPLE_PAC)
	cpu_data_ptr->rop_key = 0;
	cpu_data_ptr->jop_key = ml_default_jop_pid();
#endif
}

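/*
 * Publish a CPU's cpu_data virtual and physical addresses in the global
 * CpuDataEntries array, where low-level startup and reset code can find them.
 */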
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}

#if defined(KERNEL_INTEGRITY_CTRR)
/* Hibernation needs to reset this state, so data and text are in the hib segment;
 * this allows them to be accessed and executed early.
 */
LCK_GRP_DECLARE(ctrr_cpu_start_lock_grp, "ctrr_cpu_start_lock");
LCK_SPIN_DECLARE(ctrr_cpu_start_lck, &ctrr_cpu_start_lock_grp);
enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS] MARK_AS_HIBERNATE_DATA;

MARK_AS_HIBERNATE_TEXT
void
init_ctrr_cluster_states(void)
{
	for (int i = 0; i < MAX_CPU_CLUSTERS; i++) {
		ctrr_cluster_locked[i] = CTRR_UNLOCKED;
	}
}
#endif

kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);

	if (processor_should_kprintf(processor, true)) {
		kprintf("cpu_start() cpu: %d\n", cpu);
	}

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;
#if CONFIG_SPTM
		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) VM_KERNEL_STRIP_PTR(arm_init_cpu);
#else
		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
#if !XNU_MONITOR
		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
#endif
#endif /* !CONFIG_SPTM */

		if (processor->startup_thread != THREAD_NULL) {
			first_thread = processor->startup_thread;
		} else {
			first_thread = processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;
		first_thread->machine.pcpu_data_base =
		    (char *)cpu_data_ptr - __PERCPU_ADDR(cpu_data);

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
#if CONFIG_SPTM
		/**
		 * On SPTM devices, CTRR is configured entirely by the SPTM. Due to this, this logic
		 * is no longer required in XNU.
		 */
#else
#if defined(KERNEL_INTEGRITY_CTRR)

		/* First CPU being started within a cluster goes ahead to lock CTRR for cluster;
		 * other CPUs block until cluster is locked. */
		lck_spin_lock(&ctrr_cpu_start_lck);
		switch (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]) {
		case CTRR_UNLOCKED:
			ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKING;
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		case CTRR_LOCKING:
			assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT);
			lck_spin_unlock(&ctrr_cpu_start_lck);
			thread_block(THREAD_CONTINUE_NULL);
			assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKING);
			break;
		default: // CTRR_LOCKED
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		}
#endif
#endif /* CONFIG_SPTM */
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}


void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();
	uint64_t timebase_offset = 0;

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	} else if (from_boot) {
#if DEBUG || DEVELOPMENT
		if (PE_parse_boot_argn("timebase_offset", &timebase_offset, sizeof(timebase_offset))) {
			rtclock_base_abstime += timebase_offset;
		}
#endif
		/* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
		ml_set_reset_time(ml_get_hwclock());
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = timebase_offset;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}

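/*
 * Final sleep entry point.  The boot CPU waits for every other registered CPU
 * to reach the sleep path, records wake_abstime, optionally writes the classic
 * S2R suspend signature and the hibernation image, and then calls
 * arm64_prepare_for_sleep(); secondary CPUs wait for the boot CPU's go-ahead
 * (when __ARM_GLOBAL_SLEEP_BIT__ is set) before doing the same.  Does not
 * return.
 */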
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
		ml_set_reset_time(UINT64_MAX);
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoC
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating .
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if HIBERNATION
		uint32_t mode = hibernate_write_image();
		if (mode == kIOHibernatePostWriteHalt) {
			HIBLOG("powering off after writing hibernation image\n");
			int halt_result = -1;
			if (PE_halt_restart) {
				/**
				 * Drain serial FIFOs now as the normal call further down won't
				 * be hit when the CPU halts here for hibernation. Here, it'll
				 * make sure the preceding HIBLOG is flushed as well.
				 */
				serial_go_to_sleep();
				halt_result = (*PE_halt_restart)(kPEHaltCPU);
			}
			panic("can't shutdown: PE_halt_restart returned %d", halt_result);
		}
#endif /* HIBERNATION */

		serial_go_to_sleep();

#if CONFIG_CPU_COUNTERS
		mt_sleep();
#endif /* CONFIG_CPU_COUNTERS */
		/* ARM64-specific preparation */
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
#else
		arm64_prepare_for_sleep(true);
#endif
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating .
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
#else
		arm64_prepare_for_sleep(true);
#endif
	}
}

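/*
 * Idle/sleep machinery setup.  On the from_boot path this parses the "jtag"
 * and "wfe_mode" boot-args, decodes the wfi tunable, determines whether
 * coresight debug registers should be enabled, and records the physical
 * start/resume entry points; on every call it installs this CPU's idle-resume
 * reset handler and cleans its cpu_data from the cache.
 */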
void
cpu_machine_idle_init(boolean_t from_boot)
{
#if !CONFIG_SPTM
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
#endif
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		uint32_t production = 1;
		DTEntry entry;

		unsigned long jtag = 0;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

#if DEVELOPMENT || DEBUG
		uint32_t wfe_mode = 0;
		if (PE_parse_boot_argn("wfe_mode", &wfe_mode, sizeof(wfe_mode))) {
			idle_proximate_timer_wfe = ((wfe_mode & 1) == 1);
			idle_proximate_io_wfe_masked = ((wfe_mode & 2) == 2);
			extern uint32_t idle_proximate_io_wfe_unmasked;
			idle_proximate_io_wfe_unmasked = ((wfe_mode & 4) == 4);
		}
#endif

		// bits 7..0 give the wfi type
		switch (wfi & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			// 31..16 - wfi delay in us
			// 15..8 - flags
			// 7..0 - 2
			wfi_flags = (wfi >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			wfi = 2;
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}
#if !CONFIG_SPTM
		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);
#endif

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on production or debug chip
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void const *prop;

			if (kSuccess == SecureDTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
				if (size == 4) {
					bcopy(prop, &production, size);
				}
			}
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}
#if !CONFIG_SPTM
		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
#endif
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
	;
#endif
#if CONFIG_SPTM
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) VM_KERNEL_STRIP_PTR(arm_init_idle_cpu);
#else
	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
#endif
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

_Atomic uint32_t cpu_idle_count = 0;

void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		os_atomic_inc(&cpu_idle_count, relaxed);
	} else {
		os_atomic_dec(&cpu_idle_count, relaxed);
	}
}

#if WITH_CLASSIC_S2R
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void const * const *prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stpage node in the device tree */
		if (kSuccess != SecureDTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != SecureDTGetProperty(entry, "reg", (const void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t const *)prop)[0], ((vm_size_t const *)prop)[1]);
	}
}
#endif
