1 | /* |
2 | * Copyright (c) 2007-2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | * |
31 | */ |
32 | |
33 | #ifndef ARM_CPU_DATA_INTERNAL |
34 | #define ARM_CPU_DATA_INTERNAL |
35 | |
36 | #include <mach_assert.h> |
37 | #include <kern/assert.h> |
38 | #include <kern/kern_types.h> |
39 | #include <kern/percpu.h> |
40 | #include <kern/processor.h> |
41 | #include <os/base.h> |
42 | #include <pexpert/pexpert.h> |
43 | #include <arm/dbgwrap.h> |
44 | #include <arm/machine_routines.h> |
45 | #include <arm64/proc_reg.h> |
46 | #include <arm/thread.h> |
47 | #include <arm/pmap.h> |
48 | #include <machine/monotonic.h> |
49 | #include <san/kcov_data.h> |
50 | |
/* Nanoseconds per clock tick, assuming a 100 Hz tick rate. */
#define NSEC_PER_HZ (NSEC_PER_SEC / 100)
52 | |
/*
 * Parameter block for the low-level CPU reset path. Both members hold
 * physical addresses, since the reset handler runs before the kernel's
 * virtual mappings are available.
 */
typedef struct reset_handler_data {
	vm_offset_t assist_reset_handler; /* Assist handler phys address */
	vm_offset_t cpu_data_entries;     /* CpuDataEntries phys address */
} reset_handler_data_t;

#if !CONFIG_SPTM
/* Single global instance; not present when CONFIG_SPTM is enabled. */
extern reset_handler_data_t ResetHandlerData;
#endif
61 | |
/* Put the static check for cpumap_t here as it's defined in <kern/processor.h> */
static_assert(sizeof(cpumap_t) * CHAR_BIT >= MAX_CPUS, "cpumap_t bitvector is too small for current MAX_CPUS value" );

/*
 * Per-CPU copy-window region: a 2 MB-aligned carve-out at the top of the
 * kernel VA space, sized for CPUWINDOWS_MAX windows (one page each) on
 * each of MAX_CPUS CPUs.
 */
#define CPUWINDOWS_BASE_MASK 0xFFFFFFFFFFE00000UL
#define CPUWINDOWS_BASE (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK)
#define CPUWINDOWS_TOP (CPUWINDOWS_BASE + (MAX_CPUS * CPUWINDOWS_MAX * ARM_PGBYTES))

/* The whole window region must fit inside the kernel VA range. */
static_assert((CPUWINDOWS_BASE >= VM_MIN_KERNEL_ADDRESS) && ((CPUWINDOWS_TOP - 1) <= VM_MAX_KERNEL_ADDRESS),
    "CPU copy windows too large for CPUWINDOWS_BASE_MASK value" );
71 | |
/*
 * Physical/virtual address pair locating one CPU's cpu_data. CpuDataEntries
 * (declared below) is an array of these indexed by cpu number; its physical
 * address is also published via reset_handler_data_t for the reset path.
 */
typedef struct cpu_data_entry {
	void *cpu_data_paddr;            /* Cpu data physical address */
	struct cpu_data *cpu_data_vaddr; /* Cpu data virtual address */
#if !defined(__arm64__)
#error Check cpu_data_entry padding for this architecture
#endif
} cpu_data_entry_t;
79 | |
80 | |
/*
 * Per-CPU real-time clock timer state: the queue of pending timer calls
 * plus the currently-programmed deadline.
 */
typedef struct rtclock_timer {
	mpqueue_head_t queue;    /* pending timer calls */
	uint64_t deadline;       /* next deadline (absolute time) */
	uint32_t is_set:1,       /* a deadline is currently armed */
	    has_expired:1,       /* the armed deadline has fired */
	    :0;                  /* pad out the remaining bits */
} rtclock_timer_t;
88 | |
/*
 * Per-CPU event counters. Names suggest per-exception-class tallies;
 * exact increment sites live in the exception/interrupt handlers (not
 * visible in this header).
 */
typedef struct {
	/*
	 * The wake variants of these counters are reset to 0 when the CPU wakes.
	 */
	uint64_t irq_ex_cnt;       /* IRQ exceptions taken (since boot) */
	uint64_t irq_ex_cnt_wake;  /* IRQ exceptions taken since last wake */
	uint64_t ipi_cnt;          /* inter-processor interrupts received */
	uint64_t ipi_cnt_wake;     /* IPIs received since last wake */
	uint64_t timer_cnt;        /* timer interrupts/expirations */
#if CONFIG_CPU_COUNTERS
	uint64_t pmi_cnt_wake;     /* performance-monitor interrupts since wake */
#endif /* CONFIG_CPU_COUNTERS */
	uint64_t undef_ex_cnt;     /* undefined-instruction exceptions */
	uint64_t unaligned_cnt;    /* unaligned-access faults */
	uint64_t vfp_cnt;          /* FP/SIMD (VFP) exceptions or traps */
	uint64_t data_ex_cnt;      /* data aborts */
	uint64_t instr_ex_cnt;     /* instruction aborts */
} cpu_stat_t;
107 | |
/*
 * Per-CPU data block. One instance exists per CPU (see PERCPU_DECL below);
 * it aggregates scheduling, interrupt, timer, debug, reset and counter state
 * for that CPU. Layout is shared with low-level code via CpuDataEntries, so
 * member order must not be changed casually.
 */
typedef struct cpu_data {
	short cpu_number;              /* logical CPU number */
	unsigned short cpu_flags;      /* SleepState / StartedState bits (see below) */
	int cpu_type;                  /* mach cpu_type_t value */
	int cpu_subtype;               /* mach cpu_subtype_t value */
	int cpu_threadtype;            /* mach cpu_threadtype_t value */

	/* Interrupt stack: current pointer (PAC-signed) and top-of-stack. */
	void * XNU_PTRAUTH_SIGNED_PTR("cpu_data.istackptr") istackptr;
	vm_offset_t intstack_top;
#if __arm64__
	vm_offset_t excepstack_top;    /* top of the exception stack */
#endif
	thread_t cpu_active_thread;    /* thread currently running on this CPU */
	vm_offset_t cpu_active_stack;  /* kernel stack of the active thread */
	cpu_id_t cpu_id;               /* platform-expert CPU identifier */
	unsigned volatile int cpu_signal;   /* pending cross-CPU signal bits */
	ast_t cpu_pending_ast;         /* pending asynchronous system traps */
	cache_dispatch_t cpu_cache_dispatch;

#if __arm64__
	/* Timebase snapshot: base value plus current offset-adjusted reading. */
	uint64_t cpu_base_timebase;
	uint64_t cpu_timebase;
#endif
	bool cpu_hibernate;            /* This cpu is currently hibernating the system */
	bool cpu_running;              /* CPU is up and running */
	bool cluster_master;           /* this CPU is the cluster's master — TODO confirm exact role */
#if __ARM_ARCH_8_5__
	bool sync_on_cswitch;
#endif /* __ARM_ARCH_8_5__ */
	/* true if processor_start() or processor_exit() is operating on this CPU */
	bool in_state_transition;

	/* Decrementer (countdown timer) value and platform accessor callbacks. */
	uint32_t cpu_decrementer;
	get_decrementer_t cpu_get_decrementer_func;
	set_decrementer_t cpu_set_decrementer_func;
	fiq_handler_t cpu_get_fiq_handler;

	void *cpu_tbd_hardware_addr;
	void *cpu_tbd_hardware_val;

	/* Idle notification callback and idle-entry latency/pop parameters. */
	processor_idle_t cpu_idle_notify;
	uint64_t cpu_idle_latency;
	uint64_t cpu_idle_pop;

#if __ARM_KERNEL_PROTECT__
	vm_offset_t cpu_exc_vectors;   /* per-CPU exception vector base */
#endif /* __ARM_KERNEL_PROTECT__ */
	/* Reset/wake path: handler address, assist value and reset type. */
	vm_offset_t cpu_reset_handler;
	uintptr_t cpu_reset_assist;
	uint32_t cpu_reset_type;

	/* IOKit-style interrupt dispatch state for this CPU. */
	unsigned int interrupt_source;
	void *cpu_int_state;           /* saved interrupt state frame */
	IOInterruptHandler interrupt_handler;
	void *interrupt_nub;
	void *interrupt_target;
	void *interrupt_refCon;

	/* Idle timer callback, its refcon, and its deadline. */
	idle_timer_t idle_timer_notify;
	void *idle_timer_refcon;
	uint64_t idle_timer_deadline;

	uint64_t rtcPop;               /* next rtclock event deadline */
	rtclock_timer_t rtclock_timer; /* per-CPU timer queue (see above) */
	struct _rtclock_data_ *rtclock_datap;

	arm_debug_state_t *cpu_user_debug; /* Current debug state */
	vm_offset_t cpu_debug_interface_map;

	volatile int debugger_active;  /* nonzero while the kernel debugger owns this CPU */
	volatile int PAB_active;       /* Tells the console if we are dumping backtraces */

	/* Cross-call (xcall) argument slots: normal and immediate variants. */
	void *cpu_xcall_p0;
	void *cpu_xcall_p1;
	void *cpu_imm_xcall_p0;
	void *cpu_imm_xcall_p1;


#if __arm64__
	vm_offset_t coresight_base[CORESIGHT_REGIONS]; /* mapped CoreSight debug regions */
#endif

	/* CCC ARMv8 registers */
	uint64_t cpu_regmap_paddr;

	uint32_t cpu_phys_id;          /* physical CPU id (e.g. MPIDR-derived — TODO confirm) */
	uint32_t cpu_l2_access_penalty;
	platform_error_handler_t platform_error_handler;

	int cpu_mcount_off;            /* nonzero disables mcount profiling on this CPU — verify */

	/* Sleep token: magic value written while on the sleep path. */
#define ARM_CPU_ON_SLEEP_PATH 0x50535553UL
	volatile unsigned int cpu_sleep_token;
	unsigned int cpu_sleep_token_last;

	/* Topology: cluster type/id and cache ids/sizes. */
	cluster_type_t cpu_cluster_type;
	uint32_t cpu_cluster_id;
	uint32_t cpu_l2_id;
	uint32_t cpu_l2_size;
	uint32_t cpu_l3_id;
	uint32_t cpu_l3_size;

	/* Debugger halt state for this CPU. */
	enum {
		CPU_NOT_HALTED = 0,
		CPU_HALTED,
		CPU_HALTED_WITH_STATE
	} halt_status;
#if defined(HAS_APPLE_PAC)
	/* Pointer-authentication keys (return-oriented / jump-oriented). */
	uint64_t rop_key;
	uint64_t jop_key;
#endif /* defined(HAS_APPLE_PAC) */

	/* large structs with large alignment requirements */

	/* double-buffered performance counter data */
	uint64_t *cpu_kpc_buf[2];
	/* PMC shadow and reload value buffers */
	uint64_t *cpu_kpc_shadow;
	uint64_t *cpu_kpc_reload;

#if CONFIG_CPU_COUNTERS
	struct mt_cpu cpu_monotonic;   /* monotonic counter state */
#endif /* CONFIG_CPU_COUNTERS */

	cpu_stat_t cpu_stat;           /* per-CPU event counters (see cpu_stat_t) */
#if !XNU_MONITOR
	struct pmap_cpu_data cpu_pmap_cpu_data; /* per-CPU pmap state (inline when no PPL) */
#endif
	dbgwrap_thread_state_t halt_state; /* register state captured at debugger halt */
#if DEVELOPMENT || DEBUG
	/* WFE (wait-for-event) idle instrumentation. */
	uint64_t wfe_count;
	uint64_t wfe_deadline_checks;
	uint64_t wfe_terminations;
#endif
#if CONFIG_KCOV
	kcov_cpu_data_t cpu_kcov_data; /* kernel coverage-sanitizer state */
#endif
#if __arm64__
	/**
	 * Stash the state of the system when an IPI is received. This will be
	 * dumped in the case a panic is getting triggered.
	 */
	uint64_t ipi_pc;
	uint64_t ipi_lr;
	uint64_t ipi_fp;

	/* Encoded data to store in TPIDR_EL0 on context switch */
	uint64_t cpu_tpidr_el0;
#endif

} cpu_data_t;
259 | |
260 | /* |
261 | * cpu_flags |
262 | */ |
263 | #define SleepState 0x0800 |
264 | #define StartedState 0x1000 |
265 | |
266 | extern cpu_data_entry_t CpuDataEntries[MAX_CPUS]; |
267 | PERCPU_DECL(cpu_data_t, cpu_data); |
268 | #define BootCpuData __PERCPU_NAME(cpu_data) |
269 | extern boot_args *BootArgs; |
270 | |
#if __arm64__
/* Bounds of the low (physically-addressed) reset vector code. */
extern unsigned int LowResetVectorBase;
extern unsigned int LowResetVectorEnd;
#if WITH_CLASSIC_S2R
/* Token page used by classic suspend-to-RAM to validate resume. */
extern uint8_t SleepToken[8];
#endif
extern unsigned int LowExceptionVectorBase;
#else
#error Unknown arch
#endif
281 | |
/* Return the cpu_data for a given cpu number (via CpuDataEntries). */
extern cpu_data_t *cpu_datap(int cpu);
/* Allocate a cpu_data block; is_boot selects the boot CPU's static slot. */
extern cpu_data_t *cpu_data_alloc(boolean_t is_boot);
/* Allocate interrupt/exception stacks for the given CPU. */
extern void cpu_stack_alloc(cpu_data_t*);
/* Initialize a freshly allocated cpu_data block. */
extern void cpu_data_init(cpu_data_t *cpu_data_ptr);
/* Release a cpu_data block and its associated resources. */
extern void cpu_data_free(cpu_data_t *cpu_data_ptr);
/* Register a CPU's cpu_data in CpuDataEntries; returns KERN_SUCCESS on success. */
extern kern_return_t cpu_data_register(cpu_data_t *cpu_data_ptr);
/* Map a scheduler processor_t back to its cpu_data. */
extern cpu_data_t *processor_to_cpu_datap( processor_t processor);
289 | |
#if __arm64__
/*
 * System-register values to be restored on the resume path
 * (currently just TCR_EL1, the translation control register).
 */
typedef struct sysreg_restore {
	uint64_t tcr_el1; /* saved TCR_EL1 value */
} sysreg_restore_t;

extern sysreg_restore_t sysreg_restore;
#endif /* __arm64__ */
297 | |
298 | #endif /* ARM_CPU_DATA_INTERNAL */ |
299 | |