1 | /* |
2 | * Copyright (c) 2007 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | * |
31 | */ |
32 | |
33 | #ifndef ARM_CPU_DATA_INTERNAL |
34 | #define ARM_CPU_DATA_INTERNAL |
35 | |
36 | #include <mach_assert.h> |
37 | #include <kern/assert.h> |
38 | #include <kern/kern_types.h> |
39 | #include <kern/processor.h> |
40 | #include <pexpert/pexpert.h> |
41 | #include <arm/dbgwrap.h> |
42 | #include <arm/proc_reg.h> |
43 | #include <arm/thread.h> |
44 | #include <arm/pmap.h> |
45 | |
46 | #if MONOTONIC |
47 | #include <machine/monotonic.h> |
48 | #endif /* MONOTONIC */ |
49 | |
/* Tick period in nanoseconds for a 100 Hz clock (NSEC_PER_SEC / 100 = 10 ms). */
#define NSEC_PER_HZ	(NSEC_PER_SEC / 100)
51 | |
/*
 * Parameters handed to the warm-reset path.  All addresses are physical,
 * since they are consumed before virtual mappings are (re)established.
 */
typedef struct reset_handler_data {
	vm_offset_t	assist_reset_handler;	/* Assist handler phys address */
	vm_offset_t	cpu_data_entries;	/* CpuDataEntries phys address */
#if !__arm64__
	vm_offset_t	boot_args;		/* BootArgs phys address (32-bit arm only) */
#endif
} reset_handler_data_t;

/* Single global instance consumed by the reset handler. */
extern	reset_handler_data_t	ResetHandlerData;
61 | |
/*
 * MAX_CPUS: compile-time upper bound on CPUs the kernel tracks.
 * SMP builds take the platform-supplied CPU_COUNT when defined and
 * fall back to 2 otherwise; non-SMP builds are pinned to 1.
 */
#if __ARM_SMP__
#ifdef CPU_COUNT
#define	MAX_CPUS	CPU_COUNT
#else
#define	MAX_CPUS	2
#endif
#else
#define	MAX_CPUS	1
#endif

/*
 * Per-CPU mapping windows: CPUWINDOWS_MAX pages reserved per CPU in a
 * fixed VA region near the top of the kernel address space.
 * On arm64 the base is derived by masking VM_MAX_KERNEL_ADDRESS down to
 * a 1 MB boundary; 32-bit arm uses a hard-coded base.
 */
#define CPUWINDOWS_MAX		4
#ifdef	__arm__
#define CPUWINDOWS_BASE		0xFFF00000UL
#else
#define CPUWINDOWS_BASE_MASK	0xFFFFFFFFFFF00000UL
#define CPUWINDOWS_BASE		(VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK)
#endif
#define CPUWINDOWS_TOP		(CPUWINDOWS_BASE + (MAX_CPUS * CPUWINDOWS_MAX * PAGE_SIZE))
80 | |
/*
 * One bootstrap entry per CPU: where that CPU's cpu_data lives, both
 * physically (for pre-MMU access) and virtually.  On 32-bit arm two
 * extra words pad the entry; arm64 needs none (see the #error guard:
 * any new architecture must re-check the padding).
 */
typedef struct cpu_data_entry {
	void		*cpu_data_paddr;	/* Cpu data physical address */
	struct	cpu_data *cpu_data_vaddr;	/* Cpu data virtual address */
#if __arm__
	uint32_t	cpu_data_offset_8;	/* pad to keep entry layout fixed */
	uint32_t	cpu_data_offset_12;
#elif __arm64__
#else
#error Check cpu_data_entry padding for this architecture
#endif
} cpu_data_entry_t;
92 | |
93 | |
/*
 * Per-CPU rtclock timer: a queue of pending timer calls plus the
 * deadline currently armed in hardware.
 */
typedef struct rtclock_timer {
	mpqueue_head_t		queue;		/* pending timer calls */
	uint64_t		deadline;	/* currently programmed deadline */
	uint32_t		is_set:1,	/* a deadline is armed */
				has_expired:1,	/* deadline has fired */
				:0;		/* unnamed :0 — close out the bit-field unit */
} rtclock_timer_t;
101 | |
/*
 * Per-CPU exception/interrupt counters.  The *_wake variants appear to
 * track counts relative to the last wake from sleep — NOTE(review):
 * confirm against the code that resets them.
 */
typedef struct {
	uint32_t	irq_ex_cnt;		/* IRQ exceptions taken */
	uint32_t	irq_ex_cnt_wake;
	uint32_t	ipi_cnt;		/* inter-processor interrupts */
	uint32_t	ipi_cnt_wake;
	uint32_t	timer_cnt;		/* timer interrupts */
	uint32_t	timer_cnt_wake;
	uint32_t	undef_ex_cnt;		/* undefined-instruction exceptions */
	uint32_t	unaligned_cnt;		/* unaligned access faults */
	uint32_t	vfp_cnt;		/* VFP/SIMD traps */
	uint32_t	data_ex_cnt;		/* data aborts */
	uint32_t	instr_ex_cnt;		/* instruction/prefetch aborts */
} cpu_stat_t;
115 | |
/*
 * The per-CPU data block.  One instance per CPU, reachable through
 * CpuDataEntries / cpu_datap(); BootCpuData is the boot processor's.
 */
typedef struct cpu_data
{
	unsigned short			cpu_number;		/* logical CPU number */
	unsigned short			cpu_flags;		/* SleepState/StartedState bits */
	vm_offset_t			istackptr;		/* interrupt stack: current pointer */
	vm_offset_t			intstack_top;		/* interrupt stack: top */
#if __arm64__
	vm_offset_t			excepstackptr;		/* exception stack: current pointer */
	vm_offset_t			excepstack_top;		/* exception stack: top */
	boolean_t			cluster_master;		/* this CPU leads its cluster */
#else
	vm_offset_t			fiqstackptr;		/* FIQ stack: current pointer */
	vm_offset_t			fiqstack_top;		/* FIQ stack: top */
#endif
	boolean_t			interrupts_enabled;
	thread_t			cpu_active_thread;	/* thread running on this CPU */
	vm_offset_t			cpu_active_stack;
	unsigned int			cpu_ident;
	cpu_id_t			cpu_id;
	unsigned volatile int		cpu_signal;		/* pending cross-CPU signal bits */
#if DEBUG || DEVELOPMENT
	/* Debug-only cross-call failure breadcrumbs. */
	void				*failed_xcall;
	unsigned int			failed_signal;
	volatile long			failed_signal_count;
#endif
	void				*cpu_cache_dispatch;
	ast_t				cpu_pending_ast;	/* pending asynchronous system traps */
	struct processor		*cpu_processor;		/* back-pointer to scheduler processor */
	int				cpu_type;
	int				cpu_subtype;
	int				cpu_threadtype;
	int				cpu_running;

#ifdef	__LP64__
	uint64_t			cpu_base_timebase;
	uint64_t			cpu_timebase;
#else
	/*
	 * 32-bit: the 64-bit timebase values are kept in unions so they can
	 * be accessed either as low/high halves or as one raw 64-bit value.
	 * The *_low/*_high macros preserve the historical field names.
	 */
	union {
		struct {
			uint32_t	low;
			uint32_t	high;
		} split;
		struct {
			uint64_t	val;
		} raw;
	} cbtb;
#define	cpu_base_timebase_low	cbtb.split.low
#define	cpu_base_timebase_high	cbtb.split.high

	union {
		struct {
			uint32_t	low;
			uint32_t	high;
		} split;
		struct {
			uint64_t	val;
		} raw;
	} ctb;
#define	cpu_timebase_low	ctb.split.low
#define	cpu_timebase_high	ctb.split.high
#endif

	uint32_t			cpu_decrementer;
	/* Platform hooks for the decrementer and FIQ handling (opaque fn ptrs). */
	void				*cpu_get_decrementer_func;
	void				*cpu_set_decrementer_func;
	void				*cpu_get_fiq_handler;

	void				*cpu_tbd_hardware_addr;
	void				*cpu_tbd_hardware_val;

	void				*cpu_console_buf;	/* per-CPU console buffer */

	/* Idle bookkeeping. */
	void				*cpu_idle_notify;
	uint64_t			cpu_idle_latency;
	uint64_t			cpu_idle_pop;

#if __arm__ || __ARM_KERNEL_PROTECT__
	vm_offset_t			cpu_exc_vectors;	/* exception vector base */
#endif /* __arm__ || __ARM_KERNEL_PROTECT__ */
	/* Warm-reset handling. */
	vm_offset_t			cpu_reset_handler;
	uint32_t			cpu_reset_type;
	uintptr_t			cpu_reset_assist;

	/* Interrupt dispatch state registered by the platform expert. */
	void				*cpu_int_state;
	IOInterruptHandler		interrupt_handler;
	void				*interrupt_nub;
	unsigned int			interrupt_source;
	void				*interrupt_target;
	void				*interrupt_refCon;

	/* Idle timer. */
	void				*idle_timer_notify;
	void				*idle_timer_refcon;
	uint64_t			idle_timer_deadline;

	uint64_t			quantum_timer_deadline;
	uint64_t			rtcPop;			/* next rtclock pop deadline */
	rtclock_timer_t			rtclock_timer;
	struct _rtclock_data_		*rtclock_datap;

	arm_debug_state_t		*cpu_user_debug;	/* Current debug state */
	vm_offset_t			cpu_debug_interface_map;

	volatile int			debugger_active;

	/* Cross-call argument slots. */
	void				*cpu_xcall_p0;
	void				*cpu_xcall_p1;

#if	__ARM_SMP__ && defined(ARMA7)
	/* Cortex-A7 cache clean/flush cross-CPU coordination state. */
	volatile uint32_t		cpu_CLW_active;
	volatile uint64_t		cpu_CLWFlush_req;
	volatile uint64_t		cpu_CLWFlush_last;
	volatile uint64_t		cpu_CLWClean_req;
	volatile uint64_t		cpu_CLWClean_last;
#endif


#if __arm64__
	vm_offset_t			coresight_base[CORESIGHT_REGIONS];	/* CoreSight debug region mappings */
#endif

	/* CCC ARMv8 registers */
	uint64_t			cpu_regmap_paddr;

	uint32_t			cpu_phys_id;		/* physical (MPIDR-style) CPU id */
	uint32_t			cpu_l2_access_penalty;
	void				*platform_error_handler;

	int				cpu_mcount_off;

	/* Sleep token value: bytes read "SUSP" in little-endian memory. */
#define ARM_CPU_ON_SLEEP_PATH	0x50535553UL
	volatile unsigned int		cpu_sleep_token;
	unsigned int			cpu_sleep_token_last;

	cpu_stat_t			cpu_stat;		/* exception/interrupt counters */

	volatile int			PAB_active;		/* Tells the console if we are dumping backtraces */

#if	KPC
	/* double-buffered performance counter data */
	uint64_t			*cpu_kpc_buf[2];
	/* PMC shadow and reload value buffers */
	uint64_t			*cpu_kpc_shadow;
	uint64_t			*cpu_kpc_reload;
#endif
#if MONOTONIC
	struct mt_cpu			cpu_monotonic;
#endif /* MONOTONIC */
	/* Topology: cluster and cache identifiers/sizes. */
	cluster_type_t			cpu_cluster_type;
	uint32_t			cpu_cluster_id;
	uint32_t			cpu_l2_id;
	uint32_t			cpu_l2_size;
	uint32_t			cpu_l3_id;
	uint32_t			cpu_l3_size;

	struct pmap_cpu_data		cpu_pmap_cpu_data;	/* per-CPU pmap state */
	dbgwrap_thread_state_t		halt_state;		/* state captured when halted via dbgwrap */
	enum {
		CPU_NOT_HALTED = 0,
		CPU_HALTED,
		CPU_HALTED_WITH_STATE	/* halted and halt_state is valid */
	} halt_status;
} cpu_data_t;
278 | |
/*
 * cpu_flags bits (names suggest sleep/started lifecycle state).
 */
#define SleepState	0x0800
#define StartedState	0x1000

extern	cpu_data_entry_t	CpuDataEntries[MAX_CPUS];	/* per-CPU bootstrap table */
extern	cpu_data_t		BootCpuData;			/* boot processor's cpu_data */
extern	boot_args		*BootArgs;

/* Architecture-specific low-level vector symbols. */
#if __arm__
extern	unsigned int		*ExceptionLowVectorsBase;
extern	unsigned int		*ExceptionVectorsTable;
#elif __arm64__
extern	unsigned int		LowResetVectorBase;
extern	unsigned int		LowResetVectorEnd;
#if WITH_CLASSIC_S2R
extern	uint8_t			SleepToken[8];
#endif
extern	unsigned int		LowExceptionVectorBase;
#else
#error Unknown arch
#endif

/* cpu_data lookup, allocation, initialization, and registration. */
extern cpu_data_t	*cpu_datap(int cpu);
extern cpu_data_t	*cpu_data_alloc(boolean_t is_boot);
extern void		cpu_stack_alloc(cpu_data_t*);
extern void		cpu_data_init(cpu_data_t *cpu_data_ptr);
extern void		cpu_data_free(cpu_data_t *cpu_data_ptr);
extern kern_return_t	cpu_data_register(cpu_data_t *cpu_data_ptr);
extern cpu_data_t	*processor_to_cpu_datap( processor_t processor);
310 | |
#if __arm64__
/*
 * System register values to restore on resume (currently just TCR_EL1,
 * the translation control register).
 */
typedef struct sysreg_restore
{
	uint64_t		tcr_el1;	/* translation control register */
} sysreg_restore_t;

extern sysreg_restore_t sysreg_restore;
#endif /* __arm64__ */
319 | |
320 | #endif /* ARM_CPU_DATA_INTERNAL */ |
321 | |