/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _MACHINE_MACHINE_ROUTINES_H
#define _MACHINE_MACHINE_ROUTINES_H

#include <sys/cdefs.h>
#include <stdint.h>

#if defined (__i386__) || defined(__x86_64__)
#include "i386/machine_routines.h"
#elif defined (__arm__) || defined (__arm64__)
#include "arm/machine_routines.h"
#else
#error architecture not supported
#endif

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

/*!
 * @function ml_cpu_can_exit
 * @brief Check whether the platform code allows |cpu_id| to be
 * shut down at runtime.
 * @param cpu_id CPU that may be shut down
 * @param reason Reason the shutdown is being requested
 * @return true if allowed, false otherwise
 */
bool ml_cpu_can_exit(int cpu_id, processor_reason_t reason);

/*!
 * @function ml_cpu_begin_state_transition
 * @brief Tell the platform code that processor_start() or
 * processor_exit() is about to begin for |cpu_id|. This
 * can block.
 * @param cpu_id CPU that is (potentially) going up or down
 */
void ml_cpu_begin_state_transition(int cpu_id);

/*!
 * @function ml_cpu_end_state_transition
 * @brief Tell the platform code that processor_start() or
 * processor_exit() is finished for |cpu_id|. This
 * can block. Can be called from a different thread from
 * ml_cpu_begin_state_transition().
 * @param cpu_id CPU that is (potentially) going up or down
 */
void ml_cpu_end_state_transition(int cpu_id);

/*!
 * @function ml_cpu_begin_loop
 * @brief Acquire a global lock that prevents processor_start() or
 * processor_exit() from changing any CPU states for the
 * duration of a loop. This can block.
 */
void ml_cpu_begin_loop(void);

/*!
 * @function ml_cpu_end_loop
 * @brief Release the global lock acquired by ml_cpu_begin_loop().
 * Must be called from the same thread as ml_cpu_begin_loop().
 */
void ml_cpu_end_loop(void);
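
/*
 * Illustrative sketch (not part of this interface; the per-CPU inspection is
 * hypothetical): a caller that needs a stable view of CPU states can bracket
 * its iteration with this lock, using ml_early_cpu_max_number() (declared
 * below) as the loop bound.
 *
 *     ml_cpu_begin_loop();
 *     for (int cpu = 0; cpu <= ml_early_cpu_max_number(); cpu++) {
 *         // inspect or record per-CPU state; no CPU can start or exit here
 *     }
 *     ml_cpu_end_loop();
 */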

/*!
 * @function ml_early_cpu_max_number()
 * @brief Returns, early in boot, the maximum cpu number the kernel will ever use.
 *
 * @return the maximum cpu number the kernel will ever use.
 *
 * @discussion
 * The value returned by this function might be an over-estimate,
 * but is more precise than @c MAX_CPUS.
 *
 * Unlike @c real_ncpus, which is only initialized late in boot,
 * this can be called during startup after the @c STARTUP_SUB_TUNABLES
 * subsystem has been initialized.
 */
int ml_early_cpu_max_number(void);
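
/*
 * Illustrative sketch (the bookkeeping table is hypothetical): because the
 * returned value is an inclusive upper bound available early in boot, it can
 * be used to size per-CPU state before real_ncpus is known.
 *
 *     int max_cpu = ml_early_cpu_max_number();
 *     size_t table_entries = (size_t)max_cpu + 1;    // maximum is inclusive
 *     // allocate `table_entries' slots of per-CPU bookkeeping here
 */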

/*!
 * @function ml_cpu_power_enable
 * @abstract Enable voltage rails to a CPU prior to bringing it up
 * @discussion Called from the scheduler to enable any voltage rails
 * needed by a CPU. This should happen before the
 * CPU_BOOT_REQUESTED broadcast. This does not boot the
 * CPU and it may be a no-op on some platforms. This must be
 * called from a schedulable context.
 * @param cpu_id The logical CPU ID (from the topology) of the CPU to be booted
 */
void ml_cpu_power_enable(int cpu_id);

/*!
 * @function ml_cpu_power_disable
 * @abstract Disable voltage rails to a CPU after bringing it down
 * @discussion Called from the scheduler to disable any voltage rails
 * that are no longer needed by an offlined CPU or cluster.
 * This should happen after the CPU_EXITED broadcast.
 * This does not halt the CPU and it may be a no-op on some
 * platforms. This must be called from a schedulable context.
 * @param cpu_id The logical CPU ID (from the topology) of the halted CPU
 */
void ml_cpu_power_disable(int cpu_id);

#pragma GCC visibility pop
#endif /* defined(XNU_KERNEL_PRIVATE) */

/*!
 * @enum cpu_event
 * @abstract Broadcast events allowing clients to hook CPU state transitions.
 * @constant CPU_BOOT_REQUESTED Called from processor_start(); may block.
 * @constant CPU_BOOTED Called from platform code on the newly-booted CPU; must not block.
 * @constant CPU_ACTIVE Called from scheduler code; may block.
 * @constant CLUSTER_ACTIVE Called from platform code; may block.
 * @constant CPU_EXIT_REQUESTED Called from processor_exit(); may block.
 * @constant CPU_DOWN Called from platform code on the disabled CPU; must not block.
 * @constant CLUSTER_EXIT_REQUESTED Called from platform code; may block.
 * @constant CPU_EXITED Called after CPU is stopped; may block.
 */
enum cpu_event {
        CPU_BOOT_REQUESTED = 0,
        CPU_BOOTED,
        CPU_ACTIVE,
        CLUSTER_ACTIVE,
        CPU_EXIT_REQUESTED,
        CPU_DOWN,
        CLUSTER_EXIT_REQUESTED,
        CPU_EXITED,
};

typedef bool (*cpu_callback_t)(void *param, enum cpu_event event, unsigned int cpu_or_cluster);

/*!
 * @function cpu_event_register_callback
 * @abstract Register a function to be called on CPU state changes.
 * @param fn Function to call on state change events.
 * @param param Optional argument to be passed to the callback (e.g. object pointer).
 */
void cpu_event_register_callback(cpu_callback_t fn, void *param);

/*!
 * @function cpu_event_unregister_callback
 * @abstract Unregister a previously-registered callback function.
 * @param fn Function pointer previously passed to cpu_event_register_callback().
 */
void cpu_event_unregister_callback(cpu_callback_t fn);
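
/*
 * Illustrative sketch (the callback and its bookkeeping are hypothetical, and
 * the meaning of the boolean return value is assumed rather than specified
 * here):
 *
 *     static bool
 *     my_cpu_event_cb(void *param, enum cpu_event event, unsigned int id)
 *     {
 *         switch (event) {
 *         case CPU_BOOTED:
 *             // runs on the newly-booted CPU; must not block
 *             break;
 *         case CPU_EXITED:
 *             // CPU has stopped; blocking is allowed
 *             break;
 *         default:
 *             break;
 *         }
 *         return true;    // assumed to acknowledge the event
 *     }
 *
 *     cpu_event_register_callback(my_cpu_event_cb, NULL);
 *     ...
 *     cpu_event_unregister_callback(my_cpu_event_cb);
 */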

#if XNU_KERNEL_PRIVATE
/*!
 * @function ml_broadcast_cpu_event
 * @abstract Internal XNU function used to broadcast CPU state changes to callers.
 * @param event CPU event that is occurring.
 * @param cpu_or_cluster Logical CPU ID of the core (or cluster) affected by the event.
 */
void ml_broadcast_cpu_event(enum cpu_event event, unsigned int cpu_or_cluster);
#endif /* XNU_KERNEL_PRIVATE */

/*!
 * @function ml_io_read()
 * @brief Perform an MMIO read access
 *
 * @return The value resulting from the read.
 *
 */
unsigned long long ml_io_read(uintptr_t iovaddr, int iovsz);
unsigned int ml_io_read8(uintptr_t iovaddr);
unsigned int ml_io_read16(uintptr_t iovaddr);
unsigned int ml_io_read32(uintptr_t iovaddr);
unsigned long long ml_io_read64(uintptr_t iovaddr);

/*!
 * @function ml_io_write()
 * @brief Perform an MMIO write access
 *
 */
void ml_io_write(uintptr_t vaddr, uint64_t val, int size);
void ml_io_write8(uintptr_t vaddr, uint8_t val);
void ml_io_write16(uintptr_t vaddr, uint16_t val);
void ml_io_write32(uintptr_t vaddr, uint32_t val);
void ml_io_write64(uintptr_t vaddr, uint64_t val);
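
/*
 * Illustrative sketch (the register base, offset, and bit are hypothetical):
 * a read-modify-write of a 32-bit device register using the sized accessors.
 *
 *     uint32_t v = ml_io_read32(regs_base + 0x10);
 *     v |= 0x1;                              // set a hypothetical enable bit
 *     ml_io_write32(regs_base + 0x10, v);
 */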

#if XNU_KERNEL_PRIVATE
/*
 * ml_io access timeouts and tracing.
 *
 * We are specific about what gets compiled in, so as not to burden
 * heavily used code with paths that will never be used on common
 * configurations.
 */

/* ml_io_read/write timeouts are generally enabled on macOS, because
 * they may help developers. */
#if (XNU_TARGET_OS_OSX || DEVELOPMENT || DEBUG)

#define ML_IO_TIMEOUTS_ENABLED 1

/* Simulating stretched IO is only for DEVELOPMENT || DEBUG. */
#if DEVELOPMENT || DEBUG
#define ML_IO_SIMULATE_STRETCHED_ENABLED 1
#endif

/* On internal (DEVELOPMENT || DEBUG) x86 builds, we also verify that the memory is mapped non-cacheable. */
#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
#define ML_IO_VERIFY_UNCACHEABLE 1
#endif

#endif /* (XNU_TARGET_OS_OSX || DEVELOPMENT || DEBUG) */
#endif /* XNU_KERNEL_PRIVATE */

#if KERNEL_PRIVATE

/*!
 * @function ml_io_increase_timeouts
 * @brief Increase the ml_io_read* and ml_io_write*
 * timeouts for a region of VA space
 * [`iovaddr_base', `iovaddr_base' + `size').
 * @discussion This function is intended for building an
 * allowlist of known-misbehaving register spaces
 * on specific peripherals. `size' must be between
 * 1 and 4096 inclusive, and the VA range must not
 * overlap with any ranges previously passed to
 * ml_io_increase_timeouts().
 * @note This function has no effect when the new timeouts are
 * shorter than the global timeouts.
 * @param iovaddr_base Base VA of the target region
 * @param size Size of the target region, in bytes
 * @param read_timeout_us New read timeout, in microseconds
 * @param write_timeout_us New write timeout, in microseconds
 * @return 0 if successful, or KERN_INVALID_ARGUMENT if either
 * the VA range or timeout is invalid.
 */
OS_WARN_RESULT
int ml_io_increase_timeouts(uintptr_t iovaddr_base, unsigned int size, uint32_t read_timeout_us, uint32_t write_timeout_us);

/*!
 * @function ml_io_reset_timeouts
 * @brief Unregister custom timeouts previously registered by
 * ml_io_increase_timeouts().
 * @discussion The caller must use the exact `iovaddr_base' and `size'
 * range passed to a previous ml_io_increase_timeouts()
 * call. Unregistering a smaller subrange is unsupported
 * and will return an error.
 * @param iovaddr_base Base VA previously passed to ml_io_increase_timeouts()
 * @param size Size previously passed to ml_io_increase_timeouts()
 * @return 0 if successful, or KERN_NOT_FOUND if the specified range
 * does not match a previously-registered timeout.
 */
OS_WARN_RESULT
int ml_io_reset_timeouts(uintptr_t iovaddr_base, unsigned int size);
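
/*
 * Illustrative sketch (the register window and timeout values are
 * hypothetical): a driver that knows a 4 KiB register window can stall for up
 * to 1 ms might allowlist it and later remove the entry.
 *
 *     if (ml_io_increase_timeouts(window_base, 4096, 1000, 1000) != 0) {
 *         // range overlapped an existing entry or an argument was invalid
 *     }
 *     ...
 *     if (ml_io_reset_timeouts(window_base, 4096) != 0) {
 *         // this exact base/size pair was not previously registered
 *     }
 */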

#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE

#if ML_IO_TIMEOUTS_ENABLED
#if !defined(__x86_64__)
/* x86 does not have the MACHINE_TIMEOUT types, and the variables are
 * declared elsewhere. */
extern machine_timeout_t report_phy_read_delay_to;
extern machine_timeout_t report_phy_write_delay_to;
extern machine_timeout_t trace_phy_read_delay_to;
extern machine_timeout_t trace_phy_write_delay_to;
#endif /* !defined(__x86_64__) */
extern void override_io_timeouts(uintptr_t vaddr, uint64_t paddr,
    uint64_t *read_timeout, uint64_t *write_timeout);
#endif /* ML_IO_TIMEOUTS_ENABLED */

/*!
 * @function ml_get_cluster_type_name
 * @brief Copy a human-readable name for @c cluster_type into @c name.
 * @param cluster_type Cluster type to look up
 * @param name Destination buffer for the name string
 * @param name_size Size of the destination buffer, in bytes
 */
void ml_get_cluster_type_name(cluster_type_t cluster_type, char *name,
    size_t name_size);

/*!
 * @function ml_get_cluster_count
 * @brief Return the number of CPU clusters in the system topology.
 */
unsigned int ml_get_cluster_count(void);

/**
 * Depending on the system, it's possible that a kernel backtrace could contain
 * stack frames from both XNU and non-XNU-owned stacks. This function can be
 * used to determine whether an address is pointing to one of these non-XNU
 * stacks.
 *
 * @param addr The virtual address to check.
 *
 * @return True if the address is within the bounds of a non-XNU stack. False
 * otherwise.
 */
bool ml_addr_in_non_xnu_stack(uintptr_t addr);
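
/*
 * Illustrative sketch (the frame array and count are hypothetical): a
 * backtrace consumer could use this to tag frames that live on stacks XNU
 * does not own.
 *
 *     for (unsigned int i = 0; i < nframes; i++) {
 *         if (ml_addr_in_non_xnu_stack(frames[i])) {
 *             // frame address falls within a non-XNU-owned stack
 *         }
 *     }
 */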

#endif /* XNU_KERNEL_PRIVATE */

#if MACH_KERNEL_PRIVATE

/*!
 * @function ml_map_cpus_to_clusters
 * @brief Populate the logical CPU -> logical cluster ID table at the given address.
 *
 * @param table Array to write the mapping into, indexed by logical CPU ID
 */
void ml_map_cpus_to_clusters(uint8_t *table);

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _MACHINE_MACHINE_ROUTINES_H */