/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/simple_lock.h (derived from kern/lock.h)
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Definitions of atomic primitives and simple locking primitives.
 */

#ifdef KERNEL_PRIVATE

#ifndef _KERN_SIMPLE_LOCK_H_
#define _KERN_SIMPLE_LOCK_H_

#include <mach/boolean.h>
#include <kern/lock_types.h>
#include <kern/lock_group.h>
#include <machine/simple_lock.h>

#ifdef XNU_KERNEL_PRIVATE

#if MACH_KERNEL_PRIVATE
#include <machine/atomic.h>
#include <mach_ldebug.h>
#endif

__BEGIN_DECLS

#pragma GCC visibility push(hidden)

#ifdef MACH_KERNEL_PRIVATE

#define HW_LOCK_STATE_TO_THREAD(state) ((thread_t)(state))
#define HW_LOCK_THREAD_TO_STATE(thread) ((uintptr_t)(thread))

extern void hw_lock_init(
        hw_lock_t);

extern void hw_lock_lock(
        hw_lock_t
        LCK_GRP_ARG(lck_grp_t*));

extern void hw_lock_lock_nopreempt(
        hw_lock_t
        LCK_GRP_ARG(lck_grp_t*));

extern unsigned int hw_lock_to(
        hw_lock_t,
        hw_spin_policy_t
        LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

extern unsigned int hw_lock_to_nopreempt(
        hw_lock_t,
        hw_spin_policy_t
        LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

extern unsigned int hw_lock_try(
        hw_lock_t
        LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

extern unsigned int hw_lock_try_nopreempt(
        hw_lock_t
        LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

#if !LCK_GRP_USE_ARG
#define hw_lock_lock(lck, grp) \
        hw_lock_lock(lck)

#define hw_lock_lock_nopreempt(lck, grp) \
        hw_lock_lock_nopreempt(lck)

#define hw_lock_to(lck, spec, grp) \
        hw_lock_to(lck, spec)

#define hw_lock_to_nopreempt(lck, spec, grp) \
        hw_lock_to_nopreempt(lck, spec)

#define hw_lock_try(lck, grp) \
        hw_lock_try(lck)

#define hw_lock_try_nopreempt(lck, grp) \
        hw_lock_try_nopreempt(lck)
#endif /* !LCK_GRP_USE_ARG */

extern void hw_lock_unlock(
        hw_lock_t);

extern void hw_lock_unlock_nopreempt(
        hw_lock_t);

extern void hw_lock_assert(
        hw_lock_t lock,
        unsigned int type);

extern unsigned int hw_lock_held(
        hw_lock_t) __result_use_check;
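
/*
 * Illustrative use of the hw_lock interface; a sketch only, where the
 * lock group "my_grp" is a hypothetical name.  The plain lock variants
 * are assumed to spin with preemption disabled for the duration of the
 * hold, while the _nopreempt variants expect the caller to have
 * disabled preemption already.
 *
 *      hw_lock_data_t my_lock;
 *
 *      hw_lock_init(&my_lock);
 *      hw_lock_lock(&my_lock, &my_grp);
 *      ... critical section ...
 *      hw_lock_unlock(&my_lock);
 */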

extern boolean_t hw_atomic_test_and_set32(
        uint32_t *target,
        uint32_t test_mask,
        uint32_t set_mask,
        enum memory_order ord,
        boolean_t wait);

extern boolean_t atomic_test_and_set32(
        uint32_t *target,
        uint32_t test_mask,
        uint32_t set_mask,
        enum memory_order ord,
        boolean_t wait);

extern void atomic_exchange_abort(
        void);

extern boolean_t atomic_exchange_complete32(
        uint32_t *target,
        uint32_t previous,
        uint32_t newval,
        enum memory_order ord);

extern uint32_t atomic_exchange_begin32(
        uint32_t *target,
        uint32_t *previous,
        enum memory_order ord);
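
/*
 * The begin/complete pair supports read-modify-write retry loops:
 * atomic_exchange_begin32() observes the current value, and
 * atomic_exchange_complete32() attempts to publish the update and
 * reports whether it succeeded; atomic_exchange_abort() gives up an
 * exchange that was begun but will not be completed.  A minimal
 * sketch, where "my_word" and MY_FLAG are hypothetical:
 *
 *      uint32_t prev, next;
 *
 *      do {
 *              (void)atomic_exchange_begin32(&my_word, &prev, memory_order_relaxed);
 *              next = prev | MY_FLAG;
 *      } while (!atomic_exchange_complete32(&my_word, prev, next, memory_order_acq_rel));
 */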

#if defined(__arm__) || defined(__arm64__)
uint32_t load_exclusive32(
        uint32_t *target,
        enum memory_order ord);
boolean_t store_exclusive32(
        uint32_t *target,
        uint32_t value,
        enum memory_order ord);
#endif /* defined(__arm__)||defined(__arm64__) */

extern void usimple_unlock_nopreempt(
        usimple_lock_t);

extern hw_spin_timeout_t hw_spin_compute_timeout(
        hw_spin_policy_t policy);

extern bool hw_spin_in_ppl(
        hw_spin_timeout_t to) __pure2;

extern bool hw_spin_should_keep_spinning(
        void *lock,
        hw_spin_policy_t policy,
        hw_spin_timeout_t to,
        hw_spin_state_t *state);
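
/*
 * Sketch of the spin-wait pattern these helpers support; "policy" is a
 * caller-supplied hw_spin_policy_t and the try_acquire() step stands
 * in for a caller-specific acquisition attempt (both hypothetical):
 *
 *      hw_spin_timeout_t to = hw_spin_compute_timeout(policy);
 *      hw_spin_state_t   ss = { };
 *
 *      while (!try_acquire(lock)) {
 *              if (!hw_spin_should_keep_spinning(lock, policy, to, &ss)) {
 *                      break;
 *              }
 *      }
 */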

#endif /* MACH_KERNEL_PRIVATE */

struct usimple_lock_startup_spec {
        usimple_lock_t lck;
        unsigned short lck_init_arg;
};

extern void usimple_lock_startup_init(
        struct usimple_lock_startup_spec *spec);

#define SIMPLE_LOCK_DECLARE(var, arg) \
        decl_simple_lock_data(, var); \
        static __startup_data struct usimple_lock_startup_spec \
        __startup_usimple_lock_spec_ ## var = { &var, arg }; \
        STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, usimple_lock_startup_init, \
            &__startup_usimple_lock_spec_ ## var)
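
/*
 * SIMPLE_LOCK_DECLARE() statically declares a simple lock and registers
 * it for initialization during the LOCKS startup phase, so no explicit
 * usimple_lock_init() call is needed.  For example (the name "my_lock"
 * is hypothetical):
 *
 *      SIMPLE_LOCK_DECLARE(my_lock, 0);
 */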

extern uint32_t hw_wait_while_equals32(
        uint32_t *address,
        uint32_t current);

extern uint64_t hw_wait_while_equals64(
        uint64_t *address,
        uint64_t current);

#if __LP64__
#define hw_wait_while_equals_long(ptr, cur) ({ \
        static_assert(sizeof(*(ptr)) == sizeof(long)); \
        (typeof(cur))hw_wait_while_equals64(__DEVOLATILE(uint64_t *, ptr), (uint64_t)(cur)); \
})
#else
#define hw_wait_while_equals_long(ptr, cur) ({ \
        static_assert(sizeof(*(ptr)) == sizeof(long)); \
        (typeof(cur))hw_wait_while_equals32(__DEVOLATILE(uint32_t *, ptr), (uint32_t)(cur)); \
})
#endif
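
/*
 * Sketch of waiting for a pointer-sized value to move away from an
 * observed snapshot; the names "my_state" and MY_BUSY_VALUE are
 * hypothetical, and the helper is assumed to return the freshly
 * reloaded value once it differs:
 *
 *      long cur = *my_state;
 *
 *      while (cur == MY_BUSY_VALUE) {
 *              cur = hw_wait_while_equals_long(my_state, cur);
 *      }
 */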

extern void usimple_lock_init(
        usimple_lock_t,
        unsigned short);

extern void usimple_lock(
        usimple_lock_t
        LCK_GRP_ARG(lck_grp_t*));

extern unsigned int usimple_lock_try(
        usimple_lock_t
        LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

extern void usimple_lock_assert(
        usimple_lock_t lock,
        unsigned int type);

extern void usimple_lock_try_lock_loop(
        usimple_lock_t
        LCK_GRP_ARG(lck_grp_t*));

#if defined(__x86_64__)
extern unsigned int usimple_lock_try_lock_mp_signal_safe_loop_deadline(
        usimple_lock_t,
        uint64_t
        LCK_GRP_ARG(lck_grp_t*)) /* __result_use_check */;

extern unsigned int usimple_lock_try_lock_mp_signal_safe_loop_duration(
        usimple_lock_t,
        uint64_t
        LCK_GRP_ARG(lck_grp_t*)) __result_use_check;
#endif

extern void usimple_unlock(
        usimple_lock_t);

#if !LCK_GRP_USE_ARG
#define usimple_lock(lck, grp) \
        usimple_lock(lck)

#define usimple_lock_try(lck, grp) \
        usimple_lock_try(lck)

#define usimple_lock_try_lock_loop(lck, grp) \
        usimple_lock_try_lock_loop(lck)

#if defined(__x86_64__)
#define usimple_lock_try_lock_mp_signal_safe_loop_deadline(lck, ddl, grp) \
        usimple_lock_try_lock_mp_signal_safe_loop_deadline(lck, ddl)
#define usimple_lock_try_lock_mp_signal_safe_loop_duration(lck, dur, grp) \
        usimple_lock_try_lock_mp_signal_safe_loop_duration(lck, dur)
#endif
#endif /* !LCK_GRP_USE_ARG */

/*
 * If simple_lock_init has still not been defined at this point, we are
 * either outside the osfmk component, running on a true SMP system, or
 * built with lock debugging: map the simple_lock interface onto the
 * usimple_lock implementation below.
 */
#if !defined(simple_lock_init)
#define simple_lock_init(l, t)          usimple_lock_init(l, t)
#define simple_lock(l, grp)             usimple_lock(l, grp)
#define simple_unlock(l)                usimple_unlock(l)
#define simple_lock_assert(l, x)        usimple_lock_assert((l), (x))
#define simple_lock_try(l, grp)         usimple_lock_try(l, grp)
#define simple_lock_try_lock_loop(l, grp) usimple_lock_try_lock_loop(l, grp)
#define simple_lock_try_lock_mp_signal_safe_loop_deadline(l, ddl, grp) \
        usimple_lock_try_lock_mp_signal_safe_loop_deadline(l, ddl, grp)
#define simple_lock_try_lock_mp_signal_safe_loop_duration(l, dur, grp) \
        usimple_lock_try_lock_mp_signal_safe_loop_duration(l, dur, grp)
#define simple_lock_addr(l)             (&(l))
#endif /* !defined(simple_lock_init) */
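
/*
 * Illustrative use of the simple_lock() facade; a sketch only, where
 * "my_lock" and "my_grp" are hypothetical names:
 *
 *      decl_simple_lock_data(static, my_lock);
 *
 *      simple_lock_init(&my_lock, 0);
 *      simple_lock(&my_lock, &my_grp);
 *      ... critical section ...
 *      simple_unlock(&my_lock);
 */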

#ifdef MACH_KERNEL_PRIVATE

typedef uint32_t hw_lock_bit_t;

extern const struct hw_spin_policy hw_lock_bit_policy;
#if __arm64__
extern const struct hw_spin_policy hw_lock_bit_policy_2s;
#endif
extern const struct hw_spin_policy hw_lock_spin_policy;
extern const struct hw_spin_policy hw_lock_spin_panic_policy;
#if DEBUG || DEVELOPMENT
extern const struct hw_spin_policy hw_lock_test_give_up_policy;
#endif /* DEBUG || DEVELOPMENT */

extern void hw_lock_bit(
        hw_lock_bit_t *,
        unsigned int
        LCK_GRP_ARG(lck_grp_t*));

extern void hw_lock_bit_nopreempt(
        hw_lock_bit_t *,
        unsigned int
        LCK_GRP_ARG(lck_grp_t*));

extern bool hw_lock_bit_try(
        hw_lock_bit_t *,
        unsigned int
        LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

extern unsigned int hw_lock_bit_to(
        hw_lock_bit_t *,
        unsigned int,
        hw_spin_policy_t
        LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

extern hw_lock_status_t hw_lock_bit_to_b(
        hw_lock_bit_t *,
        unsigned int,
        hw_spin_policy_t,
        bool (^lock_pause)(void)
        LCK_GRP_ARG(lck_grp_t*));

extern void hw_unlock_bit(
        hw_lock_bit_t *,
        unsigned int);

extern void hw_unlock_bit_nopreempt(
        hw_lock_bit_t *,
        unsigned int);

#define hw_lock_bit_held(l, b) \
        (((*(l)) & (1 << (b))) != 0)
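
/*
 * Bit locks treat a single bit of a 32-bit word as a spinlock, leaving
 * the remaining bits free to carry other state.  Sketch only; the bit
 * index, "my_flags" and "my_grp" are hypothetical, with bit 0 taken as
 * the lock bit:
 *
 *      hw_lock_bit_t my_flags;
 *
 *      hw_lock_bit(&my_flags, 0, &my_grp);
 *      ... other bits of my_flags may be inspected or updated ...
 *      hw_unlock_bit(&my_flags, 0);
 */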

#if !LCK_GRP_USE_ARG
#define hw_lock_bit(lck, bit, grp) \
        hw_lock_bit(lck, bit)

#define hw_lock_bit_nopreempt(lck, bit, grp) \
        hw_lock_bit_nopreempt(lck, bit)

#define hw_lock_bit_try(lck, bit, grp) \
        hw_lock_bit_try(lck, bit)

#define hw_lock_bit_to(lck, bit, spec, grp) \
        hw_lock_bit_to(lck, bit, spec)

#define hw_lock_bit_to_b(lck, bit, spec, pause, grp) \
        hw_lock_bit_to_b(lck, bit, spec, pause)

#endif /* !LCK_GRP_USE_ARG */
#endif /* MACH_KERNEL_PRIVATE */

#pragma GCC visibility pop

__END_DECLS

#endif /* XNU_KERNEL_PRIVATE */
#endif /* !_KERN_SIMPLE_LOCK_H_ */

#endif /* KERNEL_PRIVATE */