1 | /* |
2 | * Copyright (c) 2018 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #ifndef __OS_ATOMIC_PRIVATE_H__ |
30 | #define __OS_ATOMIC_PRIVATE_H__ |
31 | |
32 | /*! |
33 | * @file <os/atomic_private.h> |
34 | * |
35 | * @brief |
36 | * This file defines nicer (terser and safer) wrappers for C11's <stdatomic.h>. |
37 | * |
38 | * @discussion |
39 | * @see xnu.git::doc/atomics.md which provides more extensive documentation |
40 | * about this header. |
41 | * |
42 | * Note that some of the macros defined in this file may be overridden by |
43 | * architecture specific headers. |
44 | * |
45 | * All the os_atomic* functions take an operation ordering argument that can be: |
46 | * - C11 memory orders: relaxed, acquire, release, acq_rel or seq_cst which |
47 | * imply a memory fence on SMP machines, and always carry the matching |
48 | * compiler barrier semantics. |
49 | * |
50 | * - the os_atomic-specific `dependency` memory ordering that is used to |
 *   document intent to carry a data or address dependency.
52 | * See doc/atomics.md for more information. |
53 | * |
54 | * - a compiler barrier: compiler_acquire, compiler_release, compiler_acq_rel |
55 | * without a corresponding memory fence. |
56 | */ |
57 | |
58 | #include <os/atomic.h> |
59 | |
60 | /*! |
61 | * @group <os/atomic_private.h> tunables. |
62 | * |
63 | * @{ |
64 | * |
65 | * @brief |
66 | * @c OS_ATOMIC_CONFIG_* macros provide tunables for clients. |
67 | */ |
68 | |
69 | /*! |
70 | * @macro OS_ATOMIC_CONFIG_SMP |
71 | * |
72 | * @brief |
73 | * Whether this is used on an SMP system, defaults to 1. |
74 | */ |
75 | #ifndef OS_ATOMIC_CONFIG_SMP |
76 | #define OS_ATOMIC_CONFIG_SMP 1 |
77 | #endif // OS_ATOMIC_CONFIG_SMP |
78 | |
79 | /*! |
80 | * @macro OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY |
81 | * |
82 | * @brief |
83 | * Obsolete, kept for backward compatibility |
84 | */ |
85 | #ifndef OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY |
86 | #define OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY 0 |
87 | #endif // OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY |
88 | |
89 | /*! |
90 | * @macro OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY |
91 | * |
92 | * @brief |
93 | * Expose the os_atomic-specific fake `dependency` memory ordering. |
94 | * |
95 | * @discussion |
96 | * The dependency ordering can be used to try to "repair" C11's consume ordering |
97 | * and should be limited to extremely complex algorithms where every cycle counts. |
98 | * |
 * Because this feature carries inherent risks (there is no compiler support
 * for it), it is reserved for expert, highly domain-specific code and is off
 * by default.
101 | * |
102 | * Default: 0 |
103 | */ |
104 | #ifndef OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY |
105 | #if XNU_KERNEL_PRIVATE |
106 | #define OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY 1 |
107 | #else |
108 | #define OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY 0 |
109 | #endif |
110 | #endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY |
111 | |
112 | /*! @} */ |
113 | |
114 | /*! |
115 | * @group <os/atomic_private.h> features (arch specific). |
116 | * |
117 | * @{ |
118 | * |
119 | * @brief |
 * The @c OS_ATOMIC_USE_* and @c OS_ATOMIC_HAS_* defines expose specifics of
 * the <os/atomic_private.h> implementation that are relevant to certain
 * clients and can be used to conditionalize code.
123 | */ |
124 | |
125 | /*! |
126 | * @const OS_ATOMIC_HAS_LLSC |
127 | * |
128 | * @brief |
129 | * Whether the platform has LL/SC features. |
130 | * |
131 | * @discussion |
132 | * When set, the os_atomic_*_exclusive() macros are defined. |
133 | */ |
134 | #if defined(__i386__) || defined(__x86_64__) |
135 | #define OS_ATOMIC_HAS_LLSC 0 |
136 | #elif defined(__arm__) || defined(__arm64__) |
137 | #define OS_ATOMIC_HAS_LLSC 1 |
138 | #else |
139 | #error unsupported architecture |
140 | #endif |
141 | |
142 | /*! |
143 | * @const OS_ATOMIC_USE_LLSC |
144 | * |
145 | * @brief |
146 | * Whether os_atomic* use LL/SC internally. |
147 | * |
148 | * @discussion |
149 | * OS_ATOMIC_USE_LLSC implies OS_ATOMIC_HAS_LLSC. |
150 | */ |
151 | #if defined(__arm64__) && defined(__ARM_ARCH_8_2__) |
152 | #define OS_ATOMIC_USE_LLSC 0 |
153 | #else |
154 | #define OS_ATOMIC_USE_LLSC OS_ATOMIC_HAS_LLSC |
155 | #endif |
156 | |
157 | /*! |
158 | * @const OS_ATOMIC_HAS_STARVATION_FREE_RMW |
159 | * |
160 | * @brief |
161 | * Whether os_atomic* Read-Modify-Write operations are starvation free |
162 | * in the current configuration. |
163 | */ |
164 | #define OS_ATOMIC_HAS_STARVATION_FREE_RMW (!OS_ATOMIC_USE_LLSC) |
165 | |
166 | /*! @} */ |
167 | |
168 | #include "atomic_private_impl.h" // Internal implementation details |
169 | |
170 | /*! |
171 | * @function os_compiler_barrier |
172 | * |
173 | * @brief |
174 | * Provide a compiler barrier according to the specified ordering. |
175 | * |
176 | * @param m |
 * An optional ordering among `acquire`, `release` or `acq_rel`, which defaults
 * to `acq_rel` when not specified.
 * These are equivalent to the `compiler_acquire`, `compiler_release` and
 * `compiler_acq_rel` orderings taken by the os_atomic* functions.
181 | */ |
182 | #undef os_compiler_barrier |
183 | #define os_compiler_barrier(b...) \ |
184 | os_atomic_std(atomic_signal_fence)(_os_compiler_barrier_##b) |
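
/*
 * Illustrative sketch (not part of the API): using a compiler barrier to
 * order plain accesses against code running on the same CPU, such as an
 * interrupt handler, where no hardware fence is needed. The `record`
 * structure and its fields are hypothetical.
 *
 *   record->data = value;
 *   os_compiler_barrier(release);  // compiler may not sink the store below
 *   record->valid = true;
 *
 * Calling os_compiler_barrier() with no argument uses the default `acq_rel`.
 */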
185 | |
186 | /*! |
187 | * @function os_atomic_thread_fence |
188 | * |
189 | * @brief |
190 | * Memory fence which is elided in non-SMP mode, but always carries the |
191 | * corresponding compiler barrier. |
192 | * |
193 | * @param m |
194 | * The ordering for this fence. |
195 | */ |
196 | #define os_atomic_thread_fence(m) ({ \ |
197 | os_atomic_std(atomic_thread_fence)(_os_atomic_mo_##m##_smp); \ |
198 | os_atomic_std(atomic_signal_fence)(_os_atomic_mo_##m); \ |
199 | }) |
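
/*
 * Illustrative sketch (not part of the API): publishing data with an explicit
 * release fence between two relaxed stores; a consumer that observes
 * `msg->ready != 0` with acquire semantics will also observe the payload.
 * The `msg` structure is hypothetical.
 *
 *   os_atomic_store(&msg->payload, payload, relaxed);
 *   os_atomic_thread_fence(release);    // order the payload before the flag
 *   os_atomic_store(&msg->ready, 1, relaxed);
 */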
200 | |
201 | /*! |
202 | * @function os_atomic_barrier_before_lock_acquire() |
203 | * |
204 | * @brief |
205 | * Appropriate barrier so that a lock acquire is fully ordered with |
206 | * any lock-release. |
207 | * |
208 | * @discussion |
 * Note: On all currently supported architectures,
 * this is a no-op given how locks are implemented.
211 | * |
 * If armv8 ever gains ldapr-like acquire semantics for RMW operations,
 * then this would have to change.
214 | */ |
215 | #define os_atomic_barrier_before_lock_acquire() ((void)0) |
216 | |
217 | /*! |
218 | * @function os_atomic_init |
219 | * |
220 | * @brief |
221 | * Wrapper for C11 atomic_init() |
222 | * |
223 | * @discussion |
224 | * This initialization is not performed atomically, and so must only be used as |
225 | * part of object initialization before the object is made visible to other |
226 | * threads/cores. |
227 | * |
228 | * @param p |
229 | * A pointer to an atomic variable. |
230 | * |
231 | * @param v |
232 | * The value to initialize the variable with. |
233 | * |
 * @returns
 * None; like C11 atomic_init(), this macro does not return a value.
236 | */ |
237 | #define os_atomic_init(p, v) \ |
238 | os_atomic_std(atomic_init)(os_cast_to_atomic_pointer(p), v) |
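
/*
 * Illustrative sketch (not part of the API): initializing atomic fields while
 * the object is still private to the creating thread. The `obj_t` type and
 * its fields are hypothetical.
 *
 *   obj_t *obj = obj_alloc();
 *   os_atomic_init(&obj->refcnt, 1);
 *   os_atomic_init(&obj->flags, 0);
 *   obj_publish(obj);    // only now may other threads observe the object
 */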
239 | |
240 | /*! |
241 | * @function os_atomic_load_is_plain, os_atomic_store_is_plain |
242 | * |
243 | * @brief |
244 | * Return whether a relaxed atomic load (resp. store) to an atomic variable |
245 | * is implemented as a single plain load (resp. store) instruction. |
246 | * |
247 | * @discussion |
248 | * Non-relaxed loads/stores may involve additional memory fence instructions |
249 | * or more complex atomic instructions. |
250 | * |
251 | * This is a construct that can safely be used in static asserts. |
252 | * |
253 | * This doesn't check for alignment and it is assumed that `p` is |
254 | * "aligned enough". |
255 | * |
256 | * @param p |
257 | * A pointer to an atomic variable. |
258 | * |
259 | * @returns |
260 | * True when relaxed atomic loads (resp. stores) compile to a plain load |
261 | * (resp. store) instruction, false otherwise. |
262 | */ |
263 | #define os_atomic_load_is_plain(p) (sizeof(*(p)) <= sizeof(void *)) |
264 | #define os_atomic_store_is_plain(p) os_atomic_load_is_plain(p) |
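
/*
 * Illustrative sketch (not part of the API): asserting at compile time that a
 * hot-path load stays a single plain instruction. `stats_generation` is a
 * hypothetical variable.
 *
 *   static unsigned long stats_generation;
 *
 *   _Static_assert(os_atomic_load_is_plain(&stats_generation),
 *       "generation counter must be loadable with a plain load");
 */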
265 | |
266 | /*! |
267 | * @function os_atomic_load |
268 | * |
269 | * @brief |
270 | * Wrapper for C11 atomic_load_explicit(), guaranteed to compile to a single |
271 | * plain load instruction (when @a m is `relaxed`). |
272 | * |
273 | * @param p |
274 | * A pointer to an atomic variable. |
275 | * |
276 | * @param m |
277 | * The ordering to use. |
278 | * |
279 | * @returns |
280 | * The value loaded from @a p. |
281 | */ |
282 | #define os_atomic_load(p, m) ({ \ |
283 | _Static_assert(os_atomic_load_is_plain(p), "Load is wide"); \ |
284 | _os_compiler_barrier_before_atomic(m); \ |
285 | __auto_type _r = os_atomic_std(atomic_load_explicit)( \ |
286 | os_cast_to_atomic_pointer(p), _os_atomic_mo_##m##_smp); \ |
287 | _os_compiler_barrier_after_atomic(m); \ |
288 | _r; \ |
289 | }) |
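
/*
 * Illustrative sketch (not part of the API): acquire-loading a published
 * pointer so that subsequent reads of the pointed-to data are ordered after
 * the load. `global_cfg` is a hypothetical global.
 *
 *   struct config *cfg = os_atomic_load(&global_cfg, acquire);
 *   if (cfg != NULL) {
 *       use_config(cfg);
 *   }
 */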
290 | |
291 | /*! |
292 | * @function os_atomic_store |
293 | * |
294 | * @brief |
295 | * Wrapper for C11 atomic_store_explicit(), guaranteed to compile to a single |
296 | * plain store instruction (when @a m is `relaxed`). |
297 | * |
298 | * @param p |
299 | * A pointer to an atomic variable. |
300 | * |
301 | * @param v |
302 | * The value to store. |
303 | * |
304 | * @param m |
305 | * The ordering to use. |
306 | * |
307 | * @returns |
308 | * The value stored at @a p. |
309 | */ |
310 | #define os_atomic_store(p, v, m) ({ \ |
311 | _Static_assert(os_atomic_store_is_plain(p), "Store is wide"); \ |
312 | __auto_type _v = (v); \ |
313 | _os_compiler_barrier_before_atomic(m); \ |
314 | os_atomic_std(atomic_store_explicit)(os_cast_to_atomic_pointer(p), _v, \ |
315 | _os_atomic_mo_##m##_smp); \ |
316 | _os_compiler_barrier_after_atomic(m); \ |
317 | _v; \ |
318 | }) |
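
/*
 * Illustrative sketch (not part of the API): publishing a fully initialized
 * object with a release store, pairing with the acquire load shown above.
 * `global_cfg` is a hypothetical global.
 *
 *   struct config *cfg = config_create();
 *   os_atomic_store(&global_cfg, cfg, release);
 */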
319 | |
320 | /*! |
321 | * @function os_atomic_load_wide |
322 | * |
323 | * @brief |
324 | * Wrapper for C11 atomic_load_explicit(), which may be implemented by a |
325 | * compare-exchange loop for double-wide variables. |
326 | * |
327 | * @param p |
328 | * A pointer to an atomic variable. |
329 | * |
330 | * @param m |
331 | * The ordering to use. |
332 | * |
333 | * @returns |
334 | * The value loaded from @a p. |
335 | */ |
336 | #define os_atomic_load_wide(p, m) ({ \ |
337 | _os_compiler_barrier_before_atomic(m); \ |
338 | __auto_type _r = os_atomic_std(atomic_load_explicit)( \ |
339 | os_cast_to_atomic_pointer(p), _os_atomic_mo_##m##_smp); \ |
340 | _os_compiler_barrier_after_atomic(m); \ |
341 | _r; \ |
342 | }) |
343 | |
344 | /*! |
345 | * @function os_atomic_store_wide |
346 | * |
347 | * @brief |
348 | * Wrapper for C11 atomic_store_explicit(), which may be implemented by a |
349 | * compare-exchange loop for double-wide variables. |
350 | * |
351 | * @param p |
352 | * A pointer to an atomic variable. |
353 | * |
354 | * @param v |
355 | * The value to store. |
356 | * |
357 | * @param m |
358 | * The ordering to use. |
359 | * |
360 | * @returns |
361 | * The value stored at @a p. |
362 | */ |
363 | #define os_atomic_store_wide(p, v, m) ({ \ |
364 | __auto_type _v = (v); \ |
365 | _os_compiler_barrier_before_atomic(m); \ |
366 | os_atomic_std(atomic_store_explicit)(os_cast_to_atomic_pointer(p), _v, \ |
367 | _os_atomic_mo_##m##_smp); \ |
368 | _os_compiler_barrier_after_atomic(m); \ |
369 | _v; \ |
370 | }) |
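
/*
 * Illustrative sketch (not part of the API): maintaining a 64-bit statistic
 * that is double-wide on 32-bit platforms, where os_atomic_load() and
 * os_atomic_store() would fail their "is plain" static asserts. `bytes_sent`
 * is a hypothetical counter.
 *
 *   static uint64_t bytes_sent;
 *
 *   os_atomic_store_wide(&bytes_sent, 0, relaxed);
 *   uint64_t snapshot = os_atomic_load_wide(&bytes_sent, relaxed);
 */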
371 | |
372 | /*! |
373 | * @function os_atomic_add, os_atomic_add_orig |
374 | * |
375 | * @brief |
376 | * Wrappers for C11 atomic_fetch_add_explicit(). |
377 | * |
378 | * @param p |
379 | * A pointer to an atomic variable. |
380 | * |
381 | * @param v |
382 | * The value to add. |
383 | * |
384 | * @param m |
385 | * The ordering to use. |
386 | * |
387 | * @returns |
388 | * os_atomic_add_orig returns the value of the variable before the atomic add, |
389 | * os_atomic_add returns the value of the variable after the atomic add. |
390 | */ |
391 | #define os_atomic_add_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_add) |
392 | #define os_atomic_add(p, v, m) _os_atomic_c11_op(p, v, m, fetch_add, +) |
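
/*
 * Illustrative sketch (not part of the API): reserving a batch of sequence
 * numbers; os_atomic_add_orig() returns the first value of the reserved
 * range. `seq` and `batch` are hypothetical.
 *
 *   static uint32_t seq;
 *
 *   uint32_t first = os_atomic_add_orig(&seq, batch, relaxed);
 *   // this thread now owns sequence numbers [first, first + batch)
 */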
393 | |
394 | /*! |
395 | * @function os_atomic_inc, os_atomic_inc_orig |
396 | * |
397 | * @brief |
398 | * Perform an atomic increment. |
399 | * |
400 | * @param p |
401 | * A pointer to an atomic variable. |
402 | * |
403 | * @param m |
404 | * The ordering to use. |
405 | * |
406 | * @returns |
407 | * os_atomic_inc_orig returns the value of the variable before the atomic increment, |
408 | * os_atomic_inc returns the value of the variable after the atomic increment. |
409 | */ |
410 | #define os_atomic_inc_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_add) |
411 | #define os_atomic_inc(p, m) _os_atomic_c11_op(p, 1, m, fetch_add, +) |
412 | |
413 | /*! |
414 | * @function os_atomic_sub, os_atomic_sub_orig |
415 | * |
416 | * @brief |
417 | * Wrappers for C11 atomic_fetch_sub_explicit(). |
418 | * |
419 | * @param p |
420 | * A pointer to an atomic variable. |
421 | * |
422 | * @param v |
423 | * The value to subtract. |
424 | * |
425 | * @param m |
426 | * The ordering to use. |
427 | * |
428 | * @returns |
429 | * os_atomic_sub_orig returns the value of the variable before the atomic subtract, |
430 | * os_atomic_sub returns the value of the variable after the atomic subtract. |
431 | */ |
432 | #define os_atomic_sub_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_sub) |
433 | #define os_atomic_sub(p, v, m) _os_atomic_c11_op(p, v, m, fetch_sub, -) |
434 | |
435 | /*! |
436 | * @function os_atomic_dec, os_atomic_dec_orig |
437 | * |
438 | * @brief |
439 | * Perform an atomic decrement. |
440 | * |
441 | * @param p |
442 | * A pointer to an atomic variable. |
443 | * |
444 | * @param m |
445 | * The ordering to use. |
446 | * |
447 | * @returns |
448 | * os_atomic_dec_orig returns the value of the variable before the atomic decrement, |
449 | * os_atomic_dec returns the value of the variable after the atomic decrement. |
450 | */ |
451 | #define os_atomic_dec_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_sub) |
452 | #define os_atomic_dec(p, m) _os_atomic_c11_op(p, 1, m, fetch_sub, -) |
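
/*
 * Illustrative sketch (not part of the API): the usual reference-counting
 * release pattern, tearing the object down when the count reaches 0. The
 * `obj_t` type and its fields are hypothetical.
 *
 *   void
 *   obj_release(obj_t *obj)
 *   {
 *       if (os_atomic_dec(&obj->refcnt, release) == 0) {
 *           os_atomic_thread_fence(acquire);  // order teardown after releases
 *           obj_free(obj);
 *       }
 *   }
 */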
453 | |
454 | /*! |
455 | * @function os_atomic_and, os_atomic_and_orig |
456 | * |
457 | * @brief |
458 | * Wrappers for C11 atomic_fetch_and_explicit(). |
459 | * |
460 | * @param p |
461 | * A pointer to an atomic variable. |
462 | * |
463 | * @param v |
464 | * The value to and. |
465 | * |
466 | * @param m |
467 | * The ordering to use. |
468 | * |
469 | * @returns |
470 | * os_atomic_and_orig returns the value of the variable before the atomic and, |
471 | * os_atomic_and returns the value of the variable after the atomic and. |
472 | */ |
473 | #define os_atomic_and_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_and) |
474 | #define os_atomic_and(p, v, m) _os_atomic_c11_op(p, v, m, fetch_and, &) |
475 | |
476 | /*! |
477 | * @function os_atomic_andnot, os_atomic_andnot_orig |
478 | * |
479 | * @brief |
480 | * Wrappers for C11 atomic_fetch_and_explicit(p, ~value). |
481 | * |
482 | * @param p |
483 | * A pointer to an atomic variable. |
484 | * |
485 | * @param v |
486 | * The value whose complement to and. |
487 | * |
488 | * @param m |
489 | * The ordering to use. |
490 | * |
491 | * @returns |
492 | * os_atomic_andnot_orig returns the value of the variable before the atomic andnot, |
493 | * os_atomic_andnot returns the value of the variable after the atomic andnot. |
494 | */ |
495 | #define os_atomic_andnot_orig(p, v, m) _os_atomic_c11_op_orig(p, (typeof(v))~(v), m, fetch_and) |
496 | #define os_atomic_andnot(p, v, m) _os_atomic_c11_op(p, (typeof(v))~(v), m, fetch_and, &) |
497 | |
498 | /*! |
499 | * @function os_atomic_or, os_atomic_or_orig |
500 | * |
501 | * @brief |
502 | * Wrappers for C11 atomic_fetch_or_explicit(). |
503 | * |
504 | * @param p |
505 | * A pointer to an atomic variable. |
506 | * |
507 | * @param v |
508 | * The value to or. |
509 | * |
510 | * @param m |
511 | * The ordering to use. |
512 | * |
513 | * @returns |
514 | * os_atomic_or_orig returns the value of the variable before the atomic or, |
515 | * os_atomic_or returns the value of the variable after the atomic or. |
516 | */ |
517 | #define os_atomic_or_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_or) |
518 | #define os_atomic_or(p, v, m) _os_atomic_c11_op(p, v, m, fetch_or, |) |
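
/*
 * Illustrative sketch (not part of the API): setting a flag bit and using the
 * returned original value to detect which thread performed the 0 -> 1
 * transition; the flag is later cleared with os_atomic_andnot(). `flags` and
 * OBJ_FLAG_DIRTY are hypothetical.
 *
 *   uint32_t orig = os_atomic_or_orig(&obj->flags, OBJ_FLAG_DIRTY, relaxed);
 *   if ((orig & OBJ_FLAG_DIRTY) == 0) {
 *       schedule_cleanup(obj);   // this thread set the flag
 *   }
 *
 *   os_atomic_andnot(&obj->flags, OBJ_FLAG_DIRTY, relaxed);
 */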
519 | |
520 | /*! |
521 | * @function os_atomic_xor, os_atomic_xor_orig |
522 | * |
523 | * @brief |
524 | * Wrappers for C11 atomic_fetch_xor_explicit(). |
525 | * |
526 | * @param p |
527 | * A pointer to an atomic variable. |
528 | * |
529 | * @param v |
530 | * The value to xor. |
531 | * |
532 | * @param m |
533 | * The ordering to use. |
534 | * |
535 | * @returns |
536 | * os_atomic_xor_orig returns the value of the variable before the atomic xor, |
537 | * os_atomic_xor returns the value of the variable after the atomic xor. |
538 | */ |
539 | #define os_atomic_xor_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_xor) |
540 | #define os_atomic_xor(p, v, m) _os_atomic_c11_op(p, v, m, fetch_xor, ^) |
541 | |
542 | /*! |
543 | * @function os_atomic_min, os_atomic_min_orig |
544 | * |
545 | * @brief |
546 | * Wrappers for Clang's __atomic_fetch_min() |
547 | * |
548 | * @param p |
549 | * A pointer to an atomic variable. |
550 | * |
551 | * @param v |
552 | * The value to minimize. |
553 | * |
554 | * @param m |
555 | * The ordering to use. |
556 | * |
557 | * @returns |
558 | * os_atomic_min_orig returns the value of the variable before the atomic min, |
559 | * os_atomic_min returns the value of the variable after the atomic min. |
560 | */ |
561 | #define os_atomic_min_orig(p, v, m) _os_atomic_clang_op_orig(p, v, m, fetch_min) |
562 | #define os_atomic_min(p, v, m) _os_atomic_clang_op(p, v, m, fetch_min, MIN) |
563 | |
564 | /*! |
565 | * @function os_atomic_max, os_atomic_max_orig |
566 | * |
567 | * @brief |
568 | * Wrappers for Clang's __atomic_fetch_max() |
569 | * |
570 | * @param p |
571 | * A pointer to an atomic variable. |
572 | * |
573 | * @param v |
574 | * The value to maximize. |
575 | * |
576 | * @param m |
577 | * The ordering to use. |
578 | * |
579 | * @returns |
580 | * os_atomic_max_orig returns the value of the variable before the atomic max, |
581 | * os_atomic_max returns the value of the variable after the atomic max. |
582 | */ |
583 | #define os_atomic_max_orig(p, v, m) _os_atomic_clang_op_orig(p, v, m, fetch_max) |
584 | #define os_atomic_max(p, v, m) _os_atomic_clang_op(p, v, m, fetch_max, MAX) |
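
/*
 * Illustrative sketch (not part of the API): tracking a high-water mark and a
 * low-water mark. `peak_usage` and `min_latency` are hypothetical.
 *
 *   os_atomic_max(&peak_usage, current_usage, relaxed);
 *   os_atomic_min(&min_latency, this_latency, relaxed);
 */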
585 | |
586 | /*! |
587 | * @function os_atomic_xchg |
588 | * |
589 | * @brief |
590 | * Wrapper for C11 atomic_exchange_explicit(). |
591 | * |
592 | * @param p |
593 | * A pointer to an atomic variable. |
594 | * |
595 | * @param v |
596 | * The value to exchange with. |
597 | * |
598 | * @param m |
599 | * The ordering to use. |
600 | * |
601 | * @returns |
602 | * The value of the variable before the exchange. |
603 | */ |
604 | #define os_atomic_xchg(p, v, m) _os_atomic_c11_op_orig(p, v, m, exchange) |
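
/*
 * Illustrative sketch (not part of the API): atomically taking ownership of a
 * pending work item by swapping the slot with NULL. The `queue` structure is
 * hypothetical.
 *
 *   work_t *w = os_atomic_xchg(&queue->pending, NULL, acquire);
 *   if (w != NULL) {
 *       work_process(w);
 *   }
 */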
605 | |
606 | /*! |
607 | * @function os_atomic_cmpxchg |
608 | * |
609 | * @brief |
610 | * Wrapper for C11 atomic_compare_exchange_strong_explicit(). |
611 | * |
612 | * @discussion |
613 | * Loops around os_atomic_cmpxchg() may want to consider using the |
614 | * os_atomic_rmw_loop() construct instead to take advantage of the C11 weak |
615 | * compare-exchange operation. |
616 | * |
617 | * @param p |
618 | * A pointer to an atomic variable. |
619 | * |
620 | * @param e |
621 | * The value expected in the atomic variable. |
622 | * |
623 | * @param v |
624 | * The value to store if the atomic variable has the expected value @a e. |
625 | * |
626 | * @param m |
627 | * The ordering to use in case of success. |
628 | * The ordering in case of failure is always `relaxed`. |
629 | * |
630 | * @returns |
631 | * 0 if the compare-exchange failed. |
632 | * 1 if the compare-exchange succeeded. |
633 | */ |
634 | #define os_atomic_cmpxchg(p, e, v, m) ({ \ |
635 | os_atomic_basetypeof(p) _r = (e); int _b; \ |
636 | _os_compiler_barrier_before_atomic(m); \ |
637 | _b = os_atomic_std(atomic_compare_exchange_strong_explicit)( \ |
638 | os_cast_to_atomic_pointer(p), &_r, \ |
639 | _os_atomic_value_cast(p, v), \ |
640 | _os_atomic_mo_##m##_smp, _os_atomic_mo_relaxed); \ |
641 | _os_compiler_barrier_after_atomic(m); \ |
642 | _b; \ |
643 | }) |
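
/*
 * Illustrative sketch (not part of the API): claiming a one-shot transition
 * with a single strong compare-exchange. `state` and the STATE_* values are
 * hypothetical.
 *
 *   if (os_atomic_cmpxchg(&obj->state, STATE_IDLE, STATE_BUSY, acquire)) {
 *       // this thread won the race and may start the operation
 *   }
 */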
644 | |
645 | /*! |
646 | * @function os_atomic_cmpxchgv |
647 | * |
648 | * @brief |
649 | * Wrapper for C11 atomic_compare_exchange_strong_explicit(). |
650 | * |
651 | * @discussion |
652 | * Loops around os_atomic_cmpxchgv() may want to consider using the |
653 | * os_atomic_rmw_loop() construct instead to take advantage of the C11 weak |
654 | * compare-exchange operation. |
655 | * |
656 | * @param p |
657 | * A pointer to an atomic variable. |
658 | * |
659 | * @param e |
660 | * The value expected in the atomic variable. |
661 | * |
662 | * @param v |
663 | * The value to store if the atomic variable has the expected value @a e. |
664 | * |
665 | * @param g |
666 | * A pointer to a location that is filled with the value that was present in |
667 | * the atomic variable before the compare-exchange (whether successful or not). |
668 | * This can be used to redrive compare-exchange loops. |
669 | * |
670 | * @param m |
671 | * The ordering to use in case of success. |
672 | * The ordering in case of failure is always `relaxed`. |
673 | * |
674 | * @returns |
675 | * 0 if the compare-exchange failed. |
676 | * 1 if the compare-exchange succeeded. |
677 | */ |
678 | #define os_atomic_cmpxchgv(p, e, v, g, m) ({ \ |
679 | os_atomic_basetypeof(p) _r = (e); int _b; \ |
680 | _os_compiler_barrier_before_atomic(m); \ |
681 | _b = os_atomic_std(atomic_compare_exchange_strong_explicit)( \ |
682 | os_cast_to_atomic_pointer(p), &_r, \ |
683 | _os_atomic_value_cast(p, v), \ |
684 | _os_atomic_mo_##m##_smp, _os_atomic_mo_relaxed); \ |
685 | _os_compiler_barrier_after_atomic(m); \ |
686 | *(g) = _r; _b; \ |
687 | }) |
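
/*
 * Illustrative sketch (not part of the API): a retry loop that feeds the
 * value observed on failure back into the next attempt instead of reloading.
 * For anything more involved, os_atomic_rmw_loop() below is usually a better
 * fit. `value` and compute_next() are hypothetical.
 *
 *   uint32_t ov = os_atomic_load(&obj->value, relaxed), nv;
 *
 *   do {
 *       nv = compute_next(ov);
 *   } while (!os_atomic_cmpxchgv(&obj->value, ov, nv, &ov, release));
 */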
688 | |
689 | /*! |
690 | * @function os_atomic_rmw_loop |
691 | * |
692 | * @brief |
693 | * Advanced read-modify-write construct to wrap compare-exchange loops. |
694 | * |
695 | * @param p |
696 | * A pointer to an atomic variable to be modified. |
697 | * |
698 | * @param ov |
699 | * The name of the variable that will contain the original value of the atomic |
700 | * variable (reloaded every iteration of the loop). |
701 | * |
702 | * @param nv |
703 | * The name of the variable that will contain the new value to compare-exchange |
704 | * the atomic variable to (typically computed from @a ov every iteration of the |
705 | * loop). |
706 | * |
707 | * @param m |
708 | * The ordering to use in case of success. |
709 | * The ordering in case of failure is always `relaxed`. |
710 | * |
711 | * @param ... |
712 | * Code block that validates the value of @p ov and computes the new value of |
713 | * @p nv that the atomic variable will be compare-exchanged to in an iteration |
714 | * of the loop. |
715 | * |
 * The loop can be aborted using os_atomic_rmw_loop_give_up(), e.g. when the
 * value of @p ov is found to be "invalid" for the overall operation.
 * `continue` cannot be used in this context.
 *
 * No stores to memory should be performed within the code block as they may
 * cause the LL/SC transactions used to implement the compare-exchange to fail
 * persistently.
722 | * |
723 | * @returns |
724 | * 0 if the loop was aborted with os_atomic_rmw_loop_give_up(). |
725 | * 1 if the loop completed. |
726 | */ |
727 | #define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ |
728 | int _result = 0; \ |
729 | __auto_type _p = os_cast_to_nonatomic_pointer(p); \ |
730 | _os_compiler_barrier_before_atomic(m); \ |
731 | ov = *_p; \ |
732 | do { \ |
733 | __VA_ARGS__; \ |
734 | _result = os_atomic_std(atomic_compare_exchange_weak_explicit)( \ |
735 | os_cast_to_atomic_pointer(_p), &ov, nv, \ |
736 | _os_atomic_mo_##m##_smp, _os_atomic_mo_relaxed); \ |
737 | } while (__builtin_expect(!_result, 0)); \ |
738 | _os_compiler_barrier_after_atomic(m); \ |
739 | _result; \ |
740 | }) |
741 | |
742 | /*! |
743 | * @function os_atomic_rmw_loop_give_up |
744 | * |
745 | * @brief |
746 | * Abort an os_atomic_rmw_loop() loop. |
747 | * |
748 | * @param ... |
749 | * Optional code block to execute before the `break` out of the loop. May |
750 | * further alter the control flow (e.g. using `return`, `goto`, ...). |
751 | */ |
752 | #define os_atomic_rmw_loop_give_up(...) ({ __VA_ARGS__; break; }) |
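
/*
 * Illustrative sketch (not part of the API): a saturating increment built on
 * os_atomic_rmw_loop(), bailing out of both the loop and the function once
 * the counter is pegged at its limit. `counter_t` and COUNTER_MAX are
 * hypothetical.
 *
 *   static bool
 *   counter_try_inc(counter_t *c)
 *   {
 *       uint32_t ov, nv;
 *
 *       os_atomic_rmw_loop(&c->value, ov, nv, relaxed, {
 *           if (ov == COUNTER_MAX) {
 *               os_atomic_rmw_loop_give_up(return false);
 *           }
 *           nv = ov + 1;
 *       });
 *       return true;
 *   }
 */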
753 | |
754 | #if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY |
755 | |
756 | /*! |
757 | * @typedef os_atomic_dependency_t |
758 | * |
759 | * @brief |
760 | * Type for dependency tokens that can be derived from loads with dependency |
761 | * and injected into various expressions. |
762 | * |
763 | * @warning |
 * The implementation of atomic dependencies goes to great lengths to ensure
 * that the compiler cannot prove that os_atomic_dependency_t::__opaque_zero
 * is always 0.
766 | * |
767 | * Users of os_atomic_dependency_t MUST NOT test its value (even with an |
768 | * assert), as doing so would allow the compiler to reason about the value and |
769 | * elide its use to inject hardware dependencies (thwarting the entire purpose |
770 | * of the construct). |
771 | */ |
772 | typedef struct { unsigned long __opaque_zero; } os_atomic_dependency_t; |
773 | |
774 | /*! |
775 | * @const OS_ATOMIC_DEPENDENCY_NONE |
776 | * |
777 | * @brief |
778 | * A value to pass to functions that can carry dependencies, to indicate that |
779 | * no dependency should be carried. |
780 | */ |
781 | #define OS_ATOMIC_DEPENDENCY_NONE \ |
782 | ((os_atomic_dependency_t){ 0UL }) |
783 | |
784 | /*! |
785 | * @function os_atomic_make_dependency |
786 | * |
787 | * @brief |
788 | * Create a dependency token that can be injected into expressions to force a |
789 | * hardware dependency. |
790 | * |
791 | * @discussion |
792 | * This function is only useful for cases where the dependency needs to be used |
793 | * several times. |
794 | * |
795 | * os_atomic_load_with_dependency_on() and os_atomic_inject_dependency() are |
796 | * otherwise capable of automatically creating dependency tokens. |
797 | * |
798 | * @param v |
799 | * The result of: |
800 | * - an os_atomic_load(..., dependency), |
801 | * - an os_atomic_inject_dependency(), |
802 | * - an os_atomic_load_with_dependency_on(). |
803 | * |
 * Note that due to implementation limitations, the type of @p v must be
 * register-sized; if necessary, an explicit cast is required.
 *
 * @returns
 * An os_atomic_dependency_t token that can be used to prolong dependency
 * chains.
 *
 * The token value is always 0, but the compiler must never be able to reason
 * about that fact (cf. os_atomic_dependency_t).
813 | */ |
814 | #define os_atomic_make_dependency(v) \ |
815 | ((void)(v), OS_ATOMIC_DEPENDENCY_NONE) |
816 | |
817 | /*! |
818 | * @function os_atomic_inject_dependency |
819 | * |
820 | * @brief |
821 | * Inject a hardware dependency resulting from a `dependency` load into a |
822 | * specified pointer. |
823 | * |
824 | * @param p |
825 | * A pointer to inject the dependency into. |
826 | * |
827 | * @param e |
828 | * - a dependency token returned from os_atomic_make_dependency(), |
829 | * |
830 | * - OS_ATOMIC_DEPENDENCY_NONE, which turns this operation into a no-op, |
831 | * |
832 | * - any value accepted by os_atomic_make_dependency(). |
833 | * |
834 | * @returns |
 * A value equal to @a p that prolongs the dependency chain rooted at
 * @a e.
837 | */ |
838 | #define os_atomic_inject_dependency(p, e) \ |
839 | ((typeof(*(p)) *)((p) + _os_atomic_auto_dependency(e).__opaque_zero)) |
840 | |
841 | /*! |
842 | * @function os_atomic_load_with_dependency_on |
843 | * |
844 | * @brief |
 * Load that prolongs the dependency chain rooted at @a e.
846 | * |
847 | * @discussion |
848 | * This is shorthand for: |
849 | * |
850 | * <code> |
851 | * os_atomic_load(os_atomic_inject_dependency(p, e), dependency) |
852 | * </code> |
853 | * |
854 | * @param p |
855 | * A pointer to an atomic variable. |
856 | * |
857 | * @param e |
858 | * - a dependency token returned from os_atomic_make_dependency(), |
859 | * |
860 | * - OS_ATOMIC_DEPENDENCY_NONE, which turns this operation into a no-op, |
861 | * |
862 | * - any value accepted by os_atomic_make_dependency(). |
863 | * |
864 | * @returns |
865 | * The value loaded from @a p. |
866 | */ |
867 | #define os_atomic_load_with_dependency_on(p, e) \ |
868 | os_atomic_load(os_atomic_inject_dependency(p, e), dependency) |
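
/*
 * Illustrative sketch (not part of the API): consuming a published pointer
 * and carrying the hardware dependency through several subsequent loads,
 * avoiding acquire barriers. The `list` / `node_t` data structure is
 * hypothetical.
 *
 *   node_t *head = os_atomic_load(&list->head, dependency);
 *   os_atomic_dependency_t dep = os_atomic_make_dependency((uintptr_t)head);
 *
 *   // both loads are ordered after the load of `head` by the address
 *   // dependency carried through `dep`
 *   unsigned long gen = os_atomic_load_with_dependency_on(&list->gen, dep);
 *   node_t *next = os_atomic_load_with_dependency_on(&head->next, dep);
 */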
869 | |
870 | #endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY |
871 | |
872 | #include "atomic_private_arch.h" // Per architecture overrides |
873 | |
874 | #endif /* __OS_ATOMIC_PRIVATE_H__ */ |
875 | |