1/*
2 * Copyright (c) 2007-2012 Apple Inc. All rights reserved.
3 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 *
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
26 *
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 */
29/*
30 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
31 *
32 * HISTORY
33 *
34 */
35
36#ifndef _OS_OSATOMIC_H
37#define _OS_OSATOMIC_H
38
39#include <libkern/OSBase.h>
40
41#if defined(__cplusplus)
42extern "C" {
43#endif
44
#ifdef XNU_KERNEL_PRIVATE
/*
 * The macro SAFE_CAST_PTR() casts one type of pointer to another type, making sure
 * the data the pointer is referencing is the same size. If it is not, it will cause
 * a division by zero compiler warning. This is to work around "SInt32" being defined
 * as "long" on ILP32 and as "int" on LP64, which would require an explicit cast to
 * "SInt32*" when for instance passing an "int*" to OSAddAtomic() - which masks size
 * mismatches.
 * -- var is used, but sizeof does not evaluate the
 * argument, i.e. we're safe against "++" etc. in var --
 */
#define __SAFE_CAST_PTR(type, var) (((type)(var))+(0/(sizeof(*var) == sizeof(*(type)0) ? 1 : 0)))
#else
/* Outside of xnu proper the size check is not applied; fall back to a plain cast. */
#define __SAFE_CAST_PTR(type, var) ((type)(var))
#endif
60
61/*!
62 * @header
63 *
64 * @abstract
65 * This header declares the OSAtomic group of functions for atomic
66 * reading and updating of values.
67 */
68
69/*!
70 * @function OSCompareAndSwap64
71 *
72 * @abstract
73 * 64-bit compare and swap operation.
74 *
75 * @discussion
76 * See OSCompareAndSwap.
77 */
78extern Boolean OSCompareAndSwap64(
79 UInt64 oldValue,
80 UInt64 newValue,
81 volatile UInt64 * address);
82#define OSCompareAndSwap64(a, b, c) \
83 (OSCompareAndSwap64(a, b, __SAFE_CAST_PTR(volatile UInt64*,c)))
84
85/*!
86 * @function OSAddAtomic64
87 *
88 * @abstract
89 * 64-bit atomic add operation.
90 *
91 * @discussion
92 * See OSAddAtomic.
93 */
94extern SInt64 OSAddAtomic64(
95 SInt64 theAmount,
96 volatile SInt64 * address);
97#define OSAddAtomic64(a, b) \
98 (OSAddAtomic64(a, __SAFE_CAST_PTR(volatile SInt64*,b)))
99
100/*!
101 * @function OSIncrementAtomic64
102 *
103 * @abstract
104 * 64-bit increment.
105 *
106 * @discussion
107 * See OSIncrementAtomic.
108*/
109inline static SInt64 OSIncrementAtomic64(volatile SInt64 * address)
110{
111 return OSAddAtomic64(1LL, address);
112}
113
114/*!
115 * @function OSDecrementAtomic64
116 *
117 * @abstract
118 * 64-bit decrement.
119 *
120 * @discussion
121 * See OSDecrementAtomic.
122*/
123inline static SInt64 OSDecrementAtomic64(volatile SInt64 * address)
124{
125 return OSAddAtomic64(-1LL, address);
126}
127
128#if XNU_KERNEL_PRIVATE
129/* Not to be included in headerdoc.
130 *
131 * @function OSAddAtomicLong
132 *
133 * @abstract
134 * 32/64-bit atomic add operation, depending on sizeof(long).
135 *
136 * @discussion
137 * See OSAddAtomic.
138 */
139extern long OSAddAtomicLong(
140 long theAmount,
141 volatile long * address);
142#define OSAddAtomicLong(a, b) \
143 (OSAddAtomicLong(a, __SAFE_CAST_PTR(volatile long*,b)))
144
/* Not to be included in headerdoc.
 *
 * @function OSIncrementAtomicLong
 *
 * @abstract
 * Atomic increment of a long; 32-bit or 64-bit depending on sizeof(long).
 *
 * @discussion
 * Convenience wrapper around OSAddAtomicLong; see OSIncrementAtomic.
 * Returns the value that was stored before the increment.
 */
inline static long OSIncrementAtomicLong(volatile long * address)
{
	/* Delegate to the generic long-sized atomic add with an amount of one. */
	long previousValue = OSAddAtomicLong(1L, address);
	return previousValue;
}
159
/* Not to be included in headerdoc.
 *
 * @function OSDecrementAtomicLong
 *
 * @abstract
 * Atomic decrement of a long; 32-bit or 64-bit depending on sizeof(long).
 *
 * @discussion
 * Convenience wrapper around OSAddAtomicLong; see OSDecrementAtomic.
 * Returns the value that was stored before the decrement.
 */
inline static long OSDecrementAtomicLong(volatile long * address)
{
	/* Delegate to the generic long-sized atomic add with an amount of minus one. */
	long previousValue = OSAddAtomicLong(-1L, address);
	return previousValue;
}
172#endif /* XNU_KERNEL_PRIVATE */
173
174#if XNU_KERNEL_PRIVATE
175/*!
176 * @function OSCompareAndSwap8
177 *
178 * @abstract
179 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
180 *
181 * @discussion
 * The OSCompareAndSwap8 function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false.
183 *
184 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
185 *
186 * @param oldValue The value to compare at address.
187 * @param newValue The value to write to address if oldValue compares true.
188 * @param address The byte aligned address of the data to update atomically.
189 * @result true if newValue was written to the address.
190 */
191extern Boolean OSCompareAndSwap8(
192 UInt8 oldValue,
193 UInt8 newValue,
194 volatile UInt8 * address);
195#define OSCompareAndSwap8(a, b, c) \
196 (OSCompareAndSwap8(a, b, __SAFE_CAST_PTR(volatile UInt8*,c)))
197
198/*!
199 * @function OSCompareAndSwap16
200 *
201 * @abstract
202 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
203 *
204 * @discussion
 * The OSCompareAndSwap16 function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false.
206 *
207 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
208 *
209 * @param oldValue The value to compare at address.
210 * @param newValue The value to write to address if oldValue compares true.
211 * @param address The 2-byte aligned address of the data to update atomically.
212 * @result true if newValue was written to the address.
213 */
214extern Boolean OSCompareAndSwap16(
215 UInt16 oldValue,
216 UInt16 newValue,
217 volatile UInt16 * address);
218#define OSCompareAndSwap16(a, b, c) \
219 (OSCompareAndSwap16(a, b, __SAFE_CAST_PTR(volatile UInt16*,c)))
220
221#endif /* XNU_KERNEL_PRIVATE */
222
223/*!
224 * @function OSCompareAndSwap
225 *
226 * @abstract
227 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
228 *
229 * @discussion
 * The OSCompareAndSwap function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false.
231 *
232 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
233 *
234 * @param oldValue The value to compare at address.
235 * @param newValue The value to write to address if oldValue compares true.
236 * @param address The 4-byte aligned address of the data to update atomically.
237 * @result true if newValue was written to the address.
238 */
239extern Boolean OSCompareAndSwap(
240 UInt32 oldValue,
241 UInt32 newValue,
242 volatile UInt32 * address);
243#define OSCompareAndSwap(a, b, c) \
244 (OSCompareAndSwap(a, b, __SAFE_CAST_PTR(volatile UInt32*,c)))
245
246/*!
247 * @function OSCompareAndSwapPtr
248 *
249 * @abstract
250 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
251 *
252 * @discussion
 * The OSCompareAndSwapPtr function compares the pointer-sized value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwapPtr returns true if newValue is written to the address; otherwise, it returns false.
254 *
255 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
256 * @param oldValue The pointer value to compare at address.
257 * @param newValue The pointer value to write to address if oldValue compares true.
258 * @param address The pointer-size aligned address of the data to update atomically.
259 * @result true if newValue was written to the address.
260 */
261extern Boolean OSCompareAndSwapPtr(
262 void * oldValue,
263 void * newValue,
264 void * volatile * address);
265#define OSCompareAndSwapPtr(a, b, c) \
266 (OSCompareAndSwapPtr(a, b, __SAFE_CAST_PTR(void * volatile *,c)))
267
268/*!
269 * @function OSAddAtomic
270 *
271 * @abstract
272 * 32-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
273 *
274 * @discussion
275 * The OSAddAtomic function adds the specified amount to the value at the specified address and returns the original value.
276 *
277 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
278 * @param amount The amount to add.
279 * @param address The 4-byte aligned address of the value to update atomically.
280 * @result The value before the addition
281 */
282extern SInt32 OSAddAtomic(
283 SInt32 amount,
284 volatile SInt32 * address);
285#define OSAddAtomic(a, b) \
286 (OSAddAtomic(a, __SAFE_CAST_PTR(volatile SInt32*,b)))
287
288/*!
289 * @function OSAddAtomic16
290 *
291 * @abstract
292 * 16-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
293 *
294 * @discussion
295 * The OSAddAtomic16 function adds the specified amount to the value at the specified address and returns the original value.
296 *
297 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param amount The amount to add.
 * @param address The 2-byte aligned address of the value to update atomically.
299 * @result The value before the addition
300 */
301extern SInt16 OSAddAtomic16(
302 SInt32 amount,
303 volatile SInt16 * address);
304
305/*!
306 * @function OSAddAtomic8
307 *
308 * @abstract
309 * 8-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
310 *
311 * @discussion
312 * The OSAddAtomic8 function adds the specified amount to the value at the specified address and returns the original value.
313 *
314 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
315 * @param amount The amount to add.
316 * @param address The address of the value to update atomically.
317 * @result The value before the addition.
318 */
319extern SInt8 OSAddAtomic8(
320 SInt32 amount,
321 volatile SInt8 * address);
322
323/*!
324 * @function OSIncrementAtomic
325 *
326 * @abstract
327 * 32-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
328 *
329 * @discussion
330 * The OSIncrementAtomic function increments the value at the specified address by one and returns the original value.
331 *
332 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
333 * @param address The 4-byte aligned address of the value to update atomically.
334 * @result The value before the increment.
335 */
336extern SInt32 OSIncrementAtomic(volatile SInt32 * address);
337#define OSIncrementAtomic(a) \
338 (OSIncrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))
339
340/*!
341 * @function OSIncrementAtomic16
342 *
343 * @abstract
344 * 16-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
345 *
346 * @discussion
347 * The OSIncrementAtomic16 function increments the value at the specified address by one and returns the original value.
348 *
349 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
350 * @param address The 2-byte aligned address of the value to update atomically.
351 * @result The value before the increment.
352 */
353extern SInt16 OSIncrementAtomic16(volatile SInt16 * address);
354
355/*!
356 * @function OSIncrementAtomic8
357 *
358 * @abstract
359 * 8-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
360 *
361 * @discussion
362 * The OSIncrementAtomic8 function increments the value at the specified address by one and returns the original value.
363 *
364 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
365 * @param address The address of the value to update atomically.
366 * @result The value before the increment.
367 */
368extern SInt8 OSIncrementAtomic8(volatile SInt8 * address);
369
370/*!
371 * @function OSDecrementAtomic
372 *
373 * @abstract
374 * 32-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
375 *
376 * @discussion
377 * The OSDecrementAtomic function decrements the value at the specified address by one and returns the original value.
378 *
379 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
380 * @param address The 4-byte aligned address of the value to update atomically.
381 * @result The value before the decrement.
382 */
383extern SInt32 OSDecrementAtomic(volatile SInt32 * address);
384#define OSDecrementAtomic(a) \
385 (OSDecrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))
386
387/*!
388 * @function OSDecrementAtomic16
389 *
390 * @abstract
391 * 16-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
392 *
393 * @discussion
394 * The OSDecrementAtomic16 function decrements the value at the specified address by one and returns the original value.
395 *
396 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
397 * @param address The 2-byte aligned address of the value to update atomically.
398 * @result The value before the decrement.
399 */
400extern SInt16 OSDecrementAtomic16(volatile SInt16 * address);
401
402/*!
403 * @function OSDecrementAtomic8
404 *
405 * @abstract
406 * 8-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
407 *
408 * @discussion
409 * The OSDecrementAtomic8 function decrements the value at the specified address by one and returns the original value.
410 *
411 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
412 * @param address The address of the value to update atomically.
413 * @result The value before the decrement.
414 */
415extern SInt8 OSDecrementAtomic8(volatile SInt8 * address);
416
417/*!
418 * @function OSBitAndAtomic
419 *
420 * @abstract
421 * 32-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
422 *
423 * @discussion
424 * The OSBitAndAtomic function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
425 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
427 * @param mask The mask to logically and with the value.
428 * @param address The 4-byte aligned address of the value to update atomically.
429 * @result The value before the bitwise operation
430 */
431extern UInt32 OSBitAndAtomic(
432 UInt32 mask,
433 volatile UInt32 * address);
434#define OSBitAndAtomic(a, b) \
435 (OSBitAndAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
436
437/*!
438 * @function OSBitAndAtomic16
439 *
440 * @abstract
441 * 16-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
442 *
443 * @discussion
444 * The OSBitAndAtomic16 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
445 *
446 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
447 * @param mask The mask to logically and with the value.
448 * @param address The 2-byte aligned address of the value to update atomically.
449 * @result The value before the bitwise operation.
450 */
451extern UInt16 OSBitAndAtomic16(
452 UInt32 mask,
453 volatile UInt16 * address);
454
455/*!
456 * @function OSBitAndAtomic8
457 *
458 * @abstract
459 * 8-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
460 *
461 * @discussion
462 * The OSBitAndAtomic8 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
463 *
464 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
465 * @param mask The mask to logically and with the value.
466 * @param address The address of the value to update atomically.
467 * @result The value before the bitwise operation.
468 */
469extern UInt8 OSBitAndAtomic8(
470 UInt32 mask,
471 volatile UInt8 * address);
472
473/*!
474 * @function OSBitOrAtomic
475 *
476 * @abstract
477 * 32-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
478 *
479 * @discussion
480 * The OSBitOrAtomic function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
481 *
482 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
483 * @param mask The mask to logically or with the value.
484 * @param address The 4-byte aligned address of the value to update atomically.
485 * @result The value before the bitwise operation.
486 */
487extern UInt32 OSBitOrAtomic(
488 UInt32 mask,
489 volatile UInt32 * address);
490#define OSBitOrAtomic(a, b) \
491 (OSBitOrAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
492
493/*!
494 * @function OSBitOrAtomic16
495 *
496 * @abstract
497 * 16-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
498 *
499 * @discussion
500 * The OSBitOrAtomic16 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
501 *
502 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
503 * @param mask The mask to logically or with the value.
504 * @param address The 2-byte aligned address of the value to update atomically.
505 * @result The value before the bitwise operation.
506 */
507extern UInt16 OSBitOrAtomic16(
508 UInt32 mask,
509 volatile UInt16 * address);
510
511/*!
512 * @function OSBitOrAtomic8
513 *
514 * @abstract
515 * 8-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
516 *
517 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
518 *
519 * @discussion
520 * The OSBitOrAtomic8 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
521 * @param mask The mask to logically or with the value.
522 * @param address The address of the value to update atomically.
523 * @result The value before the bitwise operation.
524 */
525extern UInt8 OSBitOrAtomic8(
526 UInt32 mask,
527 volatile UInt8 * address);
528
529/*!
530 * @function OSBitXorAtomic
531 *
532 * @abstract
533 * 32-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
534 *
535 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
536 *
537 * @discussion
538 * The OSBitXorAtomic function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 * @param mask The mask to logically xor with the value.
540 * @param address The 4-byte aligned address of the value to update atomically.
541 * @result The value before the bitwise operation.
542 */
543extern UInt32 OSBitXorAtomic(
544 UInt32 mask,
545 volatile UInt32 * address);
546#define OSBitXorAtomic(a, b) \
547 (OSBitXorAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
548
549/*!
550 * @function OSBitXorAtomic16
551 *
552 * @abstract
553 * 16-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
554 *
555 * @discussion
556 * The OSBitXorAtomic16 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
557 *
558 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically xor with the value.
560 * @param address The 2-byte aligned address of the value to update atomically.
561 * @result The value before the bitwise operation.
562 */
563extern UInt16 OSBitXorAtomic16(
564 UInt32 mask,
565 volatile UInt16 * address);
566
567/*!
568 * @function OSBitXorAtomic8
569 *
570 * @abstract
571 * 8-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
572 *
573 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
574 *
575 * @discussion
576 * The OSBitXorAtomic8 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 * @param mask The mask to logically xor with the value.
578 * @param address The address of the value to update atomically.
579 * @result The value before the bitwise operation.
580 */
581extern UInt8 OSBitXorAtomic8(
582 UInt32 mask,
583 volatile UInt8 * address);
584
585/*!
586 * @function OSTestAndSet
587 *
588 * @abstract
589 * Bit test and set operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
590 *
591 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
592 *
593 * @discussion
594 * The OSTestAndSet function sets a single bit in a byte at a specified address. It returns true if the bit was already set, false otherwise.
595 * @param bit The bit number in the range 0 through 7. Bit 0 is the most significant.
596 * @param startAddress The address of the byte to update atomically.
597 * @result true if the bit was already set, false otherwise.
598 */
599extern Boolean OSTestAndSet(
600 UInt32 bit,
601 volatile UInt8 * startAddress);
602
603/*!
604 * @function OSTestAndClear
605 *
606 * @abstract
607 * Bit test and clear operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
608 *
609 * @discussion
610 * The OSTestAndClear function clears a single bit in a byte at a specified address. It returns true if the bit was already clear, false otherwise.
611 *
612 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
613 * @param bit The bit number in the range 0 through 7. Bit 0 is the most significant.
614 * @param startAddress The address of the byte to update atomically.
615 * @result true if the bit was already clear, false otherwise.
616 */
617extern Boolean OSTestAndClear(
618 UInt32 bit,
619 volatile UInt8 * startAddress);
620
621/*!
622 * @defined OS_SPINLOCK_INIT
623 *
624 * @abstract
625 * The default value for an OSSpinLock.
626 *
627 * @discussion
628 * The convention is that unlocked is zero, locked is nonzero.
629 */
630#define OS_SPINLOCK_INIT 0
631
632/*!
633 * @typedef OSSpinLock
634 *
635 * @abstract
636 * Data type for a spinlock.
637 *
638 * @discussion
639 * You should always initialize a spinlock to OS_SPINLOCK_INIT before using it.
640 */
641typedef SInt32 OSSpinLock;
642
643#ifdef PRIVATE
644/*!
645 * @function OSSpinLockTry
646 *
647 * @abstract
648 * Locks a spinlock if it would not block.
649 *
650 * @discussion
651 * Multiprocessor locks used within the shared memory area between the kernel and event system. These must work in both user and kernel mode.
652 *
653 * @result
654 * Returns false if the lock was already held by another thread, true if it took the lock successfully.
655 */
656extern Boolean OSSpinLockTry(volatile OSSpinLock * lock);
657
658/*!
659 * @function OSSpinLockUnlock
660 *
661 * @abstract
662 * Unlocks a spinlock.
663 *
664 * @discussion
665 * Unlocks a spinlock.
666 */
667extern void OSSpinLockUnlock(volatile OSSpinLock * lock);
668#endif /* PRIVATE */
669
670/*!
671 * @function OSSynchronizeIO
672 *
673 * @abstract
674 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices.
675 *
676 * @discussion
677 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices. It executes the eieio instruction on PowerPC processors.
678 */
679#if defined(__arm__) || defined(__arm64__)
680extern void OSSynchronizeIO(void);
681#else
static __inline__ void OSSynchronizeIO(void)
{
	/* Intentionally empty: on non-ARM builds no barrier instruction is
	 * emitted here (see the headerdoc above for the function's contract). */
}
685#endif
686
687#if defined(KERNEL_PRIVATE)
688
689#if defined(__arm__) || defined(__arm64__)
static inline void OSMemoryBarrier(void) {
	/* "dmb ish": ARM data memory barrier over the inner-shareable domain;
	 * the "memory" clobber also prevents compiler reordering across it. */
	__asm__ volatile("dmb ish" ::: "memory");
}
693#elif defined(__i386__) || defined(__x86_64__)
694#if defined(XNU_KERNEL_PRIVATE)
static inline void OSMemoryBarrier(void) {
	/* "mfence": x86 full memory fence ordering prior loads and stores before
	 * later ones; the "memory" clobber also prevents compiler reordering. */
	__asm__ volatile("mfence" ::: "memory");
}
698#endif /* XNU_KERNEL_PRIVATE */
699#endif
700
701#endif /* KERNEL_PRIVATE */
702
703#if defined(__cplusplus)
704}
705#endif
706
707#endif /* ! _OS_OSATOMIC_H */
708