/*
 * Copyright (c) 2007-2012 Apple Inc. All rights reserved.
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */

#ifndef _OS_OSATOMIC_H
#define _OS_OSATOMIC_H

#include <libkern/OSBase.h>
#include <string.h>

#if defined(__cplusplus)
extern "C" {
#endif

#ifdef XNU_KERNEL_PRIVATE
/*
 * The macro __SAFE_CAST_PTR() casts a pointer of one type to another type,
 * making sure the data the pointer references is the same size in both.
 * If it is not, compilation fails with a "size mismatch" static assertion.
 *
 * This works around "SInt32" being defined as "long" on ILP32 and as "int"
 * on LP64, which would otherwise require an explicit cast to "SInt32*" when,
 * for instance, passing an "int*" to OSAddAtomic() - and such an explicit
 * cast would mask genuine size mismatches.
 * -- var appears twice in the expansion, but sizeof does not evaluate its
 * argument, so side effects such as "++" in var are applied only once --
 */
#define __SAFE_CAST_PTR(type, var) \
	({ _Static_assert(sizeof(*(var)) == sizeof(*(type)NULL), "size mismatch"); ((type)(var)); })
#else
#define __SAFE_CAST_PTR(type, var) ((type)(var))
#endif
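
/*
 * Illustrative sketch, not part of the API: "refcount" below is a
 * hypothetical variable. Under XNU_KERNEL_PRIVATE, a call such as
 *
 *     int refcount;
 *     OSAddAtomic(1, &refcount);
 *
 * routes the address argument through __SAFE_CAST_PTR(), expanding to
 * roughly
 *
 *     ({ _Static_assert(sizeof(refcount) == sizeof(SInt32), "size mismatch");
 *        (volatile SInt32 *)&refcount; })
 *
 * so passing, for example, a "long *" on LP64 fails to compile instead of
 * silently operating on the wrong width.
 */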

/*!
 * @header
 *
 * @abstract
 * This header declares the OSAtomic group of functions for atomic
 * reading and updating of values.
 */

/*!
 * @function OSCompareAndSwap64
 *
 * @abstract
 * 64-bit compare and swap operation.
 *
 * @discussion
 * See OSCompareAndSwap.
 */
extern Boolean OSCompareAndSwap64(
	UInt64 oldValue,
	UInt64 newValue,
	volatile UInt64 * address);
#define OSCompareAndSwap64(a, b, c) \
	(OSCompareAndSwap64(a, b, __SAFE_CAST_PTR(volatile UInt64*,c)))

/*!
 * @function OSAddAtomic64
 *
 * @abstract
 * 64-bit atomic add operation.
 *
 * @discussion
 * See OSAddAtomic.
 */
extern SInt64 OSAddAtomic64(
	SInt64 theAmount,
	volatile SInt64 * address);
#define OSAddAtomic64(a, b) \
	(OSAddAtomic64(a, __SAFE_CAST_PTR(volatile SInt64*,b)))

/*!
 * @function OSIncrementAtomic64
 *
 * @abstract
 * 64-bit increment.
 *
 * @discussion
 * See OSIncrementAtomic.
 */
inline static SInt64
OSIncrementAtomic64(volatile SInt64 * address)
{
	return OSAddAtomic64(1LL, address);
}

/*!
 * @function OSDecrementAtomic64
 *
 * @abstract
 * 64-bit decrement.
 *
 * @discussion
 * See OSDecrementAtomic.
 */
inline static SInt64
OSDecrementAtomic64(volatile SInt64 * address)
{
	return OSAddAtomic64(-1LL, address);
}

#if XNU_KERNEL_PRIVATE
/* Not to be included in headerdoc.
 *
 * @function OSAddAtomicLong
 *
 * @abstract
 * 32/64-bit atomic add operation, depending on sizeof(long).
 *
 * @discussion
 * See OSAddAtomic.
 */
extern long OSAddAtomicLong(
	long theAmount,
	volatile long * address);
#define OSAddAtomicLong(a, b) \
	(OSAddAtomicLong(a, __SAFE_CAST_PTR(volatile long*,b)))

/* Not to be included in headerdoc.
 *
 * @function OSIncrementAtomicLong
 *
 * @abstract
 * 32/64-bit increment, depending on sizeof(long).
 *
 * @discussion
 * See OSIncrementAtomic.
 */
inline static long
OSIncrementAtomicLong(volatile long * address)
{
	return OSAddAtomicLong(1L, address);
}

/* Not to be included in headerdoc.
 *
 * @function OSDecrementAtomicLong
 *
 * @abstract
 * 32/64-bit decrement, depending on sizeof(long).
 *
 * @discussion
 * See OSDecrementAtomic.
 */
inline static long
OSDecrementAtomicLong(volatile long * address)
{
	return OSAddAtomicLong(-1L, address);
}
#endif /* XNU_KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE
/*!
 * @function OSCompareAndSwap8
 *
 * @abstract
 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSCompareAndSwap8 function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap8 returns true if newValue is written to the address; otherwise, it returns false.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param oldValue The value to compare at address.
 * @param newValue The value to write to address if oldValue compares true.
 * @param address The byte aligned address of the data to update atomically.
 * @result true if newValue was written to the address.
 */
extern Boolean OSCompareAndSwap8(
	UInt8 oldValue,
	UInt8 newValue,
	volatile UInt8 * address);
#define OSCompareAndSwap8(a, b, c) \
	(OSCompareAndSwap8(a, b, __SAFE_CAST_PTR(volatile UInt8*,c)))

/*!
 * @function OSCompareAndSwap16
 *
 * @abstract
 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSCompareAndSwap16 function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap16 returns true if newValue is written to the address; otherwise, it returns false.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param oldValue The value to compare at address.
 * @param newValue The value to write to address if oldValue compares true.
 * @param address The 2-byte aligned address of the data to update atomically.
 * @result true if newValue was written to the address.
 */
extern Boolean OSCompareAndSwap16(
	UInt16 oldValue,
	UInt16 newValue,
	volatile UInt16 * address);
#define OSCompareAndSwap16(a, b, c) \
	(OSCompareAndSwap16(a, b, __SAFE_CAST_PTR(volatile UInt16*,c)))

#endif /* XNU_KERNEL_PRIVATE */

/*!
 * @function OSCompareAndSwap
 *
 * @abstract
 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSCompareAndSwap function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param oldValue The value to compare at address.
 * @param newValue The value to write to address if oldValue compares true.
 * @param address The 4-byte aligned address of the data to update atomically.
 * @result true if newValue was written to the address.
 */
extern Boolean OSCompareAndSwap(
	UInt32 oldValue,
	UInt32 newValue,
	volatile UInt32 * address);
#define OSCompareAndSwap(a, b, c) \
	(OSCompareAndSwap(a, b, __SAFE_CAST_PTR(volatile UInt32*,c)))
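
/*
 * Usage sketch, illustrative only: the names gMaxObserved and
 * record_sample are hypothetical. A typical compare-and-swap retry loop
 * that tracks the largest value seen so far without taking a lock.
 *
 *     static volatile UInt32 gMaxObserved;
 *
 *     static void
 *     record_sample(UInt32 sample)
 *     {
 *             UInt32 current;
 *             do {
 *                     current = gMaxObserved;
 *                     if (sample <= current) {
 *                             return;         // nothing to update
 *                     }
 *             } while (!OSCompareAndSwap(current, sample, &gMaxObserved));
 *     }
 */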

/*!
 * @function OSCompareAndSwapPtr
 *
 * @abstract
 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSCompareAndSwapPtr function compares the pointer-sized value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwapPtr returns true if newValue is written to the address; otherwise, it returns false.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param oldValue The pointer value to compare at address.
 * @param newValue The pointer value to write to address if oldValue compares true.
 * @param address The pointer-size aligned address of the data to update atomically.
 * @result true if newValue was written to the address.
 */
extern Boolean OSCompareAndSwapPtr(
	void * oldValue,
	void * newValue,
	void * volatile * address);
#define OSCompareAndSwapPtr(a, b, c) \
	(OSCompareAndSwapPtr(a, b, __SAFE_CAST_PTR(void * volatile *,c)))
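
/*
 * Usage sketch, illustrative only: gShared, create_object() and
 * destroy_object() are hypothetical. Publishing a lazily created object
 * exactly once; the loser of the race frees its copy and adopts the
 * published one.
 *
 *     static void * volatile gShared;
 *
 *     void *candidate = create_object();
 *     if (!OSCompareAndSwapPtr(NULL, candidate, &gShared)) {
 *             destroy_object(candidate);      // another thread won the race
 *             candidate = gShared;
 *     }
 */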

/*!
 * @function OSAddAtomic
 *
 * @abstract
 * 32-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSAddAtomic function adds the specified amount to the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param amount The amount to add.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the addition.
 */
extern SInt32 OSAddAtomic(
	SInt32 amount,
	volatile SInt32 * address);
#define OSAddAtomic(a, b) \
	(OSAddAtomic(a, __SAFE_CAST_PTR(volatile SInt32*,b)))
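
/*
 * Usage sketch, illustrative only: gPacketsQueued is hypothetical.
 * Maintaining a shared statistics counter; the return value is the
 * counter before the addition, so the post-add value is the return
 * value plus the amount.
 *
 *     static volatile SInt32 gPacketsQueued;
 *
 *     SInt32 previous = OSAddAtomic(1, &gPacketsQueued);
 *     SInt32 current  = previous + 1;
 */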

/*!
 * @function OSAddAtomic16
 *
 * @abstract
 * 16-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSAddAtomic16 function adds the specified amount to the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param amount The amount to add.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the addition.
 */
extern SInt16 OSAddAtomic16(
	SInt32 amount,
	volatile SInt16 * address);

/*!
 * @function OSAddAtomic8
 *
 * @abstract
 * 8-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSAddAtomic8 function adds the specified amount to the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param amount The amount to add.
 * @param address The address of the value to update atomically.
 * @result The value before the addition.
 */
extern SInt8 OSAddAtomic8(
	SInt32 amount,
	volatile SInt8 * address);

/*!
 * @function OSIncrementAtomic
 *
 * @abstract
 * 32-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSIncrementAtomic function increments the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the increment.
 */
extern SInt32 OSIncrementAtomic(volatile SInt32 * address);
#define OSIncrementAtomic(a) \
	(OSIncrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))

/*!
 * @function OSIncrementAtomic16
 *
 * @abstract
 * 16-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSIncrementAtomic16 function increments the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the increment.
 */
extern SInt16 OSIncrementAtomic16(volatile SInt16 * address);

/*!
 * @function OSIncrementAtomic8
 *
 * @abstract
 * 8-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSIncrementAtomic8 function increments the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param address The address of the value to update atomically.
 * @result The value before the increment.
 */
extern SInt8 OSIncrementAtomic8(volatile SInt8 * address);

/*!
 * @function OSDecrementAtomic
 *
 * @abstract
 * 32-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSDecrementAtomic function decrements the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the decrement.
 */
extern SInt32 OSDecrementAtomic(volatile SInt32 * address);
#define OSDecrementAtomic(a) \
	(OSDecrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))

/*!
 * @function OSDecrementAtomic16
 *
 * @abstract
 * 16-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSDecrementAtomic16 function decrements the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the decrement.
 */
extern SInt16 OSDecrementAtomic16(volatile SInt16 * address);

/*!
 * @function OSDecrementAtomic8
 *
 * @abstract
 * 8-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSDecrementAtomic8 function decrements the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param address The address of the value to update atomically.
 * @result The value before the decrement.
 */
extern SInt8 OSDecrementAtomic8(volatile SInt8 * address);

/*!
 * @function OSBitAndAtomic
 *
 * @abstract
 * 32-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitAndAtomic function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param mask The mask to logically and with the value.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt32 OSBitAndAtomic(
	UInt32 mask,
	volatile UInt32 * address);
#define OSBitAndAtomic(a, b) \
	(OSBitAndAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))

/*!
 * @function OSBitAndAtomic16
 *
 * @abstract
 * 16-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitAndAtomic16 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically and with the value.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt16 OSBitAndAtomic16(
	UInt32 mask,
	volatile UInt16 * address);

/*!
 * @function OSBitAndAtomic8
 *
 * @abstract
 * 8-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitAndAtomic8 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically and with the value.
 * @param address The address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt8 OSBitAndAtomic8(
	UInt32 mask,
	volatile UInt8 * address);

/*!
 * @function OSBitOrAtomic
 *
 * @abstract
 * 32-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitOrAtomic function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically or with the value.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt32 OSBitOrAtomic(
	UInt32 mask,
	volatile UInt32 * address);
#define OSBitOrAtomic(a, b) \
	(OSBitOrAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
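
/*
 * Usage sketch, illustrative only: kStateShutdown and gState are
 * hypothetical. Setting a flag bit in a shared state word and detecting
 * whether this caller was the one that made the clear-to-set transition.
 *
 *     #define kStateShutdown 0x00000001u
 *     static volatile UInt32 gState;
 *
 *     UInt32 previous = OSBitOrAtomic(kStateShutdown, &gState);
 *     if ((previous & kStateShutdown) == 0) {
 *             // this caller set the flag; run the one-time shutdown work
 *     }
 */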

/*!
 * @function OSBitOrAtomic16
 *
 * @abstract
 * 16-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitOrAtomic16 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically or with the value.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt16 OSBitOrAtomic16(
	UInt32 mask,
	volatile UInt16 * address);

/*!
 * @function OSBitOrAtomic8
 *
 * @abstract
 * 8-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitOrAtomic8 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically or with the value.
 * @param address The address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt8 OSBitOrAtomic8(
	UInt32 mask,
	volatile UInt8 * address);

/*!
 * @function OSBitXorAtomic
 *
 * @abstract
 * 32-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitXorAtomic function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically xor with the value.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt32 OSBitXorAtomic(
	UInt32 mask,
	volatile UInt32 * address);
#define OSBitXorAtomic(a, b) \
	(OSBitXorAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))

/*!
 * @function OSBitXorAtomic16
 *
 * @abstract
 * 16-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitXorAtomic16 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically xor with the value.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt16 OSBitXorAtomic16(
	UInt32 mask,
	volatile UInt16 * address);

/*!
 * @function OSBitXorAtomic8
 *
 * @abstract
 * 8-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitXorAtomic8 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically xor with the value.
 * @param address The address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt8 OSBitXorAtomic8(
	UInt32 mask,
	volatile UInt8 * address);

/*!
 * @function OSTestAndSet
 *
 * @abstract
 * Bit test and set operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSTestAndSet function sets a single bit in a byte at a specified address. It returns true if the bit was already set, false otherwise.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param bit The bit number in the range 0 through 7. Bit 0 is the most significant.
 * @param startAddress The address of the byte to update atomically.
 * @result true if the bit was already set, false otherwise.
 */
extern Boolean OSTestAndSet(
	UInt32 bit,
	volatile UInt8 * startAddress);
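
/*
 * Usage sketch, illustrative only: gByteMap is hypothetical. Claiming one
 * slot of an 8-slot bitmap. Note the numbering: bit 0 is the most
 * significant bit, so OSTestAndSet(0, &gByteMap) sets 0x80 and
 * OSTestAndSet(3, &gByteMap) sets 0x10.
 *
 *     static volatile UInt8 gByteMap;
 *
 *     if (!OSTestAndSet(3, &gByteMap)) {
 *             // slot 3 was clear and is now claimed by this caller
 *     }
 */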

/*!
 * @function OSTestAndClear
 *
 * @abstract
 * Bit test and clear operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSTestAndClear function clears a single bit in a byte at a specified address. It returns true if the bit was already clear, false otherwise.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param bit The bit number in the range 0 through 7. Bit 0 is the most significant.
 * @param startAddress The address of the byte to update atomically.
 * @result true if the bit was already clear, false otherwise.
 */
extern Boolean OSTestAndClear(
	UInt32 bit,
	volatile UInt8 * startAddress);

/*!
 * @defined OS_SPINLOCK_INIT
 *
 * @abstract
 * The default value for an OSSpinLock.
 *
 * @discussion
 * The convention is that unlocked is zero, locked is nonzero.
 */
#define OS_SPINLOCK_INIT 0

/*!
 * @typedef OSSpinLock
 *
 * @abstract
 * Data type for a spinlock.
 *
 * @discussion
 * You should always initialize a spinlock to OS_SPINLOCK_INIT before using it.
 */
typedef SInt32 OSSpinLock;

#ifdef PRIVATE
/*!
 * @function OSSpinLockTry
 *
 * @abstract
 * Locks a spinlock if it would not block.
 *
 * @discussion
 * These are multiprocessor locks used within the shared memory area between the kernel and the event system; they must work in both user and kernel mode.
 *
 * @result
 * Returns false if the lock was already held by another thread, true if it took the lock successfully.
 */
extern Boolean OSSpinLockTry(volatile OSSpinLock * lock);

/*!
 * @function OSSpinLockUnlock
 *
 * @abstract
 * Unlocks a spinlock.
 *
 * @discussion
 * Unlocks a spinlock that was previously locked with OSSpinLockTry.
 */
extern void OSSpinLockUnlock(volatile OSSpinLock * lock);
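
/*
 * Usage sketch, illustrative only: gEventLock and the busy-wait policy are
 * hypothetical. Only a non-blocking acquire is declared here, so a caller
 * that must take the lock retries OSSpinLockTry and releases with
 * OSSpinLockUnlock.
 *
 *     static volatile OSSpinLock gEventLock = OS_SPINLOCK_INIT;
 *
 *     while (!OSSpinLockTry(&gEventLock)) {
 *             // spin, yield, or back off as appropriate for the context
 *     }
 *     // ... critical section ...
 *     OSSpinLockUnlock(&gEventLock);
 */
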
#endif /* PRIVATE */

/*!
 * @function OSSynchronizeIO
 *
 * @abstract
 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices.
 *
 * @discussion
 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices. On ARM platforms it is provided as an external function; on other supported architectures it compiles to an empty inline.
 */
#if defined(__arm__) || defined(__arm64__)
extern void OSSynchronizeIO(void);
#else
static __inline__ void
OSSynchronizeIO(void)
{
}
#endif
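
/*
 * Usage sketch, illustrative only: the doorbell/status register pair is
 * hypothetical. Forcing a write to an uncached, memory mapped device
 * register to be ordered before a subsequent status read.
 *
 *     static UInt32
 *     ring_doorbell_and_read_status(volatile UInt32 *doorbell,
 *         volatile UInt32 *status)
 *     {
 *             *doorbell = 1;
 *             OSSynchronizeIO();      // order the store before the load
 *             return *status;
 *     }
 */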

#if defined(KERNEL_PRIVATE)

#if defined(__arm__) || defined(__arm64__)
static inline void
OSMemoryBarrier(void)
{
	__asm__ volatile ("dmb ish" ::: "memory" );
}
#elif defined(__i386__) || defined(__x86_64__)
#if defined(XNU_KERNEL_PRIVATE)
static inline void
OSMemoryBarrier(void)
{
	__asm__ volatile ("mfence" ::: "memory" );
}
#endif /* XNU_KERNEL_PRIVATE */
#endif

#endif /* KERNEL_PRIVATE */

#if defined(__cplusplus)
}
#endif

#endif /* ! _OS_OSATOMIC_H */