1 | /* |
2 | * Copyright (c) 2000-2015 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <libkern/OSAtomic.h> |
30 | #include <kern/debug.h> |
31 | #include <machine/atomic.h> |
32 | #include <stdbool.h> |
33 | |
34 | #ifndef NULL |
35 | #define NULL ((void *)0) |
36 | #endif |
37 | |
38 | #define ATOMIC_DEBUG DEBUG |
39 | |
40 | #if ATOMIC_DEBUG |
#define ALIGN_TEST(p, t) \
	do { \
		if ((uintptr_t)(p) & (sizeof(t) - 1)) { \
			panic("Unaligned atomic pointer %p", (p)); \
		} \
	} while (0)
#else
#define ALIGN_TEST(p, t) do { } while (0)
44 | #endif |
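
/*
 * For example (illustrative only, not compiled here), with
 * ATOMIC_DEBUG enabled a pointer with any low address bits set trips
 * the check:
 *
 *	UInt32 buf[2];
 *	UInt32 *p = (UInt32 *)((UInt8 *)buf + 1);
 *	ALIGN_TEST(p, UInt32);	// panics: address & 3 is non-zero
 */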
45 | |
46 | /* |
47 | * atomic operations |
48 | * These are _the_ atomic operations, now implemented via compiler built-ins. |
49 | * It is expected that this C implementation is a candidate for Link-Time- |
50 | * Optimization inlining, whereas the assembler implementations they replace |
51 | * were not. |
52 | */ |
53 | |
54 | #undef OSCompareAndSwap8 |
55 | Boolean |
56 | OSCompareAndSwap8(UInt8 oldValue, UInt8 newValue, volatile UInt8 *address) |
57 | { |
58 | return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); |
59 | } |
60 | |
61 | #undef OSCompareAndSwap16 |
62 | Boolean |
63 | OSCompareAndSwap16(UInt16 oldValue, UInt16 newValue, volatile UInt16 *address) |
64 | { |
65 | return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); |
66 | } |
67 | |
68 | #undef OSCompareAndSwap |
69 | Boolean |
70 | OSCompareAndSwap(UInt32 oldValue, UInt32 newValue, volatile UInt32 *address) |
71 | { |
72 | ALIGN_TEST(address, UInt32); |
73 | return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); |
74 | } |
75 | |
76 | #undef OSCompareAndSwap64 |
77 | Boolean |
78 | OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *address) |
79 | { |
80 | /* |
81 | * _Atomic uint64 requires 8-byte alignment on all architectures. |
82 | * This silences the compiler cast warning. ALIGN_TEST() verifies |
83 | * that the cast was legal, if defined. |
84 | */ |
85 | _Atomic UInt64 *aligned_addr = (_Atomic UInt64 *)(uintptr_t)address; |
86 | |
87 | ALIGN_TEST(address, UInt64); |
88 | return (Boolean)os_atomic_cmpxchg(aligned_addr, oldValue, newValue, acq_rel); |
89 | } |
90 | |
91 | #undef OSCompareAndSwapPtr |
92 | Boolean |
93 | OSCompareAndSwapPtr(void *oldValue, void *newValue, void * volatile *address) |
94 | { |
95 | return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); |
96 | } |
97 | |
98 | SInt8 |
99 | OSAddAtomic8(SInt32 amount, volatile SInt8 *address) |
100 | { |
101 | return os_atomic_add_orig(address, (SInt8)amount, relaxed); |
102 | } |
103 | |
104 | SInt16 |
105 | OSAddAtomic16(SInt32 amount, volatile SInt16 *address) |
106 | { |
107 | return os_atomic_add_orig(address, (SInt16)amount, relaxed); |
108 | } |
109 | |
110 | #undef OSAddAtomic |
111 | SInt32 |
112 | OSAddAtomic(SInt32 amount, volatile SInt32 *address) |
113 | { |
114 | ALIGN_TEST(address, UInt32); |
115 | return os_atomic_add_orig(address, amount, relaxed); |
116 | } |
117 | |
118 | #undef OSAddAtomic64 |
119 | SInt64 |
120 | OSAddAtomic64(SInt64 amount, volatile SInt64 *address) |
121 | { |
	/* Same alignment rationale and cast as in OSCompareAndSwap64(). */
	_Atomic SInt64 *aligned_address = (_Atomic SInt64 *)(uintptr_t)address;
123 | |
124 | ALIGN_TEST(address, SInt64); |
125 | return os_atomic_add_orig(aligned_address, amount, relaxed); |
126 | } |
127 | |
128 | #undef OSAddAtomicLong |
129 | long |
130 | OSAddAtomicLong(long theAmount, volatile long *address) |
131 | { |
132 | return os_atomic_add_orig(address, theAmount, relaxed); |
133 | } |
134 | |
135 | #undef OSIncrementAtomic |
136 | SInt32 |
137 | OSIncrementAtomic(volatile SInt32 * value) |
138 | { |
139 | return os_atomic_inc_orig(value, relaxed); |
140 | } |
141 | |
142 | #undef OSDecrementAtomic |
143 | SInt32 |
144 | OSDecrementAtomic(volatile SInt32 * value) |
145 | { |
146 | return os_atomic_dec_orig(value, relaxed); |
147 | } |
148 | |
149 | #undef OSBitAndAtomic |
150 | UInt32 |
151 | OSBitAndAtomic(UInt32 mask, volatile UInt32 * value) |
152 | { |
153 | return os_atomic_and_orig(value, mask, relaxed); |
154 | } |
155 | |
156 | #undef OSBitOrAtomic |
157 | UInt32 |
158 | OSBitOrAtomic(UInt32 mask, volatile UInt32 * value) |
159 | { |
160 | return os_atomic_or_orig(value, mask, relaxed); |
161 | } |
162 | |
163 | #undef OSBitXorAtomic |
164 | UInt32 |
165 | OSBitXorAtomic(UInt32 mask, volatile UInt32 * value) |
166 | { |
167 | return os_atomic_xor_orig(value, mask, relaxed); |
168 | } |
169 | |
/*
 * Common worker for OSTestAndSet()/OSTestAndClear(): atomically drive
 * the addressed bit to 'wantSet' and report whether it already had
 * that value.  Bits are numbered most-significant-bit-first within
 * each byte, i.e. bit 0 is the 0x80 bit of startAddress[0].
 */
static Boolean
OSTestAndSetClear(UInt32 bit, bool wantSet, volatile UInt8 * startAddress)
{
	UInt8 mask = 1;
	UInt8 oldValue, newValue;
	UInt8 wantValue;
	UInt8 *address;

	/* Locate the byte holding the bit, and build its MSB-first mask. */
	address = (UInt8 *)(uintptr_t)(startAddress + (bit / 8));
	mask <<= (7 - (bit % 8));
	wantValue = wantSet ? mask : 0;

	/*
	 * Retry until the byte is updated, or give up without writing if
	 * the bit already has the desired value.  os_atomic_rmw_loop()
	 * returns false when given up, so the negation yields the bit's
	 * previous state: true iff it already matched 'wantSet'.
	 */
	return !os_atomic_rmw_loop(address, oldValue, newValue, relaxed, {
		if ((oldValue & mask) == wantValue) {
			os_atomic_rmw_loop_give_up(break);
		}
		newValue = (oldValue & ~mask) | wantValue;
	});
}
189 | |
190 | Boolean |
191 | OSTestAndSet(UInt32 bit, volatile UInt8 * startAddress) |
192 | { |
193 | return OSTestAndSetClear(bit, true, startAddress); |
194 | } |
195 | |
196 | Boolean |
197 | OSTestAndClear(UInt32 bit, volatile UInt8 * startAddress) |
198 | { |
199 | return OSTestAndSetClear(bit, false, startAddress); |
200 | } |
201 | |
202 | /* |
203 | * silly unaligned versions |
204 | */ |
205 | |
206 | SInt8 |
207 | OSIncrementAtomic8(volatile SInt8 * value) |
208 | { |
209 | return os_atomic_inc_orig(value, relaxed); |
210 | } |
211 | |
212 | SInt8 |
213 | OSDecrementAtomic8(volatile SInt8 * value) |
214 | { |
215 | return os_atomic_dec_orig(value, relaxed); |
216 | } |
217 | |
218 | UInt8 |
219 | OSBitAndAtomic8(UInt32 mask, volatile UInt8 * value) |
220 | { |
221 | return os_atomic_and_orig(value, (UInt8)mask, relaxed); |
222 | } |
223 | |
224 | UInt8 |
225 | OSBitOrAtomic8(UInt32 mask, volatile UInt8 * value) |
226 | { |
227 | return os_atomic_or_orig(value, (UInt8)mask, relaxed); |
228 | } |
229 | |
230 | UInt8 |
231 | OSBitXorAtomic8(UInt32 mask, volatile UInt8 * value) |
232 | { |
233 | return os_atomic_xor_orig(value, (UInt8)mask, relaxed); |
234 | } |
235 | |
236 | SInt16 |
237 | OSIncrementAtomic16(volatile SInt16 * value) |
238 | { |
	return OSAddAtomic16(1, value);
240 | } |
241 | |
242 | SInt16 |
243 | OSDecrementAtomic16(volatile SInt16 * value) |
244 | { |
	return OSAddAtomic16(-1, value);
246 | } |
247 | |
248 | UInt16 |
249 | OSBitAndAtomic16(UInt32 mask, volatile UInt16 * value) |
250 | { |
251 | return os_atomic_and_orig(value, (UInt16)mask, relaxed); |
252 | } |
253 | |
254 | UInt16 |
255 | OSBitOrAtomic16(UInt32 mask, volatile UInt16 * value) |
256 | { |
257 | return os_atomic_or_orig(value, (UInt16)mask, relaxed); |
258 | } |
259 | |
260 | UInt16 |
261 | OSBitXorAtomic16(UInt32 mask, volatile UInt16 * value) |
262 | { |
263 | return os_atomic_xor_orig(value, (UInt16)mask, relaxed); |
264 | } |
265 | |