/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_RW_LOCK_H_
#define _KERN_RW_LOCK_H_

#include <kern/lock_types.h>
#include <kern/lock_group.h>
#include <kern/lock_attr.h>

#ifdef XNU_KERNEL_PRIVATE
#include <kern/startup.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef MACH_KERNEL_PRIVATE

typedef union {
	struct {
		uint16_t        shared_count;   /* Number of granted shared requests */
		uint16_t
		    interlock:      1,          /* Interlock */
		    priv_excl:      1,          /* Writers have priority over readers */
		    want_upgrade:   1,          /* Read-to-write upgrade waiting */
		    want_excl:      1,          /* Writer is waiting, or locked for write */
		    r_waiting:      1,          /* Reader is sleeping on lock */
		    w_waiting:      1,          /* Writer is sleeping on lock */
		    can_sleep:      1,          /* Can attempts to lock go to sleep? */
		    _pad2:          8,          /* padding */
		    tag_valid:      1;          /* The data word is a tag, not a bitfield */
	};
	uint32_t        data;               /* Single-word view of the bitfields and shared count */
} lck_rw_word_t;

typedef struct {
	uint32_t        lck_rw_unused : 24;     /* tsid one day ... */
	uint32_t        lck_rw_type   : 8;      /* LCK_TYPE_RW */
	uint32_t        lck_rw_padding;
	lck_rw_word_t   lck_rw;
	uint32_t        lck_rw_owner;           /* ctid_t */
} lck_rw_t;     /* arm: 8  arm64: 16  x86: 16 */

#define lck_rw_shared_count     lck_rw.shared_count
#define lck_rw_interlock        lck_rw.interlock
#define lck_rw_priv_excl        lck_rw.priv_excl
#define lck_rw_want_upgrade     lck_rw.want_upgrade
#define lck_rw_want_excl        lck_rw.want_excl
#define lck_r_waiting           lck_rw.r_waiting
#define lck_w_waiting           lck_rw.w_waiting
#define lck_rw_can_sleep        lck_rw.can_sleep
#define lck_rw_data             lck_rw.data
// tag and data reference the same memory. When the tag_valid bit is set,
// the data word should be treated as a tag instead of a bitfield.
#define lck_rw_tag_valid        lck_rw.tag_valid
#define lck_rw_tag              lck_rw.data

#define LCK_RW_SHARED_READER_OFFSET     0
#define LCK_RW_INTERLOCK_BIT            16
#define LCK_RW_PRIV_EXCL_BIT            17
#define LCK_RW_WANT_UPGRADE_BIT         18
#define LCK_RW_WANT_EXCL_BIT            19
#define LCK_RW_R_WAITING_BIT            20
#define LCK_RW_W_WAITING_BIT            21
#define LCK_RW_CAN_SLEEP_BIT            22
// bits 23-30 are unused (padding before the tag_valid bit)
#define LCK_RW_TAG_VALID_BIT            31

#define LCK_RW_INTERLOCK                (1U << LCK_RW_INTERLOCK_BIT)
#define LCK_RW_R_WAITING                (1U << LCK_RW_R_WAITING_BIT)
#define LCK_RW_W_WAITING                (1U << LCK_RW_W_WAITING_BIT)
#define LCK_RW_WANT_UPGRADE             (1U << LCK_RW_WANT_UPGRADE_BIT)
#define LCK_RW_WANT_EXCL                (1U << LCK_RW_WANT_EXCL_BIT)
#define LCK_RW_TAG_VALID                (1U << LCK_RW_TAG_VALID_BIT)
#define LCK_RW_PRIV_EXCL                (1U << LCK_RW_PRIV_EXCL_BIT)
#define LCK_RW_SHARED_MASK              (0xffff << LCK_RW_SHARED_READER_OFFSET)
#define LCK_RW_SHARED_READER            (0x1 << LCK_RW_SHARED_READER_OFFSET)

#define LCK_RW_TAG_DESTROYED            (LCK_RW_TAG_VALID | 0xdddddeadu)  /* lock marked as destroyed */
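
/*
 * Illustrative sketch (an assumption about how the layout is used, not
 * part of this interface): because the shared count and the state bits
 * share a single 32-bit word, the lock fast paths can operate with one
 * atomic op on lck_rw_data. For example, a shared acquisition is
 * conceptually a compare-and-swap that adds one reader when no
 * exclusive-related bit is set:
 *
 *	uint32_t data = lck->lck_rw_data;
 *	if ((data & (LCK_RW_INTERLOCK | LCK_RW_WANT_EXCL |
 *	    LCK_RW_WANT_UPGRADE)) == 0) {
 *		// try to publish data + LCK_RW_SHARED_READER atomically;
 *		// retry or take the slow path on failure
 *	}
 *
 * The real implementation lives in the lock code and also handles
 * waiters, sleeping and preemption.
 */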

#elif KERNEL_PRIVATE
typedef struct {
	uintptr_t opaque[2] __kernel_data_semantics;
} lck_rw_t;
#else /* !KERNEL_PRIVATE */
typedef struct __lck_rw_t__ lck_rw_t;
#endif /* !KERNEL_PRIVATE */

#if DEVELOPMENT || DEBUG
#ifdef XNU_KERNEL_PRIVATE

#define DEBUG_RW                        1
#define LCK_RW_EXPECTED_MAX_NUMBER      3       /* Expected maximum number of rw_locks concurrently held per thread */

#if __LP64__
#define LCK_RW_CALLER_PACKED_BITS       48
#define LCK_RW_CALLER_PACKED_SHIFT      0
#define LCK_RW_CALLER_PACKED_BASE       0
#else
#define LCK_RW_CALLER_PACKED_BITS       32
#define LCK_RW_CALLER_PACKED_SHIFT      0
#define LCK_RW_CALLER_PACKED_BASE       0
#endif

_Static_assert(!VM_PACKING_IS_BASE_RELATIVE(LCK_RW_CALLER_PACKED),
    "Make sure the rwlde_caller_packed pointer packing is based on arithmetic shifts");

struct __attribute__((packed)) rw_lock_debug_entry {
	lck_rw_t        *rwlde_lock;            // rw_lock held
	int8_t          rwlde_mode_count;       // -1 if held in write mode; a positive value is the recursive read count
#if __LP64__
	uintptr_t       rwlde_caller_packed: LCK_RW_CALLER_PACKED_BITS; // caller that created the entry
#else
	uintptr_t       rwlde_caller_packed;    // caller that created the entry
#endif
};
typedef struct rw_lock_debug {
	struct rw_lock_debug_entry rwld_locks[LCK_RW_EXPECTED_MAX_NUMBER]; /* debug info for currently held rw_locks */
	uint8_t         rwld_locks_saved : 7,   /* number of locks saved in rwld_locks */
	    rwld_overflow : 1;                  /* rwld_locks was full at some point, so the saved info might be incomplete */
	uint32_t        rwld_locks_acquired;    /* number of locks acquired */
} rw_lock_debug_t;

_Static_assert(LCK_RW_EXPECTED_MAX_NUMBER <= 127,
    "LCK_RW_EXPECTED_MAX_NUMBER must fit in the 7-bit rwld_locks_saved field");

#endif /* XNU_KERNEL_PRIVATE */
#endif /* DEVELOPMENT || DEBUG */

typedef unsigned int lck_rw_type_t;

#define LCK_RW_TYPE_SHARED      0x01
#define LCK_RW_TYPE_EXCLUSIVE   0x02

#define decl_lck_rw_data(class, name)   class lck_rw_t name

#if XNU_KERNEL_PRIVATE
/*
 * Auto-initializing rw-lock declarations
 * --------------------------------------
 *
 * Unless you need to configure your locks in very specific ways,
 * there is no point creating explicit lock attributes. For most
 * static locks, this declaration macro can be used:
 *
 * - LCK_RW_DECLARE.
 *
 * For cases when some particular attributes need to be used,
 * LCK_RW_DECLARE_ATTR takes a variable declared with
 * LCK_ATTR_DECLARE as an argument.
 */

struct lck_rw_startup_spec {
	lck_rw_t                *lck;
	lck_grp_t               *lck_grp;
	lck_attr_t              *lck_attr;
};

extern void lck_rw_startup_init(
	struct lck_rw_startup_spec *spec);

#define LCK_RW_DECLARE_ATTR(var, grp, attr) \
	lck_rw_t var; \
	static __startup_data struct lck_rw_startup_spec \
	__startup_lck_rw_spec_ ## var = { &var, grp, attr }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_rw_startup_init, \
	    &__startup_lck_rw_spec_ ## var)

#define LCK_RW_DECLARE(var, grp) \
	LCK_RW_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)
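
/*
 * Example usage (a sketch; `my_grp` is a hypothetical lock group
 * declared with LCK_GRP_DECLARE from <kern/lock_group.h>):
 *
 *	static LCK_GRP_DECLARE(my_grp, "my-subsystem");
 *	static LCK_RW_DECLARE(my_lock, &my_grp);
 *
 * The lock is initialized automatically during the LOCKS startup phase,
 * so no explicit lck_rw_init() call is needed.
 */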

#define LCK_RW_ASSERT_SHARED    0x01
#define LCK_RW_ASSERT_EXCLUSIVE 0x02
#define LCK_RW_ASSERT_HELD      0x03
#define LCK_RW_ASSERT_NOTHELD   0x04

#if MACH_ASSERT
extern boolean_t lck_rw_assert_enabled;
#define LCK_RW_ASSERT(lck, type) do {                                           \
	if (__improbable(!(lck_opts_get() & LCK_OPTION_DISABLE_RW_DEBUG))) {    \
	        lck_rw_assert((lck), (type));                                   \
	}                                                                       \
} while (0)
#else /* MACH_ASSERT */
#define LCK_RW_ASSERT(lck, type)
#endif /* MACH_ASSERT */

#endif /* XNU_KERNEL_PRIVATE */

/*!
 * @function lck_rw_alloc_init
 *
 * @abstract
 * Allocates and initializes a lck_rw_t.
 *
 * @discussion
 * The function can block. See lck_rw_init() for initialization details.
 *
 * @param grp           lock group to associate with the lock.
 * @param attr          lock attribute to initialize the lock.
 *
 * @returns             NULL or the allocated lock.
 */
extern lck_rw_t *lck_rw_alloc_init(
	lck_grp_t               *grp,
	lck_attr_t              *attr);
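
/*
 * Example lifecycle (a sketch; `my_grp` is a hypothetical lock group):
 *
 *	lck_rw_t *lck = lck_rw_alloc_init(&my_grp, LCK_ATTR_NULL);
 *	if (lck == NULL) {
 *		// allocation failed
 *	}
 *	...
 *	lck_rw_free(lck, &my_grp);      // see lck_rw_free() below
 */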

/*!
 * @function lck_rw_init
 *
 * @abstract
 * Initializes a lck_rw_t.
 *
 * @discussion
 * Usage statistics for the lock will be accumulated in the lock group provided.
 *
 * The lock attribute can be LCK_ATTR_NULL, or an attribute can be allocated
 * with lck_attr_alloc_init. Note, however, that none of the attribute settings
 * are currently supported.
 *
 * @param lck           lock to initialize.
 * @param grp           lock group to associate with the lock.
 * @param attr          lock attribute to initialize the lock.
 */
extern void lck_rw_init(
	lck_rw_t                *lck,
	lck_grp_t               *grp,
	lck_attr_t              *attr);

/*!
 * @function lck_rw_free
 *
 * @abstract
 * Frees a rw_lock previously allocated with lck_rw_alloc_init().
 *
 * @discussion
 * The lock must not be held by any thread.
 *
 * @param lck           rw_lock to free.
 * @param grp           lock group the lock was initialized with.
 */
extern void lck_rw_free(
	lck_rw_t                *lck,
	lck_grp_t               *grp);

/*!
 * @function lck_rw_destroy
 *
 * @abstract
 * Destroys a rw_lock previously initialized with lck_rw_init().
 *
 * @discussion
 * The lock must not be held by any thread.
 *
 * @param lck           rw_lock to destroy.
 * @param grp           lock group the lock was initialized with.
 */
extern void lck_rw_destroy(
	lck_rw_t                *lck,
	lck_grp_t               *grp);

/*!
 * @function lck_rw_lock
 *
 * @abstract
 * Locks a rw_lock with the specified type.
 *
 * @discussion
 * See lck_rw_lock_shared() or lck_rw_lock_exclusive() for more details.
 *
 * @param lck           rw_lock to lock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 */
extern void lck_rw_lock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);

/*!
 * @function lck_rw_try_lock
 *
 * @abstract
 * Tries to lock a rw_lock with the specified type.
 *
 * @discussion
 * This function returns immediately instead of blocking if the lock cannot
 * be acquired.
 * See lck_rw_try_lock_shared() or lck_rw_try_lock_exclusive() for more details.
 *
 * @param lck           rw_lock to lock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 *
 * @returns             TRUE if the lock was successfully acquired,
 *                      FALSE if it could not be acquired without blocking.
 */
extern boolean_t lck_rw_try_lock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);

/*!
 * @function lck_rw_unlock
 *
 * @abstract
 * Unlocks a rw_lock previously locked with the specified type.
 *
 * @discussion
 * The lock must be unlocked by the same thread that locked it.
 * The lock and unlock types have to match, unless an upgrade/downgrade was
 * performed while holding the lock.
 *
 * @param lck           rw_lock to unlock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 */
extern void lck_rw_unlock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);

/*!
 * @function lck_rw_lock_shared
 *
 * @abstract
 * Locks a rw_lock in shared mode.
 *
 * @discussion
 * This function can block.
 * Multiple threads can acquire the lock in shared mode at the same time,
 * but only one thread at a time can acquire it in exclusive mode.
 * If the lock is held in shared mode and there are no writers waiting,
 * a reader will be able to acquire the lock without waiting.
 * If the lock is held in shared mode and there is at least one writer
 * waiting, a reader will wait for all the writers to make progress.
 * NOTE: the thread cannot return to userspace while the lock is held.
 * Recursive locking is not supported.
 *
 * @param lck           rw_lock to lock.
 */
extern void lck_rw_lock_shared(
	lck_rw_t                *lck);
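
/*
 * Typical reader pattern (a sketch around a hypothetical `my_lock`):
 *
 *	lck_rw_lock_shared(&my_lock);
 *	// read the protected state; other readers may run concurrently
 *	lck_rw_unlock_shared(&my_lock);
 */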

#if MACH_KERNEL_PRIVATE
/*!
 * @function lck_rw_lock_shared_b
 *
 * @abstract
 * Locks a rw_lock in shared mode. Returns early if the lock can't be
 * acquired and the specified block returns true.
 *
 * @discussion
 * Identical to lck_rw_lock_shared() but can return early if the lock can't
 * be acquired and the specified block returns true. The block is called
 * repeatedly when waiting to acquire the lock.
 * Should only be called when the lock cannot sleep (i.e. when
 * lock->lck_rw_can_sleep is false).
 *
 * @param lock          rw_lock to lock.
 * @param lock_pause    block invoked while waiting to acquire the lock.
 *
 * @returns             TRUE if the lock is successfully taken,
 *                      FALSE if the block returned true and the lock has
 *                      not been acquired.
 */
extern boolean_t
lck_rw_lock_shared_b(
	lck_rw_t                *lock,
	bool                    (^lock_pause)(void));

/*!
 * @function lck_rw_lock_exclusive_b
 *
 * @abstract
 * Locks a rw_lock in exclusive mode. Returns early if the lock can't be
 * acquired and the specified block returns true.
 *
 * @discussion
 * Identical to lck_rw_lock_exclusive() but can return early if the lock
 * can't be acquired and the specified block returns true. The block is
 * called repeatedly when waiting to acquire the lock.
 * Should only be called when the lock cannot sleep (i.e. when
 * lock->lck_rw_can_sleep is false).
 *
 * @param lock          rw_lock to lock.
 * @param lock_pause    block invoked while waiting to acquire the lock.
 *
 * @returns             TRUE if the lock is successfully taken,
 *                      FALSE if the block returned true and the lock has
 *                      not been acquired.
 */
extern boolean_t
lck_rw_lock_exclusive_b(
	lck_rw_t                *lock,
	bool                    (^lock_pause)(void));
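
/*
 * Illustrative sketch for the _b variants, assuming a hypothetical
 * should_abort() predicate and a non-sleepable `my_lock`:
 *
 *	boolean_t acquired = lck_rw_lock_shared_b(&my_lock, ^bool (void) {
 *		return should_abort();  // stop waiting once this is true
 *	});
 *	if (acquired) {
 *		// ... critical section ...
 *		lck_rw_unlock_shared(&my_lock);
 *	}
 */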
#endif /* MACH_KERNEL_PRIVATE */

/*!
 * @function lck_rw_lock_shared_to_exclusive
 *
 * @abstract
 * Upgrades a rw_lock held in shared mode to exclusive.
 *
 * @discussion
 * This function can block.
 * Only one reader at a time can upgrade to exclusive mode. If the upgrade
 * fails, the function returns with the lock not held.
 * The caller needs to hold the lock in shared mode to upgrade it.
 *
 * @param lck           rw_lock already held in shared mode to upgrade.
 *
 * @returns             TRUE if the lock was upgraded, FALSE if it was not
 *                      possible. If the function was not able to upgrade
 *                      the lock, the lock will be dropped by the function.
 */
extern boolean_t lck_rw_lock_shared_to_exclusive(
	lck_rw_t                *lck);
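
/*
 * Typical upgrade pattern (a sketch): since a failed upgrade drops the
 * lock, the caller must be ready to re-acquire and re-validate:
 *
 *	lck_rw_lock_shared(&my_lock);
 *	// ... decide the state must be modified ...
 *	if (!lck_rw_lock_shared_to_exclusive(&my_lock)) {
 *		// upgrade failed and the lock was dropped: re-acquire in
 *		// exclusive mode and re-validate anything observed above
 *		lck_rw_lock_exclusive(&my_lock);
 *	}
 *	// ... modify the protected state ...
 *	lck_rw_unlock_exclusive(&my_lock);
 */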

/*!
 * @function lck_rw_unlock_shared
 *
 * @abstract
 * Unlocks a rw_lock previously locked in shared mode.
 *
 * @discussion
 * The same thread that locked the lock needs to unlock it.
 *
 * @param lck           rw_lock held in shared mode to unlock.
 */
extern void lck_rw_unlock_shared(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_exclusive
 *
 * @abstract
 * Locks a rw_lock in exclusive mode.
 *
 * @discussion
 * This function can block.
 * Multiple threads can acquire the lock in shared mode at the same time,
 * but only one thread at a time can acquire it in exclusive mode.
 * NOTE: the thread cannot return to userspace while the lock is held.
 * Recursive locking is not supported.
 *
 * @param lck           rw_lock to lock.
 */
extern void lck_rw_lock_exclusive(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_exclusive_to_shared
 *
 * @abstract
 * Downgrades a rw_lock held in exclusive mode to shared.
 *
 * @discussion
 * The caller needs to hold the lock in exclusive mode to be able to
 * downgrade it.
 *
 * @param lck           rw_lock already held in exclusive mode to downgrade.
 */
extern void lck_rw_lock_exclusive_to_shared(
	lck_rw_t                *lck);
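
/*
 * Example (a sketch): modify under exclusive, then downgrade so other
 * readers can proceed while this thread keeps reading:
 *
 *	lck_rw_lock_exclusive(&my_lock);
 *	// ... modify the protected state ...
 *	lck_rw_lock_exclusive_to_shared(&my_lock);
 *	// ... keep reading the state, concurrently with other readers ...
 *	lck_rw_unlock_shared(&my_lock);
 */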

/*!
 * @function lck_rw_unlock_exclusive
 *
 * @abstract
 * Unlocks a rw_lock previously locked in exclusive mode.
 *
 * @discussion
 * The same thread that locked the lock needs to unlock it.
 *
 * @param lck           rw_lock held in exclusive mode to unlock.
 */
extern void lck_rw_unlock_exclusive(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_sleep
 *
 * @abstract
 * Assert_wait on an event while holding the rw_lock.
 *
 * @discussion
 * The flags determine how the lock is re-acquired upon wakeup
 * (LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
 * and whether the priority needs to be kept boosted until the lock is
 * re-acquired (LCK_SLEEP_PROMOTED_PRI).
 *
 * @param lck                   rw_lock used to synchronize the assert_wait.
 * @param lck_sleep_action      flags.
 * @param event                 event to assert_wait on.
 * @param interruptible         wait type.
 */
extern wait_result_t lck_rw_sleep(
	lck_rw_t                *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible);
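
/*
 * Typical condition-wait loop (a sketch; `condition` is hypothetical
 * state protected by `my_lock`, and waiters are woken with
 * thread_wakeup() on the same event):
 *
 *	lck_rw_lock_exclusive(&my_lock);
 *	while (!condition) {
 *		// drops the lock while waiting, re-acquires it in
 *		// exclusive mode upon wakeup
 *		(void) lck_rw_sleep(&my_lock, LCK_SLEEP_EXCLUSIVE,
 *		    (event_t)&condition, THREAD_UNINT);
 *	}
 *	// ... condition holds, lock held exclusive ...
 *	lck_rw_unlock_exclusive(&my_lock);
 */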

/*!
 * @function lck_rw_sleep_deadline
 *
 * @abstract
 * Assert_wait_deadline on an event while holding the rw_lock.
 *
 * @discussion
 * The flags determine how the lock is re-acquired upon wakeup
 * (LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
 * and whether the priority needs to be kept boosted until the lock is
 * re-acquired (LCK_SLEEP_PROMOTED_PRI).
 *
 * @param lck                   rw_lock used to synchronize the assert_wait.
 * @param lck_sleep_action      flags.
 * @param event                 event to assert_wait on.
 * @param interruptible         wait type.
 * @param deadline              time at which the wait times out if the event
 *                              has not been signaled.
 */
extern wait_result_t lck_rw_sleep_deadline(
	lck_rw_t                *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible,
	uint64_t                deadline);

#ifdef XNU_KERNEL_PRIVATE
/*!
 * @function lck_rw_assert
 *
 * @abstract
 * Asserts that the rw_lock is held.
 *
 * @discussion
 * Read-write locks do not have a concept of ownership when held in shared
 * mode, so this function merely asserts that someone is holding the lock,
 * not necessarily the caller. However, if DEBUG_RW is enabled, a best-effort
 * mechanism to track the owners is in place, and this function can be more
 * accurate.
 * Type can be LCK_RW_ASSERT_SHARED, LCK_RW_ASSERT_EXCLUSIVE,
 * LCK_RW_ASSERT_HELD or LCK_RW_ASSERT_NOTHELD.
 *
 * @param lck           rw_lock to check.
 * @param type          assert type.
 */
extern void lck_rw_assert(
	lck_rw_t                *lck,
	unsigned int            type);

/*!
 * @function kdp_lck_rw_lock_is_acquired_exclusive
 *
 * @abstract
 * Checks if a rw_lock is held exclusively.
 *
 * @discussion
 * NOT SAFE: to be used only by the kernel debugger to avoid deadlock.
 *
 * @param lck           lock to check.
 *
 * @returns             TRUE if the lock is held exclusively.
 */
extern boolean_t kdp_lck_rw_lock_is_acquired_exclusive(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_exclusive_check_contended
 *
 * @abstract
 * Locks a rw_lock in exclusive mode.
 *
 * @discussion
 * This routine IS EXPERIMENTAL.
 * It's only used for the vm object lock, and use for other subsystems is
 * UNSUPPORTED.
 * Note that the return value is ONLY A HEURISTIC w.r.t. the lock's contention.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns             TRUE if the thread spun or blocked while attempting
 *                      to acquire the lock, FALSE otherwise.
 */
extern bool lck_rw_lock_exclusive_check_contended(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_yield_shared
 *
 * @abstract
 * Yields a rw_lock held in shared mode.
 *
 * @discussion
 * This function can block.
 * Yields the lock in case there are writers waiting.
 * The yield will unlock, block, and re-lock the lock in shared mode.
 *
 * @param lck           rw_lock already held in shared mode to yield.
 * @param force_yield   if TRUE, always yield irrespective of the lock status.
 *
 * @returns             TRUE if the lock was yielded, FALSE otherwise.
 */
extern bool lck_rw_lock_yield_shared(
	lck_rw_t                *lck,
	boolean_t               force_yield);
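
/*
 * Example (a sketch): a long scan that periodically lets writers in;
 * `elem`, `first` and `next_elem()` are hypothetical:
 *
 *	lck_rw_lock_shared(&my_lock);
 *	for (elem = first; elem != NULL; elem = next_elem(elem)) {
 *		if (lck_rw_lock_yield_shared(&my_lock, FALSE)) {
 *			// the lock was dropped and re-taken: pointers
 *			// obtained under the lock must be re-validated
 *		}
 *	}
 *	lck_rw_unlock_shared(&my_lock);
 */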

__enum_decl(lck_rw_yield_t, uint32_t, {
	LCK_RW_YIELD_WRITERS_ONLY,
	LCK_RW_YIELD_ANY_WAITER,
	LCK_RW_YIELD_ALWAYS,
});

/*!
 * @function lck_rw_lock_yield_exclusive
 *
 * @abstract
 * Yields a rw_lock held in exclusive mode.
 *
 * @discussion
 * This function can block.
 * Yields the lock in case there are waiters, according to the specified mode.
 * The yield will unlock, block, and re-lock the lock in exclusive mode.
 *
 * @param lck           rw_lock already held in exclusive mode to yield.
 * @param mode          when to yield.
 *
 * @returns             TRUE if the lock was yielded, FALSE otherwise.
 */
extern bool lck_rw_lock_yield_exclusive(
	lck_rw_t                *lck,
	lck_rw_yield_t          mode);

#endif /* XNU_KERNEL_PRIVATE */

#if MACH_KERNEL_PRIVATE

/*!
 * @function lck_rw_lock_count_inc
 *
 * @abstract
 * Increments the number of rw_locks held by the (current) thread.
 */
extern void lck_rw_lock_count_inc(
	thread_t                thread,
	const void              *lock);

/*!
 * @function lck_rw_lock_count_dec
 *
 * @abstract
 * Decrements the number of rw_locks held by the (current) thread.
 */
extern void lck_rw_lock_count_dec(
	thread_t                thread,
	const void              *lock);

/*!
 * @function lck_rw_set_promotion_locked
 *
 * @abstract
 * Callout from context switch if the thread goes
 * off core with a positive rwlock_count.
 *
 * @discussion
 * Called at splsched with the thread locked.
 *
 * @param thread        thread to promote.
 */
extern void lck_rw_set_promotion_locked(
	thread_t                thread);

#endif /* MACH_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/*!
 * @function lck_rw_try_lock_shared
 *
 * @abstract
 * Tries to lock a rw_lock in shared (read) mode.
 *
 * @discussion
 * This function returns instead of blocking if the lock cannot be acquired
 * immediately.
 * See lck_rw_lock_shared() for more details.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns             TRUE if the lock was successfully acquired,
 *                      FALSE if it could not be acquired without blocking.
 */
extern boolean_t lck_rw_try_lock_shared(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_try_lock_exclusive
 *
 * @abstract
 * Tries to lock a rw_lock in exclusive (write) mode.
 *
 * @discussion
 * This function returns instead of blocking if the lock cannot be acquired
 * immediately.
 * See lck_rw_lock_exclusive() for more details.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns             TRUE if the lock was successfully acquired,
 *                      FALSE if it could not be acquired without blocking.
 */
extern boolean_t lck_rw_try_lock_exclusive(
	lck_rw_t                *lck);
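
/*
 * Example try/fallback pattern (a sketch):
 *
 *	if (lck_rw_try_lock_exclusive(&my_lock)) {
 *		// fast path: acquired without blocking
 *		// ... critical section ...
 *		lck_rw_unlock_exclusive(&my_lock);
 *	} else {
 *		// defer the work, or block via lck_rw_lock_exclusive()
 *	}
 */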

/*!
 * @function lck_rw_done
 *
 * @abstract
 * Force-unlocks a rw_lock without consistency checks.
 *
 * @discussion
 * Do not use unless you are sure you can avoid the consistency checks.
 *
 * @param lck           rw_lock to unlock.
 *
 * @returns             the type of lock that was released
 *                      (LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE).
 */
extern lck_rw_type_t lck_rw_done(
	lck_rw_t                *lck);
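
/*
 * Example (a sketch): unlock when the held mode is not known statically,
 * e.g. after a conditional upgrade; the released type is returned:
 *
 *	lck_rw_type_t type = lck_rw_done(&my_lock);
 *	assert(type == LCK_RW_TYPE_SHARED || type == LCK_RW_TYPE_EXCLUSIVE);
 */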
#endif /* KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_RW_LOCK_H_ */