1 | /* |
2 | * Copyright (c) 2003-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #ifndef _KERN_LOCKS_H_ |
30 | #define _KERN_LOCKS_H_ |
31 | |
32 | #include <sys/cdefs.h> |
33 | #include <sys/appleapiopts.h> |
34 | |
35 | #include <mach/boolean.h> |
36 | #include <machine/locks.h> |
37 | |
38 | #include <kern/kern_types.h> |
39 | #include <kern/lock_attr.h> |
40 | #include <kern/lock_group.h> |
41 | #include <kern/lock_mtx.h> |
42 | #include <kern/lock_rw.h> |
43 | #include <kern/lock_types.h> |
44 | #ifdef KERNEL_PRIVATE |
45 | #include <kern/ticket_lock.h> |
46 | #endif |
47 | #ifdef XNU_KERNEL_PRIVATE |
48 | #include <kern/startup.h> |
49 | #include <kern/percpu.h> |
50 | #endif /* XNU_KERNEL_PRIVATE */ |
51 | |
52 | __BEGIN_DECLS |
53 | |
/* Declares a spin lock variable with the given storage class. */
#define decl_lck_spin_data(class, name) class lck_spin_t name

/* Allocates and initializes a spin lock; release with lck_spin_free(). */
extern lck_spin_t *lck_spin_alloc_init(
	lck_grp_t *grp,
	lck_attr_t *attr);

/* Initializes a caller-provided spin lock; tear down with lck_spin_destroy(). */
extern void lck_spin_init(
	lck_spin_t *lck,
	lck_grp_t *grp,
	lck_attr_t *attr);

/* Acquires the spin lock. */
extern void lck_spin_lock(
	lck_spin_t *lck);

/* As lck_spin_lock(), with an explicit lck_grp_t for accounting. */
extern void lck_spin_lock_grp(
	lck_spin_t *lck,
	lck_grp_t *grp);

/* Releases the spin lock. */
extern void lck_spin_unlock(
	lck_spin_t *lck);

/* Tears down a lock initialized with lck_spin_init(). */
extern void lck_spin_destroy(
	lck_spin_t *lck,
	lck_grp_t *grp);

/* Tears down and frees a lock obtained from lck_spin_alloc_init(). */
extern void lck_spin_free(
	lck_spin_t *lck,
	lck_grp_t *grp);

/*
 * Sleeps on event; the lock is dropped while sleeping and reacquired
 * before returning according to lck_sleep_action.
 */
extern wait_result_t lck_spin_sleep(
	lck_spin_t *lck,
	lck_sleep_action_t lck_sleep_action,
	event_t event,
	wait_interrupt_t interruptible);

/* As lck_spin_sleep(), with an explicit lck_grp_t for accounting. */
extern wait_result_t lck_spin_sleep_grp(
	lck_spin_t *lck,
	lck_sleep_action_t lck_sleep_action,
	event_t event,
	wait_interrupt_t interruptible,
	lck_grp_t *grp);

/* As lck_spin_sleep(), but the wait is abandoned once deadline passes. */
extern wait_result_t lck_spin_sleep_deadline(
	lck_spin_t *lck,
	lck_sleep_action_t lck_sleep_action,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline);
102 | |
#ifdef KERNEL_PRIVATE

/*
 * _nopreempt variants of the lock/unlock calls: the caller is responsible
 * for managing preemption state around the critical section
 * (NOTE(review): semantics inferred from naming — confirm against the
 * lck_spin implementation).
 */
extern void lck_spin_lock_nopreempt(
	lck_spin_t *lck);

extern void lck_spin_lock_nopreempt_grp(
	lck_spin_t *lck, lck_grp_t *grp);

extern void lck_spin_unlock_nopreempt(
	lck_spin_t *lck);

/*
 * Try-acquire variants: return TRUE if the lock was acquired,
 * FALSE if it was already held.
 */
extern boolean_t lck_spin_try_lock_grp(
	lck_spin_t *lck,
	lck_grp_t *grp);

extern boolean_t lck_spin_try_lock(
	lck_spin_t *lck);

extern boolean_t lck_spin_try_lock_nopreempt(
	lck_spin_t *lck);

extern boolean_t lck_spin_try_lock_nopreempt_grp(
	lck_spin_t *lck,
	lck_grp_t *grp);

/* NOT SAFE: To be used only by kernel debugger to avoid deadlock. */
extern boolean_t kdp_lck_spin_is_acquired(
	lck_spin_t *lck);
131 | |
132 | /* |
133 | * Name: lck_spin_sleep_with_inheritor |
134 | * |
135 | * Description: |
136 | * deschedule the current thread and wait on the waitq associated with event |
137 | * to be woken up. |
138 | * |
139 | * While waiting, the sched priority of the waiting thread will contribute to |
140 | * the push of the event that will be directed to the inheritor specified. |
141 | * |
142 | * An interruptible mode and deadline can be specified to return earlier from |
143 | * the wait. |
144 | * |
145 | * Args: |
146 | * Arg1: lck_spin_t lock used to protect the sleep. |
 * The lock will be dropped while sleeping and reacquired before
148 | * returning according to the sleep action specified. |
149 | * Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK. |
150 | * Arg3: event to wait on. |
151 | * Arg4: thread to propagate the event push to. |
152 | * Arg5: interruptible flag for wait. |
153 | * Arg6: deadline for wait. |
154 | * |
155 | * Conditions: |
156 | * Lock must be held. |
157 | * |
158 | * Returns with the lock held according to the sleep action specified. |
159 | * Lock will be dropped while waiting. |
160 | * |
161 | * The inheritor specified cannot return to user space or exit until another |
162 | * inheritor is specified for the event or a wakeup for the event is called. |
163 | * |
164 | * Returns: result of the wait. |
165 | */ |
166 | extern wait_result_t lck_spin_sleep_with_inheritor( |
167 | lck_spin_t *lock, |
168 | lck_sleep_action_t lck_sleep_action, |
169 | event_t event, |
170 | thread_t inheritor, |
171 | wait_interrupt_t interruptible, |
172 | uint64_t deadline); |
173 | |
174 | #if MACH_KERNEL_PRIVATE |
175 | |
176 | /* |
177 | * Name: hw_lck_ticket_sleep_with_inheritor |
178 | * |
179 | * Description: |
180 | * deschedule the current thread and wait on the waitq associated with event |
181 | * to be woken up. |
182 | * |
183 | * While waiting, the sched priority of the waiting thread will contribute to |
184 | * the push of the event that will be directed to the inheritor specified. |
185 | * |
186 | * An interruptible mode and deadline can be specified to return earlier from |
187 | * the wait. |
188 | * |
189 | * Args: |
 * Arg1: hw_lck_ticket_t lock used to protect the sleep.
 * The lock will be dropped while sleeping and reacquired before
 * returning according to the sleep action specified.
 * Arg2: lck_grp_t associated with the lock.
 * Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
 * Arg4: event to wait on.
 * Arg5: thread to propagate the event push to.
 * Arg6: interruptible flag for wait.
 * Arg7: deadline for wait.
199 | * |
200 | * Conditions: |
201 | * Lock must be held. |
202 | * |
203 | * Returns with the lock held according to the sleep action specified. |
204 | * |
205 | * Lock will be dropped while waiting. |
206 | * |
207 | * The inheritor specified cannot return to user space or exit until another |
208 | * inheritor is specified for the event or a wakeup for the event is called. |
209 | * |
210 | * Returns: result of the wait. |
211 | */ |
212 | extern wait_result_t hw_lck_ticket_sleep_with_inheritor( |
213 | hw_lck_ticket_t *lock, |
214 | lck_grp_t *grp, |
215 | lck_sleep_action_t lck_sleep_action, |
216 | event_t event, |
217 | thread_t inheritor, |
218 | wait_interrupt_t interruptible, |
219 | uint64_t deadline); |
220 | |
221 | #endif |
222 | |
223 | /* |
224 | * Name: lck_ticket_sleep_with_inheritor |
225 | * |
226 | * Description: |
227 | * deschedule the current thread and wait on the waitq associated with event |
228 | * to be woken up. |
229 | * |
230 | * While waiting, the sched priority of the waiting thread will contribute to |
231 | * the push of the event that will be directed to the inheritor specified. |
232 | * |
233 | * An interruptible mode and deadline can be specified to return earlier from |
234 | * the wait. |
235 | * |
236 | * Args: |
 * Arg1: lck_ticket_t lock used to protect the sleep.
 * The lock will be dropped while sleeping and reacquired before
 * returning according to the sleep action specified.
 * Arg2: lck_grp_t associated with the lock.
 * Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
 * Arg4: event to wait on.
 * Arg5: thread to propagate the event push to.
 * Arg6: interruptible flag for wait.
 * Arg7: deadline for wait.
246 | * |
247 | * Conditions: |
248 | * Lock must be held. |
249 | * |
250 | * Returns with the lock held according to the sleep action specified. |
251 | * |
252 | * Lock will be dropped while waiting. |
253 | * |
254 | * The inheritor specified cannot return to user space or exit until another |
255 | * inheritor is specified for the event or a wakeup for the event is called. |
256 | * |
257 | * Returns: result of the wait. |
258 | */ |
259 | extern wait_result_t lck_ticket_sleep_with_inheritor( |
260 | lck_ticket_t *lock, |
261 | lck_grp_t *grp, |
262 | lck_sleep_action_t lck_sleep_action, |
263 | event_t event, |
264 | thread_t inheritor, |
265 | wait_interrupt_t interruptible, |
266 | uint64_t deadline); |
267 | |
268 | /* |
269 | * Name: lck_mtx_sleep_with_inheritor |
270 | * |
271 | * Description: |
272 | * deschedule the current thread and wait on the waitq associated with event |
273 | * to be woken up. |
274 | * |
275 | * While waiting, the sched priority of the waiting thread will contribute to |
276 | * the push of the event that will be directed to the inheritor specified. |
277 | * |
278 | * An interruptible mode and deadline can be specified to return earlier from |
279 | * the wait. |
280 | * |
281 | * Args: |
282 | * Arg1: lck_mtx_t lock used to protect the sleep. |
 * The lock will be dropped while sleeping and reacquired before
284 | * returning according to the sleep action specified. |
285 | * Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS. |
286 | * Arg3: event to wait on. |
287 | * Arg4: thread to propagate the event push to. |
288 | * Arg5: interruptible flag for wait. |
289 | * Arg6: deadline for wait. |
290 | * |
291 | * Conditions: |
292 | * Lock must be held. |
293 | * |
294 | * Returns with the lock held according to the sleep action specified. |
295 | * |
296 | * Lock will be dropped while waiting. |
297 | * |
298 | * The inheritor specified cannot return to user space or exit until another |
299 | * inheritor is specified for the event or a wakeup for the event is called. |
300 | * |
301 | * Returns: result of the wait. |
302 | */ |
303 | extern wait_result_t lck_mtx_sleep_with_inheritor( |
304 | lck_mtx_t *lock, |
305 | lck_sleep_action_t lck_sleep_action, |
306 | event_t event, |
307 | thread_t inheritor, |
308 | wait_interrupt_t interruptible, |
309 | uint64_t deadline); |
310 | |
311 | /* |
312 | * Name: lck_rw_sleep_with_inheritor |
313 | * |
314 | * Description: |
315 | * deschedule the current thread and wait on the waitq associated with event |
316 | * to be woken up. |
317 | * |
318 | * While waiting, the sched priority of the waiting thread will contribute to |
319 | * the push of the event that will be directed to the inheritor specified. |
320 | * |
321 | * An interruptible mode and deadline can be specified to return earlier from |
322 | * the wait. |
323 | * |
324 | * Args: |
325 | * Arg1: lck_rw_t lock used to protect the sleep. |
 * The lock will be dropped while sleeping and reacquired before
327 | * returning according to the sleep action specified. |
328 | * Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE. |
329 | * Arg3: event to wait on. |
330 | * Arg4: thread to propagate the event push to. |
331 | * Arg5: interruptible flag for wait. |
332 | * Arg6: deadline for wait. |
333 | * |
334 | * Conditions: |
335 | * Lock must be held. |
336 | * |
337 | * Returns with the lock held according to the sleep action specified. |
338 | * |
339 | * Lock will be dropped while waiting. |
340 | * |
341 | * The inheritor specified cannot return to user space or exit until another |
342 | * inheritor is specified for the event or a wakeup for the event is called. |
343 | * |
344 | * Returns: result of the wait. |
345 | */ |
346 | extern wait_result_t lck_rw_sleep_with_inheritor( |
347 | lck_rw_t *lock, |
348 | lck_sleep_action_t lck_sleep_action, |
349 | event_t event, |
350 | thread_t inheritor, |
351 | wait_interrupt_t interruptible, |
352 | uint64_t deadline); |
353 | |
354 | /* |
355 | * Name: wakeup_one_with_inheritor |
356 | * |
357 | * Description: |
358 | * Wake up one waiter for event if any. |
359 | * |
360 | * The thread woken up will be the one with the higher sched priority waiting |
361 | * on event. |
362 | * |
363 | * The push for the event will be transferred from the last inheritor to the |
364 | * woken up thread. |
365 | * |
 * Args:
 * Arg1: event to wake from.
 * Arg2: wait result to pass to the woken up thread.
 * Arg3: actions to perform on the woken up thread (lck_wake_action_t).
 * Arg4: pointer for storing the thread woken up.
370 | * |
371 | * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise. |
372 | * |
373 | * Conditions: |
374 | * The new woken up inheritor cannot return to user space or exit until |
375 | * another inheritor is specified for the event or a new wakeup for the event |
376 | * is performed. |
377 | * |
378 | * A reference for the woken thread is acquired. |
379 | * |
380 | * NOTE: this cannot be called from interrupt context. |
381 | */ |
382 | extern kern_return_t wakeup_one_with_inheritor( |
383 | event_t event, |
384 | wait_result_t result, |
385 | lck_wake_action_t action, |
386 | thread_t *thread_wokenup); |
387 | |
/*
 * Name: wakeup_thread_with_inheritor
 *
 * Description: Variant of wakeup_one_with_inheritor() that wakes up the
 * specific thread (thread_towake) waiting on event instead of selecting
 * the highest-priority waiter.
 *
 * NOTE(review): like the other *_with_inheritor wakeups, this presumably
 * cannot be called from interrupt context — confirm with the implementation.
 */
extern kern_return_t wakeup_thread_with_inheritor(
	event_t event,
	wait_result_t result,
	lck_wake_action_t action,
	thread_t thread_towake);
393 | |
394 | /* |
395 | * Name: wakeup_all_with_inheritor |
396 | * |
397 | * Description: wake up all waiters waiting for event. The old inheritor will lose the push. |
398 | * |
399 | * Args: |
400 | * Arg1: event to wake from. |
401 | * Arg2: wait result to pass to the woken up threads. |
402 | * |
403 | * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise. |
404 | * |
405 | * Conditions: NOTE: this cannot be called from interrupt context. |
406 | */ |
407 | extern kern_return_t wakeup_all_with_inheritor( |
408 | event_t event, |
409 | wait_result_t result); |
410 | |
411 | /* |
412 | * Name: change_sleep_inheritor |
413 | * |
414 | * Description: |
415 | * Redirect the push of the waiting threads of event to the new inheritor specified. |
416 | * |
417 | * Args: |
418 | * Arg1: event to redirect the push. |
419 | * Arg2: new inheritor for event. |
420 | * |
421 | * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise. |
422 | * |
423 | * Conditions: |
424 | * In case of success, the new inheritor cannot return to user space or exit |
425 | * until another inheritor is specified for the event or a wakeup for the |
426 | * event is called. |
427 | * |
428 | * NOTE: this cannot be called from interrupt context. |
429 | */ |
430 | extern kern_return_t change_sleep_inheritor( |
431 | event_t event, |
432 | thread_t inheritor); |
433 | |
434 | |
435 | #if XNU_KERNEL_PRIVATE |
436 | |
437 | /* |
438 | * Bits layout of cond_swi_var32/cond_swi_var64. |
439 | * First SWI_COND_OWNER_BITS are reserved for the owner |
440 | * the remaining can be used by the caller |
441 | */ |
/*
 * Bits layout of cond_swi_var32/cond_swi_var64.
 * The first SWI_COND_OWNER_BITS are reserved for the owner (the ctid of
 * the thread that inherits the push); the remaining bits can be used by
 * the caller for its own synchronization logic.
 */
#define SWI_COND_OWNER_BITS 20
#define SWI_COND_CALLER_BITS (32 - SWI_COND_OWNER_BITS)

/* 32-bit condition variable used by cond_sleep_with_inheritor32(). */
typedef struct cond_swi_var32 {
	union {
		uint32_t cond32_data;                               /* whole-word view */
		struct {
			uint32_t cond32_owner: SWI_COND_OWNER_BITS,         /* ctid of the inheritor */
			    cond32_caller_bits: SWI_COND_CALLER_BITS;       /* caller-defined bits */
		};
	};
} cond_swi_var32_s;

/* 64-bit condition variable used by cond_sleep_with_inheritor64(). */
typedef struct cond_swi_var64 {
	union {
		uint64_t cond64_data;                               /* whole-word view */
		struct {
			uint32_t cond64_owner: SWI_COND_OWNER_BITS,         /* ctid of the inheritor */
			    cond64_caller_bits: SWI_COND_CALLER_BITS;       /* caller-defined bits */
			/*
			 * Fix: the original declared an unnamed `uint32_t ;` member,
			 * which is not a valid member declaration in standard C.
			 * Name the upper caller-owned word explicitly.
			 */
			uint32_t cond64_caller_extra;                       /* caller-defined upper word */
		};
	};
} cond_swi_var64_s;

/* Opaque handle type passed to the cond_* sleep/wakeup functions. */
typedef struct cond_swi_var *cond_swi_var_t;
467 | |
468 | /* |
469 | * Name: cond_sleep_with_inheritor32 |
470 | * |
471 | * Description: Conditionally sleeps with inheritor, with condition variable of 32bits. |
472 | * Allows a thread to conditionally sleep while indicating which thread should |
473 | * inherit the priority push associated with the condition. |
474 | * The condition should be expressed through a cond_swi_var32_s pointer. |
475 | * The condition needs to be populated by the caller with the ctid of the |
476 | * thread that should inherit the push. The remaining bits of the condition |
477 | * can be used by the caller to implement its own synchronization logic. |
478 | * A copy of the condition value observed by the caller when it decided to call |
479 | * this function should be provided to prevent races with matching wakeups. |
480 | * This function will atomically check the value stored in the condition against |
481 | * the expected/observed one provided. If the check doesn't pass the thread will not |
482 | * sleep and the function will return. |
483 | * The ctid provided in the condition will be used only after a successful |
484 | * check. |
485 | * |
486 | * Args: |
487 | * Arg1: cond_swi_var32_s pointer that stores the condition to check. |
488 | * Arg2: cond_swi_var32_s observed value to check for conditionally sleep. |
489 | * Arg3: interruptible flag for wait. |
490 | * Arg4: deadline for wait. |
491 | * |
492 | * Conditions: |
493 | * The inheritor specified cannot return to user space or exit until another |
494 | * inheritor is specified for the cond or a wakeup for the cond is called. |
495 | * |
496 | * Returns: result of the wait. |
497 | */ |
498 | extern wait_result_t cond_sleep_with_inheritor32( |
499 | cond_swi_var_t cond, |
500 | cond_swi_var32_s expected_cond, |
501 | wait_interrupt_t interruptible, |
502 | uint64_t deadline); |
503 | |
504 | /* |
505 | * Name: cond_sleep_with_inheritor64 |
506 | * |
507 | * Description: Conditionally sleeps with inheritor, with condition variable of 64bits. |
508 | * Allows a thread to conditionally sleep while indicating which thread should |
509 | * inherit the priority push associated with the condition. |
510 | * The condition should be expressed through a cond_swi_var64_s pointer. |
511 | * The condition needs to be populated by the caller with the ctid of the |
512 | * thread that should inherit the push. The remaining bits of the condition |
513 | * can be used by the caller to implement its own synchronization logic. |
514 | * A copy of the condition value observed by the caller when it decided to call |
515 | * this function should be provided to prevent races with matching wakeups. |
516 | * This function will atomically check the value stored in the condition against |
517 | * the expected/observed one provided. If the check doesn't pass the thread will not |
518 | * sleep and the function will return. |
519 | * The ctid provided in the condition will be used only after a successful |
520 | * check. |
521 | * |
522 | * Args: |
523 | * Arg1: cond_swi_var64_s pointer that stores the condition to check. |
524 | * Arg2: cond_swi_var64_s observed value to check for conditionally sleep. |
525 | * Arg3: interruptible flag for wait. |
526 | * Arg4: deadline for wait. |
527 | * |
528 | * Conditions: |
529 | * The inheritor specified cannot return to user space or exit until another |
530 | * inheritor is specified for the cond or a wakeup for the cond is called. |
531 | * |
532 | * Returns: result of the wait. |
533 | */ |
534 | extern wait_result_t cond_sleep_with_inheritor64( |
535 | cond_swi_var_t cond, |
536 | cond_swi_var64_s expected_cond, |
537 | wait_interrupt_t interruptible, |
538 | uint64_t deadline); |
539 | |
540 | /* |
541 | * Name: cond_sleep_with_inheritor64_mask |
542 | * |
543 | * Description: Conditionally sleeps with inheritor, with condition variable of 64bits. |
544 | * Allows a thread to conditionally sleep while indicating which thread should |
545 | * inherit the priority push associated with the condition. |
546 | * The condition should be expressed through a cond_swi_var64_s pointer. |
547 | * The condition needs to be populated by the caller with the ctid of the |
548 | * thread that should inherit the push. The remaining bits of the condition |
549 | * can be used by the caller to implement its own synchronization logic. |
550 | * A copy of the condition value observed by the caller when it decided to call |
551 | * this function should be provided to prevent races with matching wakeups. |
552 | * This function will atomically check the value stored in the condition against |
553 | * the expected/observed one provided only for the bits that are set in the mask. |
554 | * If the check doesn't pass the thread will not sleep and the function will return. |
555 | * The ctid provided in the condition will be used only after a successful |
556 | * check. |
557 | * |
558 | * Args: |
559 | * Arg1: cond_swi_var64_s pointer that stores the condition to check. |
560 | * Arg2: cond_swi_var64_s observed value to check for conditionally sleep. |
561 | * Arg3: mask to apply to the condition to check. |
562 | * Arg4: interruptible flag for wait. |
563 | * Arg5: deadline for wait. |
564 | * |
565 | * Conditions: |
566 | * The inheritor specified cannot return to user space or exit until another |
567 | * inheritor is specified for the cond or a wakeup for the cond is called. |
568 | * |
569 | * Returns: result of the wait. |
570 | */ |
571 | extern wait_result_t cond_sleep_with_inheritor64_mask( |
572 | cond_swi_var_t cond, |
573 | cond_swi_var64_s expected_cond, |
574 | uint64_t check_mask, |
575 | wait_interrupt_t interruptible, |
576 | uint64_t deadline); |
577 | |
578 | /* |
579 | * Name: cond_wakeup_one_with_inheritor |
580 | * |
581 | * Description: Wake up one waiter waiting on the condition (if any). |
582 | * The thread woken up will be the one with the higher sched priority waiting on the condition. |
583 | * The push for the condition will be transferred from the last inheritor to the woken up thread. |
584 | * |
 * Args:
 * Arg1: condition to wake from.
 * Arg2: wait result to pass to the woken up thread.
 * Arg3: actions to perform on the woken up thread (lck_wake_action_t).
 * Arg4: pointer for storing the thread woken up.
589 | * |
590 | * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise. |
591 | * |
592 | * Conditions: |
593 | * The new woken up inheritor cannot return to user space or exit until |
594 | * another inheritor is specified for the event or a new wakeup for the event |
595 | * is performed. |
596 | * |
597 | * A reference for the woken thread is acquired. |
598 | * |
599 | * NOTE: this cannot be called from interrupt context. |
600 | */ |
601 | extern kern_return_t cond_wakeup_one_with_inheritor( |
602 | cond_swi_var_t cond, |
603 | wait_result_t result, |
604 | lck_wake_action_t action, |
605 | thread_t *thread_wokenup); |
606 | |
607 | /* |
608 | * Name: cond_wakeup_all_with_inheritor |
609 | * |
610 | * Description: Wake up all waiters waiting on the same condition. The old inheritor will lose the push. |
611 | * |
612 | * Args: |
613 | * Arg1: condition to wake from. |
614 | * Arg2: wait result to pass to the woken up threads. |
615 | * |
616 | * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise. |
617 | * |
618 | * Conditions: NOTE: this cannot be called from interrupt context. |
619 | */ |
620 | extern kern_return_t cond_wakeup_all_with_inheritor( |
621 | cond_swi_var_t cond, |
622 | wait_result_t result); |
623 | |
624 | /* |
625 | * gate structure |
626 | */ |
627 | typedef struct gate { |
628 | uintptr_t gt_data; // thread holder, interlock bit and waiter bit |
629 | struct turnstile *gt_turnstile; // turnstile, protected by the interlock bit |
630 | union { |
631 | struct { |
632 | uint32_t gt_refs:16, // refs using the gate, protected by interlock bit |
633 | gt_alloc:1, // gate was allocated with gate_alloc_init |
634 | gt_type:2, // type bits for validity |
635 | gt_flags_pad:13; // unused |
636 | }; |
637 | uint32_t gt_flags; |
638 | }; |
639 | } gate_t; |
640 | |
641 | #else /* XNU_KERNEL_PRIVATE */ |
642 | |
/*
 * Opaque view of struct gate for consumers outside XNU_KERNEL_PRIVATE:
 * mirrors the size and alignment of the private layout; the fields are
 * not meant to be accessed directly.
 */
typedef struct gate {
	uintptr_t opaque1;
	uintptr_t opaque2;
	uint32_t opaque3;
} gate_t;
648 | |
649 | #endif /* XNU_KERNEL_PRIVATE */ |
650 | |
651 | /* |
652 | * Possible gate_wait_result_t values. |
653 | */ |
__options_decl(gate_wait_result_t, unsigned int, {
	GATE_HANDOFF = 0x00, /* gate was handed off to the current thread */
	GATE_OPENED = 0x01, /* gate was opened */
	GATE_TIMED_OUT = 0x02, /* wait timed out */
	GATE_INTERRUPTED = 0x03, /* wait was interrupted */
});
660 | |
661 | /* |
662 | * Gate flags used by gate_assert |
663 | */ |
664 | __options_decl(gate_assert_flags_t, unsigned int, { |
665 | GATE_ASSERT_CLOSED = 0x00, /* asserts the gate is currently closed */ |
666 | GATE_ASSERT_OPEN = 0x01, /* asserts the gate is currently open */ |
667 | GATE_ASSERT_HELD = 0x02, /* asserts the gate is closed and held by current_thread() */ |
668 | }); |
669 | |
670 | /* |
671 | * Gate flags used by gate_handoff |
672 | */ |
673 | __options_decl(gate_handoff_flags_t, unsigned int, { |
674 | GATE_HANDOFF_DEFAULT = 0x00, /* a waiter must exist to handoff the gate */ |
675 | GATE_HANDOFF_OPEN_IF_NO_WAITERS = 0x1, /* behave like a gate_open() if there are no waiters */ |
676 | }); |
677 | |
678 | /* |
679 | * Name: decl_lck_rw_gate_data |
680 | * |
681 | * Description: declares a gate variable with specified storage class. |
682 | * The gate itself will be stored in this variable and it is the caller's responsibility |
683 | * to ensure that this variable's memory is going to be accessible by all threads that will use |
684 | * the gate. |
685 | * Every gate function will require a pointer to this variable as parameter. The same pointer should |
686 | * be used in every thread. |
687 | * |
688 | * The variable needs to be initialized once with lck_rw_gate_init() and destroyed once with |
689 | * lck_rw_gate_destroy() when not needed anymore. |
690 | * |
691 | * The gate will be used in conjunction with a lck_rw_t. |
692 | * |
693 | * Args: |
694 | * Arg1: storage class. |
695 | * Arg2: variable name. |
696 | */ |
697 | #define decl_lck_rw_gate_data(class, name) class gate_t name |
698 | |
699 | /* |
700 | * Name: lck_rw_gate_init |
701 | * |
702 | * Description: initializes a variable declared with decl_lck_rw_gate_data. |
703 | * |
704 | * Args: |
705 | * Arg1: lck_rw_t lock used to protect the gate. |
706 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. |
707 | */ |
708 | extern void lck_rw_gate_init(lck_rw_t *lock, gate_t *gate); |
709 | |
710 | /* |
711 | * Name: lck_rw_gate_destroy |
712 | * |
713 | * Description: destroys a variable previously initialized |
714 | * with lck_rw_gate_init(). |
715 | * |
716 | * Args: |
717 | * Arg1: lck_rw_t lock used to protect the gate. |
718 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. |
719 | */ |
720 | extern void lck_rw_gate_destroy(lck_rw_t *lock, gate_t *gate); |
721 | |
722 | /* |
723 | * Name: lck_rw_gate_alloc_init |
724 | * |
725 | * Description: allocates and initializes a gate_t. |
726 | * |
727 | * Args: |
728 | * Arg1: lck_rw_t lock used to protect the gate. |
729 | * |
730 | * Returns: |
731 | * gate_t allocated. |
732 | */ |
733 | extern gate_t* lck_rw_gate_alloc_init(lck_rw_t *lock); |
734 | |
735 | /* |
736 | * Name: lck_rw_gate_free |
737 | * |
738 | * Description: destroys and tries to free a gate previously allocated |
739 | * with lck_rw_gate_alloc_init(). |
740 | * The gate free might be delegated to the last thread returning |
741 | * from the gate_wait(). |
742 | * |
743 | * Args: |
744 | * Arg1: lck_rw_t lock used to protect the gate. |
745 | * Arg2: pointer to the gate obtained with lck_rw_gate_alloc_init(). |
746 | */ |
747 | extern void lck_rw_gate_free(lck_rw_t *lock, gate_t *gate); |
748 | |
749 | /* |
750 | * Name: lck_rw_gate_try_close |
751 | * |
752 | * Description: Tries to close the gate. |
753 | * In case of success the current thread will be set as |
754 | * the holder of the gate. |
755 | * |
756 | * Args: |
757 | * Arg1: lck_rw_t lock used to protect the gate. |
758 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. |
759 | * |
760 | * Conditions: Lock must be held. Returns with the lock held. |
761 | * |
762 | * Returns: |
763 | * KERN_SUCCESS in case the gate was successfully closed. The current thread |
764 | * is the new holder of the gate. |
765 | * |
766 | * A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called |
767 | * later on to wake up possible waiters on the gate before returning to |
768 | * userspace. |
769 | * |
770 | * If the intent is to conditionally probe the gate before waiting, the lock |
771 | * must not be dropped between the calls to lck_rw_gate_try_close() and |
772 | * lck_rw_gate_wait(). |
773 | * |
774 | * KERN_FAILURE in case the gate was already closed. |
775 | * Will panic if the current thread was already the holder of the gate. |
776 | * |
777 | * lck_rw_gate_wait() should be called instead if the intent is to |
778 | * unconditionally wait on this gate. |
779 | * |
780 | * The calls to lck_rw_gate_try_close() and lck_rw_gate_wait() should |
781 | * be done without dropping the lock that is protecting the gate in between. |
782 | */ |
783 | extern kern_return_t lck_rw_gate_try_close(lck_rw_t *lock, gate_t *gate); |
784 | |
785 | /* |
786 | * Name: lck_rw_gate_close |
787 | * |
788 | * Description: Closes the gate. The current thread will be set as |
789 | * the holder of the gate. Will panic if the gate is already closed. |
790 | * A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on |
791 | * to wake up possible waiters on the gate before returning to userspace. |
792 | * |
793 | * Args: |
794 | * Arg1: lck_rw_t lock used to protect the gate. |
795 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. |
796 | * |
797 | * Conditions: Lock must be held. Returns with the lock held. |
798 | * The gate must be open. |
799 | * |
800 | */ |
801 | extern void lck_rw_gate_close(lck_rw_t *lock, gate_t *gate); |
802 | |
803 | |
804 | /* |
805 | * Name: lck_rw_gate_open |
806 | * |
807 | * Description: Opens the gate and wakes up possible waiters. |
808 | * |
809 | * Args: |
810 | * Arg1: lck_rw_t lock used to protect the gate. |
811 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. |
812 | * |
813 | * Conditions: Lock must be held. Returns with the lock held. |
814 | * The current thread must be the holder of the gate. |
815 | * |
816 | */ |
817 | extern void lck_rw_gate_open(lck_rw_t *lock, gate_t *gate); |
818 | |
819 | /* |
820 | * Name: lck_rw_gate_handoff |
821 | * |
822 | * Description: Tries to transfer the ownership of the gate. The waiter with highest sched |
823 | * priority will be selected as the new holder of the gate, and woken up, |
824 | * with the gate remaining in the closed state throughout. |
825 | * If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING |
826 | * will be returned. |
827 | * GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in |
828 | * case no waiters were found. |
829 | * |
830 | * |
831 | * Args: |
832 | * Arg1: lck_rw_t lock used to protect the gate. |
833 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. |
834 | * Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS |
835 | * |
836 | * Conditions: Lock must be held. Returns with the lock held. |
837 | * The current thread must be the holder of the gate. |
838 | * |
839 | * Returns: |
840 | * KERN_SUCCESS in case one of the waiters became the new holder. |
841 | * KERN_NOT_WAITING in case there were no waiters. |
842 | * |
843 | */ |
844 | extern kern_return_t lck_rw_gate_handoff(lck_rw_t *lock, gate_t *gate, gate_handoff_flags_t flags); |
845 | |
846 | /* |
847 | * Name: lck_rw_gate_steal |
848 | * |
 * Description: Steals the ownership of the gate. It sets the current thread as the
 * new holder of the gate.
851 | * A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on |
852 | * to wake up possible waiters on the gate before returning to userspace. |
853 | * NOTE: the previous holder should not call lck_rw_gate_open() or lck_rw_gate_handoff() |
854 | * anymore. |
855 | * |
856 | * |
857 | * Args: |
858 | * Arg1: lck_rw_t lock used to protect the gate. |
859 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. |
860 | * |
861 | * Conditions: Lock must be held. Returns with the lock held. |
862 | * The gate must be closed and the current thread must not already be the holder. |
863 | * |
864 | */ |
865 | extern void lck_rw_gate_steal(lck_rw_t *lock, gate_t *gate); |
866 | |
867 | /* |
868 | * Name: lck_rw_gate_wait |
869 | * |
870 | * Description: Waits for the current thread to become the holder of the gate or for the |
871 | * gate to become open. An interruptible mode and deadline can be specified |
872 | * to return earlier from the wait. |
873 | * |
874 | * Args: |
875 | * Arg1: lck_rw_t lock used to protect the gate. |
876 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. |
 * Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, LCK_SLEEP_UNLOCK.
 * Arg4: interruptible flag for wait.
 * Arg5: deadline
880 | * |
881 | * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified. |
882 | * Lock will be dropped while waiting. |
883 | * The gate must be closed. |
884 | * |
885 | * Returns: Reason why the thread was woken up. |
886 | * GATE_HANDOFF - the current thread was handed off the ownership of the gate. |
 *                        A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
 *                        to wake up possible waiters on the gate before returning to userspace.
889 | * GATE_OPENED - the gate was opened by the holder. |
890 | * GATE_TIMED_OUT - the thread was woken up by a timeout. |
891 | * GATE_INTERRUPTED - the thread was interrupted while sleeping. |
892 | */ |
893 | extern gate_wait_result_t lck_rw_gate_wait( |
894 | lck_rw_t *lock, |
895 | gate_t *gate, |
896 | lck_sleep_action_t lck_sleep_action, |
897 | wait_interrupt_t interruptible, |
898 | uint64_t deadline); |
899 | |
900 | /* |
901 | * Name: lck_rw_gate_assert |
902 | * |
903 | * Description: asserts that the gate is in the specified state. |
904 | * |
905 | * Args: |
906 | * Arg1: lck_rw_t lock used to protect the gate. |
907 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. |
 * Arg3: flags to specify the assert type.
909 | * GATE_ASSERT_CLOSED - the gate is currently closed |
910 | * GATE_ASSERT_OPEN - the gate is currently opened |
911 | * GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder |
912 | */ |
913 | extern void lck_rw_gate_assert(lck_rw_t *lock, gate_t *gate, gate_assert_flags_t flags); |
914 | |
915 | /* |
916 | * Name: decl_lck_mtx_gate_data |
917 | * |
918 | * Description: declares a gate variable with specified storage class. |
919 | * The gate itself will be stored in this variable and it is the caller's responsibility |
920 | * to ensure that this variable's memory is going to be accessible by all threads that will use |
921 | * the gate. |
922 | * Every gate function will require a pointer to this variable as parameter. The same pointer should |
923 | * be used in every thread. |
924 | * |
925 | * The variable needs to be initialized once with lck_mtx_gate_init() and destroyed once with |
926 | * lck_mtx_gate_destroy() when not needed anymore. |
927 | * |
928 | * The gate will be used in conjunction with a lck_mtx_t. |
929 | * |
930 | * Args: |
931 | * Arg1: storage class. |
932 | * Arg2: variable name. |
933 | */ |
934 | #define decl_lck_mtx_gate_data(class, name) class gate_t name |
935 | |
936 | /* |
937 | * Name: lck_mtx_gate_init |
938 | * |
939 | * Description: initializes a variable declared with decl_lck_mtx_gate_data. |
940 | * |
941 | * Args: |
942 | * Arg1: lck_mtx_t lock used to protect the gate. |
943 | * Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data. |
944 | */ |
945 | extern void lck_mtx_gate_init(lck_mtx_t *lock, gate_t *gate); |
946 | |
947 | /* |
948 | * Name: lck_mtx_gate_destroy |
949 | * |
950 | * Description: destroys a variable previously initialized |
951 | * with lck_mtx_gate_init(). |
952 | * |
953 | * Args: |
954 | * Arg1: lck_mtx_t lock used to protect the gate. |
955 | * Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data. |
956 | */ |
957 | extern void lck_mtx_gate_destroy(lck_mtx_t *lock, gate_t *gate); |
958 | |
959 | /* |
960 | * Name: lck_mtx_gate_alloc_init |
961 | * |
962 | * Description: allocates and initializes a gate_t. |
963 | * |
964 | * Args: |
965 | * Arg1: lck_mtx_t lock used to protect the gate. |
966 | * |
967 | * Returns: |
968 | * gate_t allocated. |
969 | */ |
970 | extern gate_t* lck_mtx_gate_alloc_init(lck_mtx_t *lock); |
971 | |
972 | /* |
973 | * Name: lck_mtx_gate_free |
974 | * |
975 | * Description: destroys and tries to free a gate previously allocated |
976 | * with lck_mtx_gate_alloc_init(). |
977 | * The gate free might be delegated to the last thread returning |
978 | * from the gate_wait(). |
979 | * |
980 | * Args: |
981 | * Arg1: lck_mtx_t lock used to protect the gate. |
982 | * Arg2: pointer to the gate obtained with lck_mtx_gate_alloc_init(). |
983 | */ |
984 | extern void lck_mtx_gate_free(lck_mtx_t *lock, gate_t *gate); |
985 | |
986 | /* |
987 | * Name: lck_mtx_gate_try_close |
988 | * |
989 | * Description: Tries to close the gate. |
990 | * In case of success the current thread will be set as |
991 | * the holder of the gate. |
992 | * |
993 | * Args: |
994 | * Arg1: lck_mtx_t lock used to protect the gate. |
995 | * Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data. |
996 | * |
997 | * Conditions: Lock must be held. Returns with the lock held. |
998 | * |
999 | * Returns: |
1000 | * KERN_SUCCESS in case the gate was successfully closed. The current thread |
1001 | * is the new holder of the gate. |
1002 | * |
1003 | * A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called |
1004 | * later on to wake up possible waiters on the gate before returning to |
1005 | * userspace. |
1006 | * |
1007 | * If the intent is to conditionally probe the gate before waiting, the lock |
1008 | * must not be dropped between the calls to lck_mtx_gate_try_close() and |
1009 | * lck_mtx_gate_wait(). |
1010 | * |
1011 | * KERN_FAILURE in case the gate was already closed. Will panic if the current |
1012 | * thread was already the holder of the gate. |
1013 | * |
1014 | * lck_mtx_gate_wait() should be called instead if the intent is to |
1015 | * unconditionally wait on this gate. |
1016 | * |
1017 | * The calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait() should |
1018 | * be done without dropping the lock that is protecting the gate in between. |
1019 | */ |
1020 | extern kern_return_t lck_mtx_gate_try_close(lck_mtx_t *lock, gate_t *gate); |
1021 | |
1022 | /* |
1023 | * Name: lck_mtx_gate_close |
1024 | * |
1025 | * Description: Closes the gate. The current thread will be set as |
1026 | * the holder of the gate. Will panic if the gate is already closed. |
1027 | * A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on |
1028 | * to wake up possible waiters on the gate before returning to userspace. |
1029 | * |
1030 | * Args: |
1031 | * Arg1: lck_mtx_t lock used to protect the gate. |
1032 | * Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data. |
1033 | * |
1034 | * Conditions: Lock must be held. Returns with the lock held. |
1035 | * The gate must be open. |
1036 | * |
1037 | */ |
1038 | extern void lck_mtx_gate_close(lck_mtx_t *lock, gate_t *gate); |
1039 | |
1040 | /* |
1041 | * Name: lck_mtx_gate_open |
1042 | * |
 * Description: Opens the gate and wakes up possible waiters.
1044 | * |
1045 | * Args: |
1046 | * Arg1: lck_mtx_t lock used to protect the gate. |
1047 | * Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data. |
1048 | * |
1049 | * Conditions: Lock must be held. Returns with the lock held. |
1050 | * The current thread must be the holder of the gate. |
1051 | * |
1052 | */ |
1053 | extern void lck_mtx_gate_open(lck_mtx_t *lock, gate_t *gate); |
1054 | |
1055 | /* |
1056 | * Name: lck_mtx_gate_handoff |
1057 | * |
1058 | * Description: Tries to transfer the ownership of the gate. The waiter with highest sched |
1059 | * priority will be selected as the new holder of the gate, and woken up, |
1060 | * with the gate remaining in the closed state throughout. |
1061 | * If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING |
1062 | * will be returned. |
1063 | * GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in |
1064 | * case no waiters were found. |
1065 | * |
1066 | * |
1067 | * Args: |
1068 | * Arg1: lck_mtx_t lock used to protect the gate. |
1069 | * Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data. |
1070 | * Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS |
1071 | * |
1072 | * Conditions: Lock must be held. Returns with the lock held. |
1073 | * The current thread must be the holder of the gate. |
1074 | * |
1075 | * Returns: |
1076 | * KERN_SUCCESS in case one of the waiters became the new holder. |
1077 | * KERN_NOT_WAITING in case there were no waiters. |
1078 | * |
1079 | */ |
1080 | extern kern_return_t lck_mtx_gate_handoff(lck_mtx_t *lock, gate_t *gate, gate_handoff_flags_t flags); |
1081 | |
1082 | /* |
1083 | * Name: lck_mtx_gate_steal |
1084 | * |
1085 | * Description: Steals the ownership of the gate. It sets the current thread as the |
1086 | * new holder of the gate. |
1087 | * A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on |
1088 | * to wake up possible waiters on the gate before returning to userspace. |
1089 | * NOTE: the previous holder should not call lck_mtx_gate_open() or lck_mtx_gate_handoff() |
1090 | * anymore. |
1091 | * |
1092 | * |
1093 | * Args: |
1094 | * Arg1: lck_mtx_t lock used to protect the gate. |
1095 | * Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data. |
1096 | * |
1097 | * Conditions: Lock must be held. Returns with the lock held. |
1098 | * The gate must be closed and the current thread must not already be the holder. |
1099 | * |
1100 | */ |
1101 | extern void lck_mtx_gate_steal(lck_mtx_t *lock, gate_t *gate); |
1102 | |
1103 | /* |
1104 | * Name: lck_mtx_gate_wait |
1105 | * |
1106 | * Description: Waits for the current thread to become the holder of the gate or for the |
1107 | * gate to become open. An interruptible mode and deadline can be specified |
1108 | * to return earlier from the wait. |
1109 | * |
1110 | * Args: |
1111 | * Arg1: lck_mtx_t lock used to protect the gate. |
1112 | * Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data. |
 * Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
 * Arg4: interruptible flag for wait.
 * Arg5: deadline
1116 | * |
1117 | * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified. |
1118 | * Lock will be dropped while waiting. |
1119 | * The gate must be closed. |
1120 | * |
1121 | * Returns: Reason why the thread was woken up. |
1122 | * GATE_HANDOFF - the current thread was handed off the ownership of the gate. |
1123 | * A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on |
1124 | * to wake up possible waiters on the gate before returning to userspace. |
1125 | * GATE_OPENED - the gate was opened by the holder. |
1126 | * GATE_TIMED_OUT - the thread was woken up by a timeout. |
1127 | * GATE_INTERRUPTED - the thread was interrupted while sleeping. |
1128 | */ |
1129 | extern gate_wait_result_t lck_mtx_gate_wait( |
1130 | lck_mtx_t *lock, |
1131 | gate_t *gate, |
1132 | lck_sleep_action_t lck_sleep_action, |
1133 | wait_interrupt_t interruptible, |
1134 | uint64_t deadline); |
1135 | |
1136 | /* |
1137 | * Name: lck_mtx_gate_assert |
1138 | * |
1139 | * Description: asserts that the gate is in the specified state. |
1140 | * |
1141 | * Args: |
1142 | * Arg1: lck_mtx_t lock used to protect the gate. |
1143 | * Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data. |
 * Arg3: flags to specify the assert type.
1145 | * GATE_ASSERT_CLOSED - the gate is currently closed |
1146 | * GATE_ASSERT_OPEN - the gate is currently opened |
1147 | * GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder |
1148 | */ |
1149 | extern void lck_mtx_gate_assert(lck_mtx_t *lock, gate_t *gate, gate_assert_flags_t flags); |
1150 | |
/*
 * Name: lck_spin_assert
 *
 * Description: asserts that the spin lock is in the specified state.
 *
 * Args:
 *	Arg1: spin lock to check.
 *	Arg2: assert type: LCK_ASSERT_OWNED or LCK_ASSERT_NOTOWNED.
 */
extern void lck_spin_assert(
	const lck_spin_t *lck,
	unsigned int type);

#if CONFIG_PV_TICKET
/* One-time startup initialization of paravirtualized ticket lock support. */
__startup_func extern void lck_init_pv(void);
#endif
1158 | |
1159 | #endif /* KERNEL_PRIVATE */ |
1160 | |
/* LCK_SPIN_ASSERT() forwards to lck_spin_assert() only on MACH_ASSERT builds;
 * otherwise it compiles away entirely. */
#if MACH_ASSERT
#define LCK_SPIN_ASSERT(lck, type) lck_spin_assert((lck),(type))
#else /* MACH_ASSERT */
#define LCK_SPIN_ASSERT(lck, type)
#endif /* MACH_ASSERT */

/* LCK_SPIN_ASSERT_DEBUG() is the same, but enabled only on DEBUG kernels. */
#if DEBUG
#define LCK_SPIN_ASSERT_DEBUG(lck, type) lck_spin_assert((lck),(type))
#else /* DEBUG */
#define LCK_SPIN_ASSERT_DEBUG(lck, type)
#endif /* DEBUG */

/* Assert types accepted by lck_spin_assert() / LCK_SPIN_ASSERT(). */
#define LCK_ASSERT_OWNED 1
#define LCK_ASSERT_NOTOWNED 2
1175 | |
1176 | #ifdef MACH_KERNEL_PRIVATE |
1177 | |
/*
 * Diagnostic record filled in when a spinlock acquisition times out,
 * consumed by the timeout/panic reporting path (see lck_spinlock_timeout_hit()).
 * One instance exists per CPU (PERCPU_DECL below).
 */
typedef struct lck_spinlock_to_info {
	void *lock;                      /* address of the lock that timed out */
#if DEBUG || DEVELOPMENT
	uintptr_t owner_thread_orig;     /* owner recorded when the wait began */
#endif /* DEBUG || DEVELOPMENT */
	uintptr_t owner_thread_cur;      /* owner observed when the timeout hit */
	int owner_cpu;                   /* CPU the owner was last seen on */
	uint32_t extra;                  /* lock-type specific extra state */
} *lck_spinlock_to_info_t;

/* Set while a spinlock timeout is being reported; NULL otherwise — TODO confirm. */
extern volatile lck_spinlock_to_info_t lck_spinlock_timeout_in_progress;
PERCPU_DECL(struct lck_spinlock_to_info, lck_spinlock_to_info);

/*
 * Per-CPU state for paravirtualized ticket locks
 * (used with the CONFIG_PV_TICKET support, see lck_init_pv()).
 */
typedef struct lck_tktlock_pv_info {
	void *ltpi_lck;                  /* ticket lock currently waited on */
	uint8_t ltpi_wt;                 /* NOTE(review): presumably the waited-for ticket value — verify */
} *lck_tktlock_pv_info_t;

PERCPU_DECL(struct lck_tktlock_pv_info, lck_tktlock_pv_info);

/* Record the original owner (as a thread pointer) before spinning. */
extern void lck_spinlock_timeout_set_orig_owner(
	uintptr_t owner);

/* Record the original owner (as a compact thread id) before spinning. */
extern void lck_spinlock_timeout_set_orig_ctid(
	uint32_t ctid);

/* Called when a spinlock timeout fires; fills and returns this CPU's info record. */
extern lck_spinlock_to_info_t lck_spinlock_timeout_hit(
	void *lck,
	uintptr_t owner);
1207 | |
1208 | #endif /* MACH_KERNEL_PRIVATE */ |
1209 | #if XNU_KERNEL_PRIVATE |
1210 | |
/* Returns a kdebug-safe (unslid) representation of a kernel pointer. */
uintptr_t unslide_for_kdebug(const void* object) __pure2;

/*
 * Startup-time lock initialization descriptors.
 *
 * These specs are registered via STARTUP_ARG(LOCKS, ...) by the
 * LCK_*_DECLARE macros below, and consumed by the matching
 * lck_*_startup_init() functions during boot.
 */

/* Spec for initializing a lck_attr_t: flags to set and clear on it. */
struct lck_attr_startup_spec {
	lck_attr_t *lck_attr;
	uint32_t lck_attr_set_flags;
	uint32_t lck_attr_clear_flags;
};

/* Spec for initializing a statically declared spin lock. */
struct lck_spin_startup_spec {
	lck_spin_t *lck;
	lck_grp_t *lck_grp;
	lck_attr_t *lck_attr;
};

/* Spec for initializing a statically declared ticket lock. */
struct lck_ticket_startup_spec {
	lck_ticket_t *lck;
	lck_grp_t *lck_grp;
};

/* Startup callback: applies a lck_attr_startup_spec. */
extern void lck_attr_startup_init(
	struct lck_attr_startup_spec *spec);

/* Startup callback: initializes the spin lock described by the spec. */
extern void lck_spin_startup_init(
	struct lck_spin_startup_spec *spec);

/* Startup callback: initializes the ticket lock described by the spec. */
extern void lck_ticket_startup_init(
	struct lck_ticket_startup_spec *spec);
1238 | |
1239 | /* |
1240 | * Auto-initializing locks declarations |
1241 | * ------------------------------------ |
1242 | * |
1243 | * Unless you need to configure your locks in very specific ways, |
1244 | * there is no point creating explicit lock attributes. For most |
1245 | * static locks, these declaration macros can be used: |
1246 | * |
1247 | * - LCK_SPIN_DECLARE for spinlocks, |
1248 | * - LCK_MTX_DECLARE for mutexes, |
1249 | * |
1250 | * For cases when some particular attributes need to be used, |
1251 | * these come in *_ATTR variants that take a variable declared with |
1252 | * LCK_ATTR_DECLARE as an argument. |
1253 | */ |
/* Declares a lck_attr_t initialized at boot with the given set/clear flags. */
#define LCK_ATTR_DECLARE(var, set_flags, clear_flags) \
	SECURITY_READ_ONLY_LATE(lck_attr_t) var; \
	static __startup_data struct lck_attr_startup_spec \
	__startup_lck_attr_spec_ ## var = { &var, set_flags, clear_flags }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_SECOND, lck_attr_startup_init, \
	    &__startup_lck_attr_spec_ ## var)

/* Declares a spin lock auto-initialized at boot with explicit attributes. */
#define LCK_SPIN_DECLARE_ATTR(var, grp, attr) \
	lck_spin_t var; \
	static __startup_data struct lck_spin_startup_spec \
	__startup_lck_spin_spec_ ## var = { &var, grp, attr }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_spin_startup_init, \
	    &__startup_lck_spin_spec_ ## var)

/* Declares a spin lock auto-initialized at boot with default attributes. */
#define LCK_SPIN_DECLARE(var, grp) \
	LCK_SPIN_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)

/* Declares a ticket lock auto-initialized at boot. */
#define LCK_TICKET_DECLARE(var, grp) \
	lck_ticket_t var; \
	static __startup_data struct lck_ticket_startup_spec \
	__startup_lck_ticket_spec_ ## var = { &var, grp }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_ticket_startup_init, \
	    &__startup_lck_ticket_spec_ ## var)
1277 | |
1278 | #endif /* XNU_KERNEL_PRIVATE */ |
1279 | |
1280 | __END_DECLS |
1281 | |
1282 | #endif /* _KERN_LOCKS_H_ */ |
1283 | |