/*
 * Copyright (c) 2014-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _WAITQ_H_
#define _WAITQ_H_
#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h> /* for kern_return_t */

#include <kern/kern_types.h> /* for wait_queue_t */
#include <kern/queue.h>
#include <kern/assert.h>

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
/* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
#include <kern/priority_queue.h>
#ifdef MACH_KERNEL_PRIVATE
#include <kern/spl.h>
#include <kern/ticket_lock.h>
#include <kern/circle_queue.h>
#include <kern/mpsc_queue.h>

#include <machine/cpu_number.h>
#include <machine/machine_routines.h> /* machine_timeout_suspended() */
#endif /* MACH_KERNEL_PRIVATE */
#endif /* XNU_KERNEL_PRIVATE */
__BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN

#pragma GCC visibility push(hidden)

/*!
 * @enum waitq_wakeup_flags_t
 *
 * @const WAITQ_WAKEUP_DEFAULT
 * Use the default behavior for wakeup.
 *
 * @const WAITQ_UPDATE_INHERITOR
 * If the wait queue is a turnstile,
 * set its inheritor to the woken up thread,
 * or clear the inheritor if the last thread is woken up.
 *
 #if MACH_KERNEL_PRIVATE
 * @const WAITQ_PROMOTE_PRIORITY (Mach IPC only)
 * Promote the woken up thread(s) with a MINPRI_WAITQ floor,
 * until it calls waitq_clear_promotion_locked().
 *
 * @const WAITQ_UNLOCK (waitq_wakeup64_*_locked only)
 * Unlock the wait queue before any thread_go() is called for woken up threads.
 *
 * @const WAITQ_ENABLE_INTERRUPTS (waitq_wakeup64_*_locked only)
 * Also enable interrupts when unlocking the wait queue.
 *
 * @const WAITQ_KEEP_LOCKED (waitq_wakeup64_*_locked only)
 * Keep the wait queue locked for this call.
 *
 * @const WAITQ_HANDOFF (waitq_wakeup64_one, waitq_wakeup64_identify*)
 * Attempt a handoff to the woken up thread.
 #endif
 */
__options_decl(waitq_wakeup_flags_t, uint32_t, {
        WAITQ_WAKEUP_DEFAULT    = 0x0000,
        WAITQ_UPDATE_INHERITOR  = 0x0001,
#if MACH_KERNEL_PRIVATE
        WAITQ_PROMOTE_PRIORITY  = 0x0002,
        WAITQ_UNLOCK            = 0x0004,
        WAITQ_KEEP_LOCKED       = 0x0000,
        WAITQ_HANDOFF           = 0x0008,
        WAITQ_ENABLE_INTERRUPTS = 0x0010,
#endif /* MACH_KERNEL_PRIVATE */
});
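
/*
 * Example (illustrative sketch, not part of this header's declarations):
 * waking one waiter of an unlocked wait queue with the default behavior,
 * versus asking a turnstile-backed wait queue to also update its inheritor.
 * The wait queues and the waited-on object below are hypothetical.
 *
 *      (void)waitq_wakeup64_one(wq, (event64_t)(uintptr_t)&obj,
 *          THREAD_AWAKENED, WAITQ_WAKEUP_DEFAULT);
 *
 *      (void)waitq_wakeup64_one(ts_wq, (event64_t)(uintptr_t)&obj,
 *          THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);
 */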

/* Opaque sizes and alignment used for struct verification */
#if __arm__ || __arm64__
        #define WQ_OPAQUE_ALIGN  __BIGGEST_ALIGNMENT__
        #if __arm__
                #define WQ_OPAQUE_SIZE   32
        #else
                #define WQ_OPAQUE_SIZE   40
        #endif
#elif __x86_64__
        #define WQ_OPAQUE_ALIGN  8
        #define WQ_OPAQUE_SIZE   48
#else
        #error Unknown size requirement
#endif

#ifdef __cplusplus
#define __waitq_transparent_union
#else
#define __waitq_transparent_union __attribute__((__transparent_union__))
#endif

/**
 * @typedef waitq_t
 *
 * @brief
 * This is an abstract typedef used to denote waitq APIs that can be called
 * on any kind of wait queue (or wait queue set).
 */
typedef union {
        struct waitq       *wq_q;
        struct waitq_set   *wqs_set;
        struct select_set  *wqs_sel;
} __waitq_transparent_union waitq_t;
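
/*
 * Example (illustrative sketch): because of the transparent union, C callers
 * can pass a struct waitq *, struct waitq_set *, or struct select_set *
 * directly to any waitq_t-taking API without a cast (C only; the attribute
 * is disabled for C++ above). The variables are hypothetical.
 *
 *      struct waitq *wq;
 *      struct waitq_set *wqs;
 *
 *      waitq_lock(wq);     // wq is implicitly wrapped in a waitq_t
 *      waitq_unlock(wq);
 *      waitq_lock(wqs);    // same API, wait queue set flavor
 *      waitq_unlock(wqs);
 */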

#if !MACH_KERNEL_PRIVATE

/*
 * The opaque waitq structure is here mostly for AIO and selinfo,
 * but could potentially be used by other BSD subsystems.
 */
struct waitq {
        char opaque[WQ_OPAQUE_SIZE];
} __attribute__((aligned(WQ_OPAQUE_ALIGN)));

#endif /* !MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

/**
 * @typedef waitq_link_t
 *
 * @brief
 * Union that represents any kind of wait queue link.
 *
 * @discussion
 * Unlike @c waitq_t which can be used safely on its own because
 * @c waitq_type() can return which actual wait queue type is pointed at,
 * @c waitq_link_t can't be used without knowing the type of wait queue
 * (or wait queue set) it refers to.
 */
typedef union {
        struct waitq_link_hdr  *wqlh;
        struct waitq_sellink   *wqls;
        struct waitq_link      *wqll;
} __waitq_transparent_union waitq_link_t;

#define WQL_NULL ((waitq_link_t){ .wqlh = NULL })

/**
 * @typedef waitq_link_list_t
 *
 * @brief
 * List of wait queue links (used for cleanup).
 *
 * @discussion
 * This type is engineered so that the way it links elements is equivalent
 * to the "forward" linking of a circle queue.
 */
typedef struct waitq_link_list_entry {
        struct waitq_link_list_entry *next;
} waitq_link_list_t;

/**
 * @enum waitq_type_t
 *
 * @brief
 * List of all possible wait queue (and wait queue set) types.
 *
 * @description
 * (I) mark IRQ safe queues
 * (P) mark queues that prepost to sets
 * (S) mark wait queue sets
 * (keep those together to allow range checks for irq-safe/sets)
 */
__enum_decl(waitq_type_t, uint32_t, {
        WQT_INVALID     = 0x0, /**< ( ) invalid type, uninitialized          */
        WQT_QUEUE       = 0x1, /**< (I) general wait queue                   */
        WQT_TURNSTILE   = 0x2, /**< (I) wait queue used in @c turnstile      */
        WQT_PORT        = 0x3, /**< (P) wait queue used in @c ipc_port_t     */
        WQT_SELECT      = 0x4, /**< (P) wait queue used in @c selinfo        */
        WQT_PORT_SET    = 0x5, /**< (S) wait queue set used in @c ipc_pset_t */
        WQT_SELECT_SET  = 0x6, /**< (S) wait queue set used for @c select()  */
});

#ifdef MACH_KERNEL_PRIVATE
#pragma mark Mach-only types and helpers

/*
 * The waitq needs WAITQ_FLAGS_BITS, which leaves 27 or 59 bits
 * for the eventmask.
 */
#define WAITQ_FLAGS_BITS  5
#define _EVENT_MASK_BITS  (8 * sizeof(waitq_flags_t) - WAITQ_FLAGS_BITS)

#if __arm64__
typedef uint32_t       waitq_flags_t;
#else
typedef unsigned long  waitq_flags_t;
#endif

/* Make sure the port abuse of bits doesn't overflow the eventmask size */
#define WAITQ_FLAGS_OVERFLOWS(...) \
        (sizeof(struct { waitq_flags_t bits : WAITQ_FLAGS_BITS, __VA_ARGS__; }) \
        > sizeof(waitq_flags_t))

#define WAITQ_FLAGS(prefix, ...) \
        struct { \
                waitq_type_t prefix##_type:3; \
                waitq_flags_t \
                    prefix##_fifo:1,       /* fifo wakeup policy? */ \
                    prefix##_preposted:1   /* queue was preposted */ \
                    - 2 * WAITQ_FLAGS_OVERFLOWS(__VA_ARGS__), \
                    __VA_ARGS__; \
        }

/*
 * _type:
 *     the waitq type (a WQT_* value)
 *
 * _fifo:
 *     whether the wakeup policy is FIFO or LIFO.
 *
 * _preposted:
 *     o WQT_PORT: the port message queue is not empty
 *     o WQT_SELECT_SET: has the set been preposted to
 *     o others: unused
 *
 * _eventmask:
 *     o WQT_QUEUE: (global queues) mask events being waited on
 *     o WQT_PORT: many bits (see ipc_port_t)
 *     o WQT_PORT_SET: port_set index in its space
 *     o WQT_SELECT_SET: selset_conflict (is the conflict queue hooked)
 *     o other: unused
 *
 * _interlock:
 *     The lock of the waitq/waitq_set
 *
 * _queue/_prio_queue/_ts:
 *     o WQT_QUEUE,
 *       WQT_SELECT,
 *       WQT_PORT_SET,
 *       WQT_SELECT_SET: circle queue of waiting threads
 *     o WQT_TURNSTILE: priority queue of waiting threads
 *     o WQT_PORT: pointer to the receive turnstile of the port
 *
 * _links/_inheritor/_sellinks:
 *     o WQT_PORT: linkages to WQT_PORT_SET waitq sets
 *     o WQT_SELECT: linkages to WQT_SELECT_SET select sets
 *     o WQT_TURNSTILE: turnstile inheritor
 *     o WQT_PORT_SET: WQT_PORT linkages that haven't preposted
 *     o other: unused
 */
#define WAITQ_HDR(prefix, ...) \
        WAITQ_FLAGS(prefix, __VA_ARGS__); \
        hw_lck_ticket_t prefix##_interlock; \
        uint8_t prefix##_padding[sizeof(waitq_flags_t) - \
            sizeof(hw_lck_ticket_t)]; \
        union { \
                circle_queue_head_t             prefix##_queue; \
                struct priority_queue_sched_max prefix##_prio_queue; \
                struct turnstile               *prefix##_ts; \
        }; \
        union { \
                circle_queue_head_t      prefix##_links; \
                waitq_link_list_t        prefix##_sellinks; \
                void                    *prefix##_inheritor; \
                struct mpsc_queue_chain  prefix##_defer; \
        }

/**
 * @struct waitq
 *
 * @discussion
 * This is the definition of the common event wait queue
 * that the scheduler APIs understand. It is used
 * internally by the generalized event waiting mechanism
 * (assert_wait), and also for items that maintain their
 * own wait queues (such as ports and semaphores).
 *
 * It is not published to other kernel components.
 *
 * NOTE: Hardware locks are used to protect event wait
 * queues since interrupt code is free to post events to
 * them.
 */
struct waitq {
        WAITQ_HDR(waitq, waitq_eventmask:_EVENT_MASK_BITS);
} __attribute__((aligned(WQ_OPAQUE_ALIGN)));

/**
 * @struct waitq_set
 *
 * @brief
 * This is the definition of a waitq set used in port-sets.
 *
 * @discussion
 * The wqset_index field is used to stash the pset index for debugging
 * purposes (not the full name as it would truncate).
 */
struct waitq_set {
        WAITQ_HDR(wqset, wqset_index:_EVENT_MASK_BITS);
        circle_queue_head_t wqset_preposts;
};

/**
 * @struct select_set
 *
 * @brief
 * This is the definition of a waitq set used to back the select syscall.
 */
struct select_set {
        WAITQ_HDR(selset, selset_conflict:1);
        uint64_t selset_id;
};

static inline waitq_type_t
waitq_type(waitq_t wq)
{
        return wq.wq_q->waitq_type;
}

static inline bool
waitq_same(waitq_t wq1, waitq_t wq2)
{
        return wq1.wq_q == wq2.wq_q;
}

static inline bool
waitq_is_null(waitq_t wq)
{
        return wq.wq_q == NULL;
}

/*!
 * @function waitq_wait_possible()
 *
 * @brief
 * Check if the thread is in a state where it could assert wait.
 *
 * @discussion
 * If a thread is between @c assert_wait() and @c thread_block(),
 * another assert wait is not allowed.
 */
extern bool waitq_wait_possible(thread_t thread);

static inline bool
waitq_preposts(waitq_t wq)
{
        switch (waitq_type(wq)) {
        case WQT_PORT:
        case WQT_SELECT:
                return true;
        default:
                return false;
        }
}

static inline bool
waitq_irq_safe(waitq_t waitq)
{
        switch (waitq_type(waitq)) {
        case WQT_QUEUE:
        case WQT_TURNSTILE:
                return true;
        default:
                return false;
        }
}

static inline bool
waitq_valid(waitq_t waitq)
{
        return waitq.wq_q && waitq.wq_q->waitq_interlock.lck_valid;
}

/*
 * global waitqs
 */
extern struct waitq *_global_eventq(char *event, size_t event_length);
#define global_eventq(event) _global_eventq((char *)&(event), sizeof(event))

static inline waitq_wakeup_flags_t
waitq_flags_splx(spl_t spl_level)
{
        return spl_level ? WAITQ_ENABLE_INTERRUPTS : WAITQ_WAKEUP_DEFAULT;
}
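
/*
 * Example (illustrative sketch): a typical IRQ-safe wakeup path pairs an
 * splsched()/splx() section with waitq_flags_splx() so that the *_locked
 * wakeup call both unlocks the queue and restores the interrupt state.
 * The waited-on object below is hypothetical.
 *
 *      struct waitq *wq = global_eventq(some_object);
 *      spl_t s = splsched();
 *
 *      waitq_lock(wq);
 *      (void)waitq_wakeup64_all_locked(wq,
 *          (event64_t)(uintptr_t)&some_object, THREAD_AWAKENED,
 *          WAITQ_UNLOCK | waitq_flags_splx(s));
 *      // wq is unlocked (and interrupts re-enabled) by the call itself.
 */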

#endif /* MACH_KERNEL_PRIVATE */
#pragma mark locking

/*!
 * @function waitq_lock()
 *
 * @brief
 * Lock a wait queue or wait queue set.
 *
 * @discussion
 * It is the responsibility of the caller to disable
 * interrupts if the queue is IRQ safe.
 */
extern void waitq_lock(waitq_t wq);

/*!
 * @function waitq_unlock()
 *
 * @brief
 * Unlock a wait queue or wait queue set.
 *
 * @discussion
 * It is the responsibility of the caller to reenable
 * interrupts if the queue is IRQ safe.
 */
extern void waitq_unlock(waitq_t wq);
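
/*
 * Example (illustrative sketch): callers must bracket IRQ-safe queues with
 * an spl section themselves; non IRQ-safe queues (ports, sets) are locked
 * directly. The wait queues below are hypothetical.
 *
 *      // IRQ-safe wait queue (WQT_QUEUE / WQT_TURNSTILE):
 *      spl_t s = splsched();
 *      waitq_lock(wq_irq);
 *      // ... inspect or modify waiter state ...
 *      waitq_unlock(wq_irq);
 *      splx(s);
 *
 *      // non IRQ-safe wait queue (e.g. a port's wait queue):
 *      waitq_lock(wq_port);
 *      // ...
 *      waitq_unlock(wq_port);
 */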

/**
 * @function waitq_is_valid()
 *
 * @brief
 * Returns whether a wait queue or wait queue set is still valid
 * (i.e. hasn't been invalidated).
 */
extern bool waitq_is_valid(waitq_t wq);

#ifdef MACH_KERNEL_PRIVATE

/**
 * @function waitq_invalidate()
 *
 * @brief
 * Invalidate a waitq.
 *
 * @discussion
 * It is the responsibility of the caller to make sure that:
 * - all waiters are woken up
 * - linkages and preposts are cleared (non IRQ Safe waitqs).
 */
extern void waitq_invalidate(waitq_t wq);

/*!
 * @function waitq_held()
 *
 * @brief
 * Returns whether someone is holding the lock of the specified wait queue.
 */
extern bool waitq_held(waitq_t wq) __result_use_check;

/*!
 * @function waitq_lock_allow_invalid()
 *
 * @brief
 * Lock the specified wait queue if it is valid.
 *
 * @discussion
 * This function allows for the backing memory of the specified wait queue
 * to be unmapped.
 *
 * Combining this with the zone allocator @c ZC_SEQUESTER feature
 * (along with @c ZC_ZFREE_CLEARMEM) allows clever schemes to be built
 * (see @c ipc_right_lookup_read()).
 */
extern bool waitq_lock_allow_invalid(waitq_t wq) __result_use_check;

/*!
 * @function waitq_lock_reserve()
 *
 * @brief
 * Reserves the lock of the specified wait queue.
 *
 * @discussion
 * Wait queue locks are "ordered" and a reservation in the lock queue
 * can be acquired. This can be used to resolve certain lock inversions
 * without any risk of the memory backing the wait queue disappearing.
 *
 * See <kern/ticket_lock.h> for details.
 *
 * @param wq      the specified wait queue
 * @param ticket  a pointer to memory to hold the reservation
 * @returns
 * - true if the lock was acquired
 * - false otherwise, and @c waitq_lock_wait() @em must be called
 *   to wait for this ticket.
 */
extern bool waitq_lock_reserve(waitq_t wq, uint32_t *ticket) __result_use_check;

/*!
 * @function waitq_lock_wait()
 *
 * @brief
 * Wait for a ticket acquired with @c waitq_lock_reserve().
 */
extern void waitq_lock_wait(waitq_t wq, uint32_t ticket);
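
/*
 * Example (illustrative sketch): resolving a lock ordering problem with a
 * reservation. If the reservation does not grant the lock immediately, the
 * caller can drop the conflicting lock before waiting for its turn, then
 * re-take it in a safe order and revalidate state. @c other_lock is
 * hypothetical.
 *
 *      uint32_t ticket;
 *
 *      if (!waitq_lock_reserve(wq, &ticket)) {
 *              // drop the lock that would invert with the waitq lock
 *              lck_mtx_unlock(other_lock);
 *              waitq_lock_wait(wq, ticket);
 *              // the waitq lock is now held; re-take other_lock afterwards
 *              // and revalidate whatever it protected.
 *      }
 *      waitq_unlock(wq);
 */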

/*!
 * @function waitq_lock_try()
 *
 * @brief
 * Attempts to acquire the lock of the specified wait queue.
 *
 * @discussion
 * Using @c waitq_lock_try() is discouraged as it leads to inefficient
 * algorithms prone to contention.
 *
 * Schemes based on @c waitq_lock_reserve() / @c waitq_lock_wait()
 * are preferred.
 */
extern bool waitq_lock_try(waitq_t wq) __result_use_check;

#endif /* MACH_KERNEL_PRIVATE */
#pragma mark assert_wait / wakeup

/**
 * @function waitq_assert_wait64()
 *
 * @brief
 * Declare a thread's intent to wait on @c waitq for @c wait_event.
 *
 * @discussion
 * @c waitq must be unlocked.
 */
extern wait_result_t waitq_assert_wait64(
        waitq_t                 waitq,
        event64_t               wait_event,
        wait_interrupt_t        interruptible,
        uint64_t                deadline);
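
/*
 * Example (illustrative sketch): the classic two-step wait. The event is
 * conventionally the address of the object being waited on; a deadline of
 * 0 is assumed here to mean "no timeout". @c my_obj is hypothetical.
 *
 *      wait_result_t wr;
 *
 *      wr = waitq_assert_wait64(wq, (event64_t)(uintptr_t)&my_obj,
 *          THREAD_UNINT, 0);
 *      if (wr == THREAD_WAITING) {
 *              wr = thread_block(THREAD_CONTINUE_NULL);
 *      }
 *      // wr is THREAD_AWAKENED, THREAD_TIMED_OUT, THREAD_INTERRUPTED, ...
 */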

/**
 * @function waitq_assert_wait64_leeway()
 *
 * @brief
 * Declare a thread's intent to wait on @c waitq for @c wait_event.
 *
 * @discussion
 * @c waitq must be unlocked.
 */
extern wait_result_t waitq_assert_wait64_leeway(
        waitq_t                 waitq,
        event64_t               wait_event,
        wait_interrupt_t        interruptible,
        wait_timeout_urgency_t  urgency,
        uint64_t                deadline,
        uint64_t                leeway);

/**
 * @function waitq_wakeup64_one()
 *
 * @brief
 * Wakeup a single thread from a waitq that's waiting for a given event.
 *
 * @discussion
 * @c waitq must be unlocked.
 */
extern kern_return_t waitq_wakeup64_one(
        waitq_t                 waitq,
        event64_t               wake_event,
        wait_result_t           result,
        waitq_wakeup_flags_t    flags);

/**
 * @function waitq_wakeup64_all()
 *
 * @brief
 * Wakeup all threads from a waitq that are waiting for a given event.
 *
 * @description
 * This function will set the inheritor of the wait queue
 * to TURNSTILE_INHERITOR_NULL if it is a turnstile wait queue.
 *
 * @c waitq must be unlocked.
 */
extern kern_return_t waitq_wakeup64_all(
        waitq_t                 waitq,
        event64_t               wake_event,
        wait_result_t           result,
        waitq_wakeup_flags_t    flags);

/**
 * @function waitq_wakeup64_identify()
 *
 * @brief
 * Wakeup one thread waiting on @c waitq for @c wake_event.
 *
 * @discussion
 * @c waitq must be unlocked.
 *
 * May temporarily disable and re-enable interrupts.
 *
 * @returns
 * - THREAD_NULL if no thread was waiting
 * - a reference to a thread that was waiting on @c waitq.
 */
extern thread_t waitq_wakeup64_identify(
        waitq_t                 waitq,
        event64_t               wake_event,
        wait_result_t           result,
        waitq_wakeup_flags_t    flags);
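
/*
 * Example (illustrative sketch): waking a waiter and doing extra work with
 * it before letting go of it. It is assumed here that the returned thread
 * reference is released with thread_deallocate(). @c my_obj is hypothetical.
 *
 *      thread_t th;
 *
 *      th = waitq_wakeup64_identify(wq, (event64_t)(uintptr_t)&my_obj,
 *          THREAD_AWAKENED, WAITQ_WAKEUP_DEFAULT);
 *      if (th != THREAD_NULL) {
 *              // inspect / account for the woken thread here
 *              thread_deallocate(th);
 *      }
 */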

/**
 * @function waitq_wakeup64_thread()
 *
 * @brief
 * Wakeup a specific thread iff it's waiting on @c waitq for @c wake_event.
 *
 * @discussion
 * @c waitq must be unlocked and must be IRQ safe.
 * @c thread must be unlocked.
 *
 * May temporarily disable and re-enable interrupts.
 */
extern kern_return_t waitq_wakeup64_thread(
        struct waitq           *waitq,
        event64_t               wake_event,
        thread_t                thread,
        wait_result_t           result);

#pragma mark Mach-only assert_wait / wakeup
#ifdef MACH_KERNEL_PRIVATE

/**
 * @function waitq_clear_promotion_locked()
 *
 * @brief
 * Clear a potential thread priority promotion from a waitq wakeup
 * with @c WAITQ_PROMOTE_PRIORITY.
 *
 * @discussion
 * @c waitq must be locked.
 *
 * This must be called on the thread which was woken up
 * with @c TH_SFLAG_WAITQ_PROMOTED.
 */
extern void waitq_clear_promotion_locked(
        waitq_t                 waitq,
        thread_t                thread);

/**
 * @function waitq_pull_thread_locked()
 *
 * @brief
 * Remove @c thread from its current blocking state on @c waitq.
 *
 * @discussion
 * This function is only used by clear_wait_internal in sched_prim.c
 * (which itself is called by the timer wakeup path and clear_wait()).
 *
 * @c thread must be locked (the function might drop and reacquire the lock).
 *
 * @returns
 * - true if the thread has been pulled successfully.
 * - false otherwise, if the thread was no longer waiting on this waitq.
 */
extern bool waitq_pull_thread_locked(
        waitq_t                 waitq,
        thread_t                thread);

/**
 * @function waitq_assert_wait64_locked()
 *
 * @brief
 * Declare a thread's intent to wait on @c waitq for @c wait_event.
 *
 * @discussion
 * @c waitq must be locked.
 *
 * Note that @c waitq might be unlocked and relocked during this call
 * if it is a waitq set.
 */
extern wait_result_t waitq_assert_wait64_locked(
        waitq_t                 waitq,
        event64_t               wait_event,
        wait_interrupt_t        interruptible,
        wait_timeout_urgency_t  urgency,
        uint64_t                deadline,
        uint64_t                leeway,
        thread_t                thread);

/**
 * @function waitq_wakeup64_all_locked()
 *
 * @brief
 * Wakeup all threads waiting on @c waitq for @c wake_event.
 *
 * @discussion
 * @c waitq must be locked.
 *
 * May temporarily disable and re-enable interrupts
 * and re-adjust thread priority of each awoken thread.
 */
extern kern_return_t waitq_wakeup64_all_locked(
        waitq_t                 waitq,
        event64_t               wake_event,
        wait_result_t           result,
        waitq_wakeup_flags_t    flags);

/**
 * @function waitq_wakeup64_one_locked()
 *
 * @brief
 * Wakeup one thread waiting on @c waitq for @c wake_event.
 *
 * @discussion
 * @c waitq must be locked.
 *
 * May temporarily disable and re-enable interrupts.
 */
extern kern_return_t waitq_wakeup64_one_locked(
        waitq_t                 waitq,
        event64_t               wake_event,
        wait_result_t           result,
        waitq_wakeup_flags_t    flags);

/**
 * @function waitq_wakeup64_identify_locked()
 *
 * @brief
 * Wakeup one thread waiting on @c waitq for @c wake_event.
 *
 * @returns
 * A thread that has been pulled from @c waitq but not yet set runnable.
 * It must be paired with @c waitq_resume_identified_thread() to set it
 * runnable; preemption stays disabled between these two points.
 */
extern thread_t waitq_wakeup64_identify_locked(
        waitq_t                 waitq,
        event64_t               wake_event,
        wait_result_t           result,
        waitq_wakeup_flags_t    flags);

/**
 * @function waitq_resume_identified_thread()
 *
 * @brief
 * Set a thread runnable that has been woken with
 * @c waitq_wakeup64_identify_locked().
 */
extern void waitq_resume_identified_thread(
        waitq_t                 waitq,
        thread_t                thread,
        wait_result_t           result,
        waitq_wakeup_flags_t    flags);
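
/*
 * Example (illustrative sketch, assuming the wait queue may be unlocked
 * between the two calls since preemption stays disabled): the two-phase
 * wakeup used when the caller needs to hand state to the thread before it
 * becomes runnable. @c my_obj is hypothetical.
 *
 *      thread_t th;
 *
 *      waitq_lock(wq);
 *      th = waitq_wakeup64_identify_locked(wq,
 *          (event64_t)(uintptr_t)&my_obj, THREAD_AWAKENED,
 *          WAITQ_KEEP_LOCKED);
 *      waitq_unlock(wq);
 *      if (th != THREAD_NULL) {
 *              // transfer state to the woken thread here
 *              waitq_resume_identified_thread(wq, th, THREAD_AWAKENED,
 *                  WAITQ_WAKEUP_DEFAULT);
 *      }
 */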

/**
 * @function waitq_resume_and_bind_identified_thread()
 *
 * @brief
 * Set a thread runnable that has been woken with
 * waitq_wakeup64_identify_locked, and bind it to a processor at the same time.
 */
extern void waitq_resume_and_bind_identified_thread(
        waitq_t                 waitq,
        thread_t                thread,
        processor_t             processor,
        wait_result_t           result,
        waitq_wakeup_flags_t    flags);

/**
 * @function waitq_wakeup64_thread_and_unlock()
 *
 * @brief
 * Wakeup a specific thread iff it's waiting on @c waitq for @c wake_event.
 *
 * @discussion
 * @c waitq must be IRQ safe and locked, unlocked on return.
 * @c thread must be unlocked.
 */
extern kern_return_t waitq_wakeup64_thread_and_unlock(
        struct waitq           *waitq,
        event64_t               wake_event,
        thread_t                thread,
        wait_result_t           result);

#endif /* MACH_KERNEL_PRIVATE */
#pragma mark waitq links

/*!
 * @function waitq_link_alloc()
 *
 * @brief
 * Allocates a linkage object to be used with a wait queue of the specified type.
 */
extern waitq_link_t waitq_link_alloc(
        waitq_type_t            type);

/*!
 * @function waitq_link_free()
 *
 * @brief
 * Frees a linkage object that was used with a wait queue of the specified type.
 */
extern void waitq_link_free(
        waitq_type_t            type,
        waitq_link_t            link);

/*!
 * @function waitq_link_free_list()
 *
 * @brief
 * Frees a list of linkage objects that were used with wait queues
 * of the specified type.
 */
extern void waitq_link_free_list(
        waitq_type_t            type,
        waitq_link_list_t      *list);
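
/*
 * Example (illustrative sketch): a caller typically allocates the link
 * before taking any waitq locks, links while holding them, and frees any
 * link that was not consumed. It is assumed here that the linking call
 * clears the link when it uses it, as @c select_set_link() documents below.
 *
 *      waitq_link_t link = waitq_link_alloc(WQT_PORT_SET);
 *
 *      // ... lock the port waitq and the port-set waitq set, call
 *      // waitq_link_locked(waitq, wqset, &link), then unlock both ...
 *
 *      if (link.wqlh != NULL) {
 *              // the link was not consumed, give it back
 *              waitq_link_free(WQT_PORT_SET, link);
 *      }
 */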

#pragma mark wait queues lifecycle

/*!
 * @function waitq_init()
 *
 * @brief
 * Initializes a wait queue.
 *
 * @discussion
 * @c type must be a valid type.
 */
extern void waitq_init(
        waitq_t                 waitq,
        waitq_type_t            type,
        int                     policy);

/*!
 * @function waitq_deinit()
 *
 * @brief
 * Destroys a wait queue.
 *
 * @discussion
 * @c waitq can't be a select set.
 */
extern void waitq_deinit(
        waitq_t                 waitq);
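
/*
 * Example (illustrative sketch): an object embedding its own wait queue
 * initializes it with a type and sync policy, and tears it down with
 * waitq_deinit(). SYNC_POLICY_FIFO comes from <mach/sync_policy.h>;
 * @c struct my_obj is hypothetical.
 *
 *      struct my_obj {
 *              struct waitq    mo_waitq;
 *              // ...
 *      };
 *
 *      waitq_init(&obj->mo_waitq, WQT_QUEUE, SYNC_POLICY_FIFO);
 *      // ... assert_wait / wakeup on &obj->mo_waitq ...
 *      waitq_deinit(&obj->mo_waitq);
 */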

#pragma mark port wait queues and port set waitq sets
#ifdef MACH_KERNEL_PRIVATE

/**
 * @function waitq_link_locked()
 *
 * @brief
 * Link the specified port wait queue to a specified port set wait queue set.
 *
 * @discussion
 * This function doesn't handle preposting/waking up the set
 * when the wait queue is already preposted.
 *
 * @param waitq  the port wait queue to link, must be locked.
 * @param wqset  the port set wait queue set to link, must be locked.
 * @param link   a pointer to a link allocated with
 *               @c waitq_link_alloc(WQT_PORT_SET).
 */
extern kern_return_t waitq_link_locked(
        struct waitq           *waitq,
        struct waitq_set       *wqset,
        waitq_link_t           *link);

/**
 * @function waitq_link_prepost_locked()
 *
 * @brief
 * Force a given link to be preposted.
 *
 * @param waitq  the port wait queue to link, must be locked.
 * @param wqset  the port set wait queue set to link, must be locked.
 */
extern kern_return_t waitq_link_prepost_locked(
        struct waitq           *waitq,
        struct waitq_set       *wqset);

/**
 * @function waitq_unlink_locked()
 *
 * @brief
 * Unlinks the specified port wait queue from a specified port set wait queue set.
 *
 * @param waitq  the port wait queue to unlink, must be locked.
 * @param wqset  the port set wait queue set to unlink from, must be locked.
 * @returns
 * - @c WQL_NULL if the port wasn't a member of the set.
 * - a link to consume with @c waitq_link_free() otherwise.
 */
extern waitq_link_t waitq_unlink_locked(
        struct waitq           *waitq,
        struct waitq_set       *wqset);

/**
 * @function waitq_unlink_all_locked()
 *
 * @brief
 * Unlink the specified wait queue from all sets to which it belongs.
 *
 * @param waitq         the port wait queue to unlink, must be locked.
 * @param except_wqset  do not unlink this wqset.
 * @param free_l        a waitq link list to which links to free will be added.
 *                      the caller must call @c waitq_link_free_list() on it.
 */
extern void waitq_unlink_all_locked(
        struct waitq           *waitq,
        struct waitq_set       *except_wqset,
        waitq_link_list_t      *free_l);

/**
 * @function waitq_set_unlink_all_locked()
 *
 * @brief
 * Unlink all wait queues from this set.
 *
 * @discussion
 * The @c wqset lock might be dropped and reacquired during this call.
 *
 * @param wqset   the port-set wait queue set to unlink, must be locked.
 * @param free_l  a waitq link list to which links to free will be added.
 *                the caller must call @c waitq_link_free_list() on it.
 */
extern void waitq_set_unlink_all_locked(
        struct waitq_set       *wqset,
        waitq_link_list_t      *free_l);

/**
 * @function waitq_set_foreach_member_locked()
 *
 * @brief
 * Iterate over all port wait queues that are members of a port-set wait queue set.
 *
 * @param wqset  the port-set wait queue set to iterate, must be locked.
 * @param cb     a block called for each port wait queue in the set.
 *               those wait queues aren't locked (and can't safely
 *               be because @c wqset is locked the whole time
 *               and this would constitute a lock inversion).
 */
extern void waitq_set_foreach_member_locked(
        struct waitq_set       *wqset,
        void                  (^cb)(struct waitq *));

__options_decl(wqs_prepost_flags_t, uint32_t, {
        WQS_PREPOST_PEEK = 0x1,
        WQS_PREPOST_LOCK = 0x2,
});

/**
 * @function waitq_set_first_prepost()
 *
 * @brief
 * Return the first preposted wait queue from the list of preposts of this set.
 *
 * @discussion
 * The @c wqset lock might be dropped and reacquired during this call.
 *
 * @param wqset  the port-set wait queue set to query, must be locked.
 * @param flags
 * - if @c WQS_PREPOST_LOCK is set, the returned wait queue is locked
 * - if @c WQS_PREPOST_PEEK is set, this function assumes that no event
 *   will be dequeued and leaves the prepost list order unchanged,
 *   otherwise the returned wait queue is moved to the end of the prepost list.
 */
struct waitq *waitq_set_first_prepost(
        struct waitq_set       *wqset,
        wqs_prepost_flags_t     flags);

/**
 * @function waitq_clear_prepost_locked()
 *
 * @brief
 * Clear all preposts originating from the specified wait queue.
 *
 * @discussion
 * @c waitq must be locked.
 *
 * This function only lazily marks the waitq as no longer preposting,
 * and doesn't clear the preposts for two reasons:
 * - it avoids some lock contention by not acquiring the set locks,
 * - it allows for ports that keep receiving messages to keep their slot
 *   in the prepost queue of sets, which improves fairness.
 *
 * Sets it is a member of will discover this when a thread
 * tries to receive through it.
 */
extern void waitq_clear_prepost_locked(
        struct waitq           *waitq);

/**
 * @function ipc_pset_prepost()
 *
 * @brief
 * Upcall from the waitq code to prepost to the kevent subsystem.
 *
 * @discussion
 * Called with the pset and waitq locks held.
 * (in ipc_pset.c).
 */
extern void ipc_pset_prepost(
        struct waitq_set       *wqset,
        struct waitq           *waitq);

#endif /* MACH_KERNEL_PRIVATE */
#pragma mark select wait queues and select port set waitq sets

extern struct waitq select_conflict_queue;

/*!
 * @function select_set_alloc()
 *
 * @brief
 * Allocates a select wait queue set.
 *
 * @discussion
 * select sets assume that they are only manipulated
 * from the context of the thread they belong to.
 */
extern struct select_set *select_set_alloc(void);

/*!
 * @function select_set_free()
 *
 * @brief
 * Frees a select set allocated with @c select_set_alloc().
 */
extern void select_set_free(
        struct select_set      *selset);

/*!
 * @function select_set_link()
 *
 * @brief
 * Links a select wait queue into a select wait queue set.
 *
 * @param waitq   a wait queue of type @c WQT_SELECT.
 * @param selset  a select set.
 * @param linkp   a pointer to a linkage allocated
 *                with @c waitq_link_alloc(WQT_SELECT_SET),
 *                which is set to @c WQL_NULL if the linkage is consumed.
 */
extern void select_set_link(
        struct waitq           *waitq,
        struct select_set      *selset,
        waitq_link_t           *linkp);
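
/*
 * Example (illustrative sketch): a selinfo-style WQT_SELECT wait queue being
 * linked into the calling thread's select set. It is assumed here that the
 * pre-allocated link is only consumed when a new linkage is actually needed;
 * the names below are hypothetical.
 *
 *      waitq_link_t link = waitq_link_alloc(WQT_SELECT_SET);
 *
 *      select_set_link(&si->si_waitq, selset, &link);
 *      if (link.wqlh != NULL) {
 *              // the link wasn't needed (already linked), free it
 *              waitq_link_free(WQT_SELECT_SET, link);
 *      }
 */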

/*!
 * @function select_set_reset()
 *
 * @brief
 * Resets a select set to prepare it for reuse.
 *
 * @discussion
 * This operation is lazy and will not unlink select wait queues
 * from the select set.
 */
extern void select_set_reset(
        struct select_set      *selset);

/*!
 * @function select_waitq_wakeup_and_deinit()
 *
 * @brief
 * Combined wakeup, unlink, and deinit under a single lock hold for select().
 *
 * @discussion
 * @c waitq must be a @c WQT_SELECT queue.
 */
extern void select_waitq_wakeup_and_deinit(
        struct waitq           *waitq,
        event64_t               wake_event,
        wait_result_t           result);

#endif /* XNU_KERNEL_PRIVATE */

#pragma GCC visibility pop

__ASSUME_PTR_ABI_SINGLE_END __END_DECLS

#endif /* KERNEL_PRIVATE */
#endif /* _WAITQ_H_ */