1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*-
29 * Copyright (c) 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 4. Neither the name of the University nor the names of its contributors
41 * may be used to endorse or promote products derived from this software
42 * without specific prior written permission.
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
45 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
47 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
48 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
49 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
50 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
51 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
53 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
54 * SUCH DAMAGE.
55 *
56 * @(#)queue.h 8.5 (Berkeley) 8/20/94
57 */
58
59#ifndef _SYS_QUEUE_H_
60#define _SYS_QUEUE_H_
61
62#ifdef KERNEL_PRIVATE
63#include <kern/debug.h> /* panic function call */
64#include <sys/cdefs.h> /* __improbable in kernelspace */
65#else
66#ifndef __improbable
67#define __improbable(x) (x) /* noop in userspace */
68#endif /* __improbable */
69#endif /* KERNEL_PRIVATE */
70
71/*
72 * This file defines five types of data structures: singly-linked lists,
73 * singly-linked tail queues, lists, tail queues, and circular queues.
74 *
75 * A singly-linked list is headed by a single forward pointer. The elements
76 * are singly linked for minimum space and pointer manipulation overhead at
77 * the expense of O(n) removal for arbitrary elements. New elements can be
78 * added to the list after an existing element or at the head of the list.
79 * Elements being removed from the head of the list should use the explicit
80 * macro for this purpose for optimum efficiency. A singly-linked list may
81 * only be traversed in the forward direction. Singly-linked lists are ideal
82 * for applications with large datasets and few or no removals or for
83 * implementing a LIFO queue.
84 *
85 * A singly-linked tail queue is headed by a pair of pointers, one to the
86 * head of the list and the other to the tail of the list. The elements are
87 * singly linked for minimum space and pointer manipulation overhead at the
88 * expense of O(n) removal for arbitrary elements. New elements can be added
89 * to the list after an existing element, at the head of the list, or at the
90 * end of the list. Elements being removed from the head of the tail queue
91 * should use the explicit macro for this purpose for optimum efficiency.
92 * A singly-linked tail queue may only be traversed in the forward direction.
93 * Singly-linked tail queues are ideal for applications with large datasets
94 * and few or no removals or for implementing a FIFO queue.
95 *
96 * A list is headed by a single forward pointer (or an array of forward
97 * pointers for a hash table header). The elements are doubly linked
98 * so that an arbitrary element can be removed without a need to
99 * traverse the list. New elements can be added to the list before
100 * or after an existing element or at the head of the list. A list
101 * may only be traversed in the forward direction.
102 *
103 * A tail queue is headed by a pair of pointers, one to the head of the
104 * list and the other to the tail of the list. The elements are doubly
105 * linked so that an arbitrary element can be removed without a need to
106 * traverse the list. New elements can be added to the list before or
107 * after an existing element, at the head of the list, or at the end of
108 * the list. A tail queue may be traversed in either direction.
109 *
110 * A circle queue is headed by a pair of pointers, one to the head of the
111 * list and the other to the tail of the list. The elements are doubly
112 * linked so that an arbitrary element can be removed without a need to
113 * traverse the list. New elements can be added to the list before or after
114 * an existing element, at the head of the list, or at the end of the list.
115 * A circle queue may be traversed in either direction, but has a more
116 * complex end of list detection.
117 * Note that circle queues are deprecated, because, as the removal log
118 * in FreeBSD states, "CIRCLEQs are a disgrace to everything Knuth taught
119 * us in Volume 1 Chapter 2. [...] Use TAILQ instead, it provides the same
120 * functionality." Code using them will continue to compile, but they
121 * are no longer documented on the man page.
122 *
123 * For details on the use of these macros, see the queue(3) manual page.
124 *
125 *
126 * SLIST LIST STAILQ TAILQ CIRCLEQ
127 * _HEAD + + + + +
128 * _HEAD_INITIALIZER + + + + -
129 * _ENTRY + + + + +
130 * _INIT + + + + +
131 * _EMPTY + + + + +
132 * _FIRST + + + + +
133 * _NEXT + + + + +
134 * _PREV - - - + +
135 * _LAST - - + + +
136 * _FOREACH + + + + +
137 * _FOREACH_SAFE + + + + -
138 * _FOREACH_REVERSE - - - + -
139 * _FOREACH_REVERSE_SAFE - - - + -
140 * _INSERT_HEAD + + + + +
141 * _INSERT_BEFORE - + - + +
142 * _INSERT_AFTER + + + + +
143 * _INSERT_TAIL - - + + +
144 * _CONCAT - - + + -
145 * _REMOVE_AFTER + - + - -
146 * _REMOVE_HEAD + - + - -
147 * _REMOVE_HEAD_UNTIL - - + - -
148 * _REMOVE + + + + +
149 * _SWAP - + + + -
150 *
151 */
#ifdef QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
	char * lastfile;        /* file of the most recent mutation */
	int lastline;           /* line of the most recent mutation */
	char * prevfile;        /* file of the mutation before that */
	int prevline;           /* line of the mutation before that */
};

/* Embed a trace record inside each queue head/entry while debugging. */
#define TRACEBUF        struct qm_trace trace;
/* Poison a pointer after unlink so stale use faults immediately. */
#define TRASHIT(x)      do {(x) = (void *)-1;} while (0)

/* Record __FILE__/__LINE__ of the current head mutation, demoting the
 * previously recorded location into the "prev" slot. */
#define QMD_TRACE_HEAD(head) do { \
	(head)->trace.prevline = (head)->trace.lastline; \
	(head)->trace.prevfile = (head)->trace.lastfile; \
	(head)->trace.lastline = __LINE__; \
	(head)->trace.lastfile = __FILE__; \
} while (0)

/* Same as QMD_TRACE_HEAD but for an element's embedded trace record. */
#define QMD_TRACE_ELEM(elem) do { \
	(elem)->trace.prevline = (elem)->trace.lastline; \
	(elem)->trace.prevfile = (elem)->trace.lastfile; \
	(elem)->trace.lastline = __LINE__; \
	(elem)->trace.lastfile = __FILE__; \
} while (0)

#else
/* Non-debug builds: tracing and pointer poisoning compile away. */
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define TRACEBUF
#define TRASHIT(x)
#endif  /* QUEUE_MACRO_DEBUG */
184
/*
 * Horrible macros to enable use of code that was meant to be C-specific
 * (and which push struct onto type) in C++; without these, C++ code
 * that uses these macros in the context of a class will blow up
 * due to "struct" being prepended to "type" by the macros, causing
 * inconsistent use of tags.
 *
 * This approach is necessary because these are macros; we have to use
 * these on a per-macro basis (because the queues are implemented as
 * macros, disabling this warning in the scope of the header file is
 * insufficient), which means we can't use #pragma, and have to use
 * _Pragma.  We only need to use these for the queue macros that
 * prepend "struct" to "type" and will cause C++ to blow up.
 */
#if defined(__clang__) && defined(__cplusplus)
/* Silence -Wmismatched-tags around the struct declarations the queue
 * macros generate, so they remain usable from C++ class scope. */
#define __MISMATCH_TAGS_PUSH                                            \
	_Pragma("clang diagnostic push")                                \
	_Pragma("clang diagnostic ignored \"-Wmismatched-tags\"")
#define __MISMATCH_TAGS_POP                                             \
	_Pragma("clang diagnostic pop")
#else
/* No-ops for compilers/languages where the warning does not exist. */
#define __MISMATCH_TAGS_PUSH
#define __MISMATCH_TAGS_POP
#endif
209
210/*!
211 * Ensures that these macros can safely be used in structs when compiling with
212 * clang. The macros do not allow for nullability attributes to be specified due
213 * to how they are expanded. For example:
214 *
215 * SLIST_HEAD(, foo _Nullable) bar;
216 *
217 * expands to
218 *
219 * struct {
220 * struct foo _Nullable *slh_first;
221 * }
222 *
223 * which is not valid because the nullability specifier has to apply to the
224 * pointer. So just ignore nullability completeness in all the places where this
225 * is an issue.
226 */
#if defined(__clang__)
/* Silence -Wnullability-completeness inside macro-generated structs;
 * the expansion cannot attach nullability to the pointer (see above). */
#define __NULLABILITY_COMPLETENESS_PUSH                                 \
	_Pragma("clang diagnostic push")                                \
	_Pragma("clang diagnostic ignored \"-Wnullability-completeness\"")
#define __NULLABILITY_COMPLETENESS_POP                                  \
	_Pragma("clang diagnostic pop")
#else
/* No-ops for non-clang compilers. */
#define __NULLABILITY_COMPLETENESS_PUSH
#define __NULLABILITY_COMPLETENESS_POP
#endif
237
238/*
239 * Singly-linked List declarations.
240 */
/* Head of a singly-linked list: just a pointer to the first element
 * (NULL when the list is empty). */
#define SLIST_HEAD(name, type)                                          \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct name {                                                           \
	struct type *slh_first; /* first element */                     \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Static initializer for an empty SLIST_HEAD. */
#define SLIST_HEAD_INITIALIZER(head)                                    \
	{ NULL }

/* Per-element linkage: a single forward pointer embedded in the element. */
#define SLIST_ENTRY(type)                                               \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct {                                                                \
	struct type *sle_next;  /* next element */                      \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP
261
262/*
263 * Singly-linked List functions.
264 */
/* True when the list has no elements. */
#define SLIST_EMPTY(head)       ((head)->slh_first == NULL)

/* First element of the list, or NULL when empty. */
#define SLIST_FIRST(head)       ((head)->slh_first)

/* Forward iteration; do not remove (var) inside the loop body. */
#define SLIST_FOREACH(var, head, field)                                 \
	for ((var) = SLIST_FIRST((head));                               \
	    (var);                                                      \
	    (var) = SLIST_NEXT((var), field))

/* Forward iteration that tolerates removal of (var); (tvar) caches the
 * successor before the loop body runs. */
#define SLIST_FOREACH_SAFE(var, head, field, tvar)                      \
	for ((var) = SLIST_FIRST((head));                               \
	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);            \
	    (var) = (tvar))

/* Iterate keeping (varp) pointed at the link that leads to (var), which
 * enables O(1) removal of the current element via *(varp). */
#define SLIST_FOREACH_PREVPTR(var, varp, head, field)                   \
	for ((varp) = &SLIST_FIRST((head));                             \
	    ((var) = *(varp)) != NULL;                                  \
	    (varp) = &SLIST_NEXT((var), field))

/* Reset the list to empty. */
#define SLIST_INIT(head) do {                                           \
	SLIST_FIRST((head)) = NULL;                                     \
} while (0)

/* Link elm into the list immediately after slistelm.  O(1). */
#define SLIST_INSERT_AFTER(slistelm, elm, field) do {                   \
	SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field);       \
	SLIST_NEXT((slistelm), field) = (elm);                          \
} while (0)

/* Link elm in as the new first element.  O(1). */
#define SLIST_INSERT_HEAD(head, elm, field) do {                        \
	SLIST_NEXT((elm), field) = SLIST_FIRST((head));                 \
	SLIST_FIRST((head)) = (elm);                                    \
} while (0)

/* Successor of elm, or NULL. */
#define SLIST_NEXT(elm, field)  ((elm)->field.sle_next)

/* Remove elm wherever it sits; O(n) walk to locate its predecessor.
 * elm must be on the list.  Poisons elm's link in debug builds. */
#define SLIST_REMOVE(head, elm, type, field)                            \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
do {                                                                    \
	if (SLIST_FIRST((head)) == (elm)) {                             \
		SLIST_REMOVE_HEAD((head), field);                       \
	}                                                               \
	else {                                                          \
		struct type *curelm = SLIST_FIRST((head));              \
		while (SLIST_NEXT(curelm, field) != (elm))              \
			curelm = SLIST_NEXT(curelm, field);             \
		SLIST_REMOVE_AFTER(curelm, field);                      \
	}                                                               \
	TRASHIT((elm)->field.sle_next);                                 \
} while (0)                                                             \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Unlink the element that follows elm.  O(1); elm must have a successor. */
#define SLIST_REMOVE_AFTER(elm, field) do {                             \
	SLIST_NEXT(elm, field) =                                        \
	    SLIST_NEXT(SLIST_NEXT(elm, field), field);                  \
} while (0)

/* Unlink the first element.  O(1); the list must be non-empty. */
#define SLIST_REMOVE_HEAD(head, field) do {                             \
	SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field);   \
} while (0)
326
327/*
328 * Singly-linked Tail queue declarations.
329 */
/* Head of a singly-linked tail queue: first element plus the address of
 * the last element's next-link (points back at stqh_first when empty). */
#define STAILQ_HEAD(name, type)                                         \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct name {                                                           \
	struct type *stqh_first;/* first element */                     \
	struct type **stqh_last;/* addr of last next element */         \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Static initializer for an empty STAILQ_HEAD. */
#define STAILQ_HEAD_INITIALIZER(head)                                   \
	{ NULL, &(head).stqh_first }

/* Per-element linkage: a single forward pointer embedded in the element. */
#define STAILQ_ENTRY(type)                                              \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct {                                                                \
	struct type *stqe_next; /* next element */                      \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP
351
352/*
353 * Singly-linked Tail queue functions.
354 */
/* Append all of head2 onto the tail of head1 and leave head2 empty.  O(1). */
#define STAILQ_CONCAT(head1, head2) do {                                \
	if (!STAILQ_EMPTY((head2))) {                                   \
		*(head1)->stqh_last = (head2)->stqh_first;              \
		(head1)->stqh_last = (head2)->stqh_last;                \
		STAILQ_INIT((head2));                                   \
	}                                                               \
} while (0)

/* True when the tail queue has no elements. */
#define STAILQ_EMPTY(head)      ((head)->stqh_first == NULL)

/* First element, or NULL when empty. */
#define STAILQ_FIRST(head)      ((head)->stqh_first)

/* Forward iteration; do not remove (var) inside the loop body. */
#define STAILQ_FOREACH(var, head, field)                                \
	for((var) = STAILQ_FIRST((head));                               \
	   (var);                                                       \
	   (var) = STAILQ_NEXT((var), field))


/* Forward iteration that tolerates removal of (var); (tvar) caches the
 * successor before the loop body runs. */
#define STAILQ_FOREACH_SAFE(var, head, field, tvar)                     \
	for ((var) = STAILQ_FIRST((head));                              \
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);           \
	    (var) = (tvar))

/* Reset the queue to empty: no first element, tail pointer aimed back
 * at the stqh_first slot. */
#define STAILQ_INIT(head) do {                                          \
	STAILQ_FIRST((head)) = NULL;                                    \
	(head)->stqh_last = &STAILQ_FIRST((head));                      \
} while (0)

/* Insert elm after tqelm; fixes the tail pointer when elm becomes last. */
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {               \
	if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);         \
	STAILQ_NEXT((tqelm), field) = (elm);                            \
} while (0)

/* Insert elm at the front; fixes the tail pointer if the queue was empty. */
#define STAILQ_INSERT_HEAD(head, elm, field) do {                       \
	if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
		(head)->stqh_last = &STAILQ_NEXT((elm), field);         \
	STAILQ_FIRST((head)) = (elm);                                   \
} while (0)

/* Append elm at the end using the cached tail pointer.  O(1). */
#define STAILQ_INSERT_TAIL(head, elm, field) do {                       \
	STAILQ_NEXT((elm), field) = NULL;                               \
	*(head)->stqh_last = (elm);                                     \
	(head)->stqh_last = &STAILQ_NEXT((elm), field);                 \
} while (0)

/* Last element, or NULL when empty; recovers the element address from
 * stqh_last (which points at its embedded link) via offsetof arithmetic. */
#define STAILQ_LAST(head, type, field)                                  \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
	(STAILQ_EMPTY((head)) ?                                         \
	NULL :                                                          \
	((struct type *)(void *)                                        \
	((char *)((head)->stqh_last) - __offsetof(struct type, field))))\
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Successor of elm, or NULL. */
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)

/* Remove elm wherever it sits; O(n) walk to locate its predecessor.
 * elm must be on the queue.  Poisons elm's link in debug builds. */
#define STAILQ_REMOVE(head, elm, type, field)                           \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
do {                                                                    \
	if (STAILQ_FIRST((head)) == (elm)) {                            \
		STAILQ_REMOVE_HEAD((head), field);                      \
	}                                                               \
	else {                                                          \
		struct type *curelm = STAILQ_FIRST((head));             \
		while (STAILQ_NEXT(curelm, field) != (elm))             \
			curelm = STAILQ_NEXT(curelm, field);            \
		STAILQ_REMOVE_AFTER(head, curelm, field);               \
	}                                                               \
	TRASHIT((elm)->field.stqe_next);                                \
} while (0)                                                             \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Unlink the first element; repairs the tail pointer when the queue
 * becomes empty.  The queue must be non-empty. */
#define STAILQ_REMOVE_HEAD(head, field) do {                            \
	if ((STAILQ_FIRST((head)) =                                     \
	    STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)          \
		(head)->stqh_last = &STAILQ_FIRST((head));              \
} while (0)

/* Detach every element from the head up to and including elm, making
 * elm's successor the new first element. */
#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do {                 \
	if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
		(head)->stqh_last = &STAILQ_FIRST((head));              \
} while (0)

/* Unlink the element after elm; repairs the tail pointer when the
 * removed element was last.  elm must have a successor. */
#define STAILQ_REMOVE_AFTER(head, elm, field) do {                      \
	if ((STAILQ_NEXT(elm, field) =                                  \
	    STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)       \
		(head)->stqh_last = &STAILQ_NEXT((elm), field);         \
} while (0)

/* Exchange the contents of two queues, then repair the tail pointer of
 * any queue that ended up empty (it must self-reference its head). */
#define STAILQ_SWAP(head1, head2, type)                                 \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
do {                                                                    \
	struct type *swap_first = STAILQ_FIRST(head1);                  \
	struct type **swap_last = (head1)->stqh_last;                   \
	STAILQ_FIRST(head1) = STAILQ_FIRST(head2);                      \
	(head1)->stqh_last = (head2)->stqh_last;                        \
	STAILQ_FIRST(head2) = swap_first;                               \
	(head2)->stqh_last = swap_last;                                 \
	if (STAILQ_EMPTY(head1))                                        \
		(head1)->stqh_last = &STAILQ_FIRST(head1);              \
	if (STAILQ_EMPTY(head2))                                        \
		(head2)->stqh_last = &STAILQ_FIRST(head2);              \
} while (0)                                                             \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP
465
466
467/*
468 * List declarations.
469 */
/* Head of a doubly-linked list: a pointer to the first element
 * (NULL when the list is empty). */
#define LIST_HEAD(name, type)                                           \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct name {                                                           \
	struct type *lh_first;  /* first element */                     \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Static initializer for an empty LIST_HEAD. */
#define LIST_HEAD_INITIALIZER(head)                                     \
	{ NULL }

/* Per-element linkage: successor pointer plus the address of the link
 * that points at this element (enables O(1) removal). */
#define LIST_ENTRY(type)                                                \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct {                                                                \
	struct type *le_next;   /* next element */                      \
	struct type **le_prev;  /* address of previous next element */  \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP
491
492/*
493 * List functions.
494 */
495
#ifdef KERNEL_PRIVATE
/* Panic if the head's first element does not point back at the head. */
#define LIST_CHECK_HEAD(head, field) do {                               \
	if (__improbable(                                               \
	      LIST_FIRST((head)) != NULL &&                             \
	      LIST_FIRST((head))->field.le_prev !=                      \
	      &LIST_FIRST((head))))                                     \
		panic("Bad list head %p first->prev != head @%u",       \
		    (head), __LINE__);                                  \
} while (0)

/* Panic if elm's successor does not point back at elm. */
#define LIST_CHECK_NEXT(elm, field) do {                                \
	if (__improbable(                                               \
	      LIST_NEXT((elm), field) != NULL &&                        \
	      LIST_NEXT((elm), field)->field.le_prev !=                 \
	      &((elm)->field.le_next)))                                 \
		panic("Bad link elm %p next->prev != elm @%u",          \
		    (elm), __LINE__);                                   \
} while (0)

/* Panic if the link that should lead to elm does not. */
#define LIST_CHECK_PREV(elm, field) do {                                \
	if (__improbable(*(elm)->field.le_prev != (elm)))               \
		panic("Bad link elm %p prev->next != elm @%u",          \
		    (elm), __LINE__);                                   \
} while (0)
#else
/* Integrity checks compile away outside the kernel. */
#define LIST_CHECK_HEAD(head, field)
#define LIST_CHECK_NEXT(elm, field)
#define LIST_CHECK_PREV(elm, field)
#endif /* KERNEL_PRIVATE */

/* True when the list has no elements. */
#define LIST_EMPTY(head)        ((head)->lh_first == NULL)

/* First element, or NULL when empty. */
#define LIST_FIRST(head)        ((head)->lh_first)

/* Forward iteration; do not remove (var) inside the loop body. */
#define LIST_FOREACH(var, head, field)                                  \
	for ((var) = LIST_FIRST((head));                                \
	    (var);                                                      \
	    (var) = LIST_NEXT((var), field))

/* Forward iteration that tolerates removal of (var); (tvar) caches the
 * successor before the loop body runs. */
#define LIST_FOREACH_SAFE(var, head, field, tvar)                       \
	for ((var) = LIST_FIRST((head));                                \
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);             \
	    (var) = (tvar))

/* Reset the list to empty. */
#define LIST_INIT(head) do {                                            \
	LIST_FIRST((head)) = NULL;                                      \
} while (0)

/* Insert elm immediately after listelm.  O(1). */
#define LIST_INSERT_AFTER(listelm, elm, field) do {                     \
	LIST_CHECK_NEXT(listelm, field);                                \
	if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
		LIST_NEXT((listelm), field)->field.le_prev =            \
		    &LIST_NEXT((elm), field);                           \
	LIST_NEXT((listelm), field) = (elm);                            \
	(elm)->field.le_prev = &LIST_NEXT((listelm), field);            \
} while (0)

/* Insert elm immediately before listelm.  O(1); no head required. */
#define LIST_INSERT_BEFORE(listelm, elm, field) do {                    \
	LIST_CHECK_PREV(listelm, field);                                \
	(elm)->field.le_prev = (listelm)->field.le_prev;                \
	LIST_NEXT((elm), field) = (listelm);                            \
	*(listelm)->field.le_prev = (elm);                              \
	(listelm)->field.le_prev = &LIST_NEXT((elm), field);            \
} while (0)

/* Insert elm as the new first element.  O(1). */
#define LIST_INSERT_HEAD(head, elm, field) do {                         \
	LIST_CHECK_HEAD((head), field);                                 \
	if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)     \
		LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
	LIST_FIRST((head)) = (elm);                                     \
	(elm)->field.le_prev = &LIST_FIRST((head));                     \
} while (0)

/* Successor of elm, or NULL. */
#define LIST_NEXT(elm, field)   ((elm)->field.le_next)

/* Unlink elm in O(1) via its back link; poisons elm's links in debug
 * builds.  elm must be on a list. */
#define LIST_REMOVE(elm, field) do {                                    \
	LIST_CHECK_NEXT(elm, field);                                    \
	LIST_CHECK_PREV(elm, field);                                    \
	if (LIST_NEXT((elm), field) != NULL)                            \
		LIST_NEXT((elm), field)->field.le_prev =                \
		    (elm)->field.le_prev;                               \
	*(elm)->field.le_prev = LIST_NEXT((elm), field);                \
	TRASHIT((elm)->field.le_next);                                  \
	TRASHIT((elm)->field.le_prev);                                  \
} while (0)

/* Exchange the contents of two lists, then repair each new first
 * element's back link so it points at its new head. */
#define LIST_SWAP(head1, head2, type, field)                            \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
do {                                                                    \
	struct type *swap_tmp = LIST_FIRST((head1));                    \
	LIST_FIRST((head1)) = LIST_FIRST((head2));                      \
	LIST_FIRST((head2)) = swap_tmp;                                 \
	if ((swap_tmp = LIST_FIRST((head1))) != NULL)                   \
		swap_tmp->field.le_prev = &LIST_FIRST((head1));         \
	if ((swap_tmp = LIST_FIRST((head2))) != NULL)                   \
		swap_tmp->field.le_prev = &LIST_FIRST((head2));         \
} while (0)                                                             \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP
596
597/*
598 * Tail queue declarations.
599 */
/* Head of a tail queue: first element plus the address of the last
 * element's next-link (points back at tqh_first when empty). */
#define TAILQ_HEAD(name, type)                                          \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct name {                                                           \
	struct type *tqh_first; /* first element */                     \
	struct type **tqh_last; /* addr of last next element */         \
	TRACEBUF                                                        \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Static initializer for an empty TAILQ_HEAD. */
#define TAILQ_HEAD_INITIALIZER(head)                                    \
	{ NULL, &(head).tqh_first }

/* Per-element linkage: successor pointer plus the address of the link
 * that points at this element (enables O(1) removal and TAILQ_PREV). */
#define TAILQ_ENTRY(type)                                               \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct {                                                                \
	struct type *tqe_next;  /* next element */                      \
	struct type **tqe_prev; /* address of previous next element */  \
	TRACEBUF                                                        \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP
624
625/*
626 * Tail queue functions.
627 */
#ifdef KERNEL_PRIVATE
/* Panic if the head's first element does not point back at the head. */
#define TAILQ_CHECK_HEAD(head, field) do {                              \
	if (__improbable(                                               \
	      TAILQ_FIRST((head)) != NULL &&                            \
	      TAILQ_FIRST((head))->field.tqe_prev !=                    \
	      &TAILQ_FIRST((head))))                                    \
		panic("Bad tailq head %p first->prev != head @%u",      \
		    (head), __LINE__);                                  \
} while (0)

/* Panic if elm's successor does not point back at elm. */
#define TAILQ_CHECK_NEXT(elm, field) do {                               \
	if (__improbable(                                               \
	      TAILQ_NEXT((elm), field) != NULL &&                       \
	      TAILQ_NEXT((elm), field)->field.tqe_prev !=               \
	      &((elm)->field.tqe_next)))                                \
		panic("Bad tailq elm %p next->prev != elm @%u",         \
		    (elm), __LINE__);                                   \
} while(0)

/* Panic if the link that should lead to elm does not. */
#define TAILQ_CHECK_PREV(elm, field) do {                               \
	if (__improbable(*(elm)->field.tqe_prev != (elm)))              \
		panic("Bad tailq elm %p prev->next != elm @%u",         \
		    (elm), __LINE__);                                   \
} while(0)
#else
/* Integrity checks compile away outside the kernel. */
#define TAILQ_CHECK_HEAD(head, field)
#define TAILQ_CHECK_NEXT(elm, field)
#define TAILQ_CHECK_PREV(elm, field)
#endif /* KERNEL_PRIVATE */

/* Append all of head2 onto the tail of head1 and leave head2 empty.  O(1). */
#define TAILQ_CONCAT(head1, head2, field) do {                          \
	if (!TAILQ_EMPTY(head2)) {                                      \
		*(head1)->tqh_last = (head2)->tqh_first;                \
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
		(head1)->tqh_last = (head2)->tqh_last;                  \
		TAILQ_INIT((head2));                                    \
		QMD_TRACE_HEAD(head1);                                  \
		QMD_TRACE_HEAD(head2);                                  \
	}                                                               \
} while (0)

/* True when the tail queue has no elements. */
#define TAILQ_EMPTY(head)       ((head)->tqh_first == NULL)

/* First element, or NULL when empty. */
#define TAILQ_FIRST(head)       ((head)->tqh_first)

/* Forward iteration; do not remove (var) inside the loop body. */
#define TAILQ_FOREACH(var, head, field)                                 \
	for ((var) = TAILQ_FIRST((head));                               \
	    (var);                                                      \
	    (var) = TAILQ_NEXT((var), field))

/* Forward iteration that tolerates removal of (var); (tvar) caches the
 * successor before the loop body runs. */
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)                      \
	for ((var) = TAILQ_FIRST((head));                               \
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);            \
	    (var) = (tvar))

/* Iterate from the last element towards the first. */
#define TAILQ_FOREACH_REVERSE(var, head, headname, field)               \
	for ((var) = TAILQ_LAST((head), headname);                      \
	    (var);                                                      \
	    (var) = TAILQ_PREV((var), headname, field))

/* Reverse iteration that tolerates removal of (var); (tvar) caches the
 * predecessor before the loop body runs. */
#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)    \
	for ((var) = TAILQ_LAST((head), headname);                      \
	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);  \
	    (var) = (tvar))

#if XNU_KERNEL_PRIVATE
/*
 * Can be used when the initialized HEAD was just bzeroed
 * Works around deficiencies in clang analysis of initialization patterns.
 * See: <rdar://problem/47939050>
 */
#define TAILQ_INIT_AFTER_BZERO(head) do {                               \
	(head)->tqh_last = &TAILQ_FIRST((head));                        \
} while (0)
#endif /* XNU_KERNEL_PRIVATE */

/* Reset the queue to empty: no first element, tail pointer aimed back
 * at the tqh_first slot. */
#define TAILQ_INIT(head) do {                                           \
	TAILQ_FIRST((head)) = NULL;                                     \
	(head)->tqh_last = &TAILQ_FIRST((head));                        \
	QMD_TRACE_HEAD(head);                                           \
} while (0)


/* Insert elm after listelm; fixes the tail pointer when elm becomes last. */
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do {              \
	TAILQ_CHECK_NEXT(listelm, field);                               \
	if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
		TAILQ_NEXT((elm), field)->field.tqe_prev =              \
		    &TAILQ_NEXT((elm), field);                          \
	else {                                                          \
		(head)->tqh_last = &TAILQ_NEXT((elm), field);           \
		QMD_TRACE_HEAD(head);                                   \
	}                                                               \
	TAILQ_NEXT((listelm), field) = (elm);                           \
	(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);          \
	QMD_TRACE_ELEM(&(elm)->field);                                  \
	QMD_TRACE_ELEM(&listelm->field);                                \
} while (0)

/* Insert elm immediately before listelm.  O(1); no head required. */
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do {                   \
	TAILQ_CHECK_PREV(listelm, field);                               \
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;              \
	TAILQ_NEXT((elm), field) = (listelm);                           \
	*(listelm)->field.tqe_prev = (elm);                             \
	(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);          \
	QMD_TRACE_ELEM(&(elm)->field);                                  \
	QMD_TRACE_ELEM(&listelm->field);                                \
} while (0)

/* Insert elm at the front; fixes the tail pointer if the queue was empty. */
#define TAILQ_INSERT_HEAD(head, elm, field) do {                        \
	TAILQ_CHECK_HEAD(head, field);                                  \
	if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)   \
		TAILQ_FIRST((head))->field.tqe_prev =                   \
		    &TAILQ_NEXT((elm), field);                          \
	else                                                            \
		(head)->tqh_last = &TAILQ_NEXT((elm), field);           \
	TAILQ_FIRST((head)) = (elm);                                    \
	(elm)->field.tqe_prev = &TAILQ_FIRST((head));                   \
	QMD_TRACE_HEAD(head);                                           \
	QMD_TRACE_ELEM(&(elm)->field);                                  \
} while (0)

/* Append elm at the end using the cached tail pointer.  O(1). */
#define TAILQ_INSERT_TAIL(head, elm, field) do {                        \
	TAILQ_NEXT((elm), field) = NULL;                                \
	(elm)->field.tqe_prev = (head)->tqh_last;                       \
	*(head)->tqh_last = (elm);                                      \
	(head)->tqh_last = &TAILQ_NEXT((elm), field);                   \
	QMD_TRACE_HEAD(head);                                           \
	QMD_TRACE_ELEM(&(elm)->field);                                  \
} while (0)

/* Last element, or NULL when empty: tqh_last is reinterpreted as a head
 * so its own tqh_last slot yields the final element. */
#define TAILQ_LAST(head, headname)                                      \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
	(*(((struct headname *)((head)->tqh_last))->tqh_last))          \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Successor of elm, or NULL. */
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)

/* Predecessor of elm, or NULL; same head-reinterpretation trick as
 * TAILQ_LAST, applied to elm's back link. */
#define TAILQ_PREV(elm, headname, field)                                \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))     \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Unlink elm in O(1); repairs the tail pointer when elm was last and
 * poisons elm's links in debug builds.  elm must be on the queue. */
#define TAILQ_REMOVE(head, elm, field) do {                             \
	TAILQ_CHECK_NEXT(elm, field);                                   \
	TAILQ_CHECK_PREV(elm, field);                                   \
	if ((TAILQ_NEXT((elm), field)) != NULL)                         \
		TAILQ_NEXT((elm), field)->field.tqe_prev =              \
		    (elm)->field.tqe_prev;                              \
	else {                                                          \
		(head)->tqh_last = (elm)->field.tqe_prev;               \
		QMD_TRACE_HEAD(head);                                   \
	}                                                               \
	*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);              \
	TRASHIT((elm)->field.tqe_next);                                 \
	TRASHIT((elm)->field.tqe_prev);                                 \
	QMD_TRACE_ELEM(&(elm)->field);                                  \
} while (0)
789
/*
 * Exchange the contents of two tail queues, then repair each queue's
 * first element back pointer (or the empty queue's self-referencing
 * tail pointer) so both heads remain internally consistent.
 */
/* Swap all elements between head1 and head2; afterwards each new first
 * element's back link (or an empty head's tail pointer) is repaired. */
#define TAILQ_SWAP(head1, head2, type, field)                           \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
do {                                                                    \
	struct type *swap_first = (head1)->tqh_first;                   \
	struct type **swap_last = (head1)->tqh_last;                    \
	(head1)->tqh_first = (head2)->tqh_first;                        \
	(head1)->tqh_last = (head2)->tqh_last;                          \
	(head2)->tqh_first = swap_first;                                \
	(head2)->tqh_last = swap_last;                                  \
	if ((swap_first = (head1)->tqh_first) != NULL)                  \
		swap_first->field.tqe_prev = &(head1)->tqh_first;       \
	else                                                            \
		(head1)->tqh_last = &(head1)->tqh_first;                \
	if ((swap_first = (head2)->tqh_first) != NULL)                  \
		swap_first->field.tqe_prev = &(head2)->tqh_first;       \
	else                                                            \
		(head2)->tqh_last = &(head2)->tqh_first;                \
} while (0)                                                             \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP
814
815/*
816 * Circular queue definitions.
817 */
/* Head of a circular queue: pointers to the first and last elements.
 * In an empty queue both point at the head itself (sentinel). */
#define CIRCLEQ_HEAD(name, type)                                        \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct name {                                                           \
	struct type *cqh_first;         /* first element */             \
	struct type *cqh_last;          /* last element */              \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP

/* Per-element linkage: forward and backward element pointers; either
 * may point at the head when the element is first/last. */
#define CIRCLEQ_ENTRY(type)                                             \
__MISMATCH_TAGS_PUSH                                                    \
__NULLABILITY_COMPLETENESS_PUSH                                         \
struct {                                                                \
	struct type *cqe_next;          /* next element */              \
	struct type *cqe_prev;          /* previous element */          \
}                                                                       \
__NULLABILITY_COMPLETENESS_POP                                          \
__MISMATCH_TAGS_POP
837
838/*
839 * Circular queue functions.
840 */
#ifdef KERNEL_PRIVATE
/* Panic if the head's first element does not point back at the head. */
#define CIRCLEQ_CHECK_HEAD(head, field) do {                            \
	if (__improbable(                                               \
	      CIRCLEQ_FIRST((head)) != ((void*)(head)) &&               \
	      CIRCLEQ_FIRST((head))->field.cqe_prev != ((void*)(head))))\
		panic("Bad circleq head %p first->prev != head @%u",    \
		    (head), __LINE__);                                  \
} while(0)
/* Panic if elm's successor does not point back at elm. */
#define CIRCLEQ_CHECK_NEXT(head, elm, field) do {                       \
	if (__improbable(                                               \
	      CIRCLEQ_NEXT((elm), field) != ((void*)(head)) &&          \
	      CIRCLEQ_NEXT((elm), field)->field.cqe_prev != (elm)))     \
		panic("Bad circleq elm %p next->prev != elm @%u",       \
		    (elm), __LINE__);                                   \
} while(0)
/* Panic if elm's predecessor does not point forward at elm. */
#define CIRCLEQ_CHECK_PREV(head, elm, field) do {                       \
	if (__improbable(                                               \
	      CIRCLEQ_PREV((elm), field) != ((void*)(head)) &&          \
	      CIRCLEQ_PREV((elm), field)->field.cqe_next != (elm)))     \
		panic("Bad circleq elm %p prev->next != elm @%u",       \
		    (elm), __LINE__);                                   \
} while(0)
#else
/* Integrity checks compile away outside the kernel. */
#define CIRCLEQ_CHECK_HEAD(head, field)
#define CIRCLEQ_CHECK_NEXT(head, elm, field)
#define CIRCLEQ_CHECK_PREV(head, elm, field)
#endif /* KERNEL_PRIVATE */

/* Empty when the first "element" is the head sentinel itself. */
#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))

/* First element; equals (void *)(head) when the queue is empty. */
#define CIRCLEQ_FIRST(head) ((head)->cqh_first)

/* Forward iteration; terminates when the walk wraps back to the head. */
#define CIRCLEQ_FOREACH(var, head, field)                               \
	for((var) = (head)->cqh_first;                                  \
	    (var) != (void *)(head);                                    \
	    (var) = (var)->field.cqe_next)

/* Reset the queue to empty: both links point at the head sentinel. */
#define CIRCLEQ_INIT(head) do {                                         \
	(head)->cqh_first = (void *)(head);                             \
	(head)->cqh_last = (void *)(head);                              \
} while (0)

/* Insert elm immediately after listelm; updates cqh_last when listelm
 * was the final element. */
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {            \
	CIRCLEQ_CHECK_NEXT(head, listelm, field);                       \
	(elm)->field.cqe_next = (listelm)->field.cqe_next;              \
	(elm)->field.cqe_prev = (listelm);                              \
	if ((listelm)->field.cqe_next == (void *)(head))                \
		(head)->cqh_last = (elm);                               \
	else                                                            \
		(listelm)->field.cqe_next->field.cqe_prev = (elm);      \
	(listelm)->field.cqe_next = (elm);                              \
} while (0)

/* Insert elm immediately before listelm; updates cqh_first when listelm
 * was the first element. */
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {           \
	CIRCLEQ_CHECK_PREV(head, listelm, field);                       \
	(elm)->field.cqe_next = (listelm);                              \
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;              \
	if ((listelm)->field.cqe_prev == (void *)(head))                \
		(head)->cqh_first = (elm);                              \
	else                                                            \
		(listelm)->field.cqe_prev->field.cqe_next = (elm);      \
	(listelm)->field.cqe_prev = (elm);                              \
} while (0)

/* Insert elm at the front of the queue.  O(1). */
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do {                      \
	CIRCLEQ_CHECK_HEAD(head, field);                                \
	(elm)->field.cqe_next = (head)->cqh_first;                      \
	(elm)->field.cqe_prev = (void *)(head);                         \
	if ((head)->cqh_last == (void *)(head))                         \
		(head)->cqh_last = (elm);                               \
	else                                                            \
		(head)->cqh_first->field.cqe_prev = (elm);              \
	(head)->cqh_first = (elm);                                      \
} while (0)

/* Insert elm at the end of the queue.  O(1). */
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do {                      \
	(elm)->field.cqe_next = (void *)(head);                         \
	(elm)->field.cqe_prev = (head)->cqh_last;                       \
	if ((head)->cqh_first == (void *)(head))                        \
		(head)->cqh_first = (elm);                              \
	else                                                            \
		(head)->cqh_last->field.cqe_next = (elm);               \
	(head)->cqh_last = (elm);                                       \
} while (0)

/* Last element; equals (void *)(head) when the queue is empty. */
#define CIRCLEQ_LAST(head) ((head)->cqh_last)

/* Successor of elm; equals (void *)(head) when elm is last. */
#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)

/* Predecessor of elm; equals (void *)(head) when elm is first. */
#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)

/* Unlink elm in O(1); fixes cqh_first/cqh_last when elm was at either
 * end.  elm must be on the queue. */
#define CIRCLEQ_REMOVE(head, elm, field) do {                           \
	CIRCLEQ_CHECK_NEXT(head, elm, field);                           \
	CIRCLEQ_CHECK_PREV(head, elm, field);                           \
	if ((elm)->field.cqe_next == (void *)(head))                    \
		(head)->cqh_last = (elm)->field.cqe_prev;               \
	else                                                            \
		(elm)->field.cqe_next->field.cqe_prev =                 \
		    (elm)->field.cqe_prev;                              \
	if ((elm)->field.cqe_prev == (void *)(head))                    \
		(head)->cqh_first = (elm)->field.cqe_next;              \
	else                                                            \
		(elm)->field.cqe_prev->field.cqe_next =                 \
		    (elm)->field.cqe_next;                              \
} while (0)
946
947#ifdef _KERNEL
948
949#if NOTFB31
950
/*
 * XXX insque() and remque() are an old way of handling certain queues.
 * They bogusly assume that all queue heads look alike.
 */
955
/*
 * Generic view of a node in a doubly-linked queue: both heads and
 * elements are assumed to start with a forward and a backward link.
 */
struct quehead {
	struct quehead *qh_link;        /* next node */
	struct quehead *qh_rlink;       /* previous node */
};
960
961#ifdef __GNUC__
962#ifdef KERNEL_PRIVATE
/*
 * Sanity check: if node a has a successor, the successor's back link
 * must point at a; otherwise panic.  Debug aid for insque()/remque().
 */
static __inline void
chkquenext(void *a)
{
	struct quehead *element = (struct quehead *)a;
	if (__improbable(element->qh_link != NULL &&
	    element->qh_link->qh_rlink != element)) {
		panic("Bad que elm %p next->prev != elm", a);
	}
}
972
/*
 * Sanity check: if node a has a predecessor, the predecessor's forward
 * link must point at a; otherwise panic.  Debug aid for remque().
 */
static __inline void
chkqueprev(void *a)
{
	struct quehead *element = (struct quehead *)a;
	if (__improbable(element->qh_rlink != NULL &&
	    element->qh_rlink->qh_link != element)) {
		panic("Bad que elm %p prev->next != elm", a);
	}
}
982#else /* !KERNEL_PRIVATE */
983#define chkquenext(a)
984#define chkqueprev(a)
985#endif /* KERNEL_PRIVATE */
986
987static __inline void
988insque(void *a, void *b)
989{
990 struct quehead *element = (struct quehead *)a,
991 *head = (struct quehead *)b;
992 chkquenext(head);
993
994 element->qh_link = head->qh_link;
995 element->qh_rlink = head;
996 head->qh_link = element;
997 element->qh_link->qh_rlink = element;
998}
999
1000static __inline void
1001remque(void *a)
1002{
1003 struct quehead *element = (struct quehead *)a;
1004 chkquenext(element);
1005 chkqueprev(element);
1006
1007 element->qh_link->qh_rlink = element->qh_rlink;
1008 element->qh_rlink->qh_link = element->qh_link;
1009 element->qh_rlink = 0;
1010}
1011
1012#else /* !__GNUC__ */
1013
1014void insque(void *a, void *b);
1015void remque(void *a);
1016
1017#endif /* __GNUC__ */
1018
1019#endif /* NOTFB31 */
1020#endif /* _KERNEL */
1021
1022#endif /* !_SYS_QUEUE_H_ */
1023