1 | /* |
2 | * Copyright (c) 2003-2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /*- |
29 | * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> |
30 | * All rights reserved. |
31 | * |
32 | * Redistribution and use in source and binary forms, with or without |
33 | * modification, are permitted provided that the following conditions |
34 | * are met: |
35 | * 1. Redistributions of source code must retain the above copyright |
36 | * notice, this list of conditions and the following disclaimer. |
37 | * 2. Redistributions in binary form must reproduce the above copyright |
38 | * notice, this list of conditions and the following disclaimer in the |
39 | * documentation and/or other materials provided with the distribution. |
40 | * |
41 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
42 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
43 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
44 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
45 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
46 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
47 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
48 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
49 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
50 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
51 | * SUCH DAMAGE. |
52 | * |
53 | * $FreeBSD: src/sys/sys/event.h,v 1.5.2.5 2001/12/14 19:21:22 jlemon Exp $ |
54 | */ |
55 | |
56 | #ifndef _SYS_EVENT_PRIVATE_H_ |
57 | #define _SYS_EVENT_PRIVATE_H_ |
58 | |
59 | #include <machine/types.h> |
60 | #include <stdint.h> |
61 | #include <sys/cdefs.h> |
62 | #include <sys/event.h> |
63 | #include <sys/queue.h> |
64 | #ifndef KERNEL_PRIVATE |
65 | #include <sys/types.h> |
66 | #endif |
67 | #ifdef XNU_KERNEL_PRIVATE |
68 | #include <kern/debug.h> /* panic */ |
69 | #include <kern/kern_types.h> |
70 | #include <mach/vm_param.h> |
71 | #include <pthread/priority_private.h> |
72 | #include <sys/fcntl.h> /* FREAD, FWRITE */ |
73 | #endif /* XNU_KERNEL_PRIVATE */ |
74 | |
75 | /* |
76 | * Filter types |
77 | */ |
78 | /* Additional filter types in event.h */ |
79 | #define EVFILT_UNUSED_11 (-11) /* (-11) unused */ |
80 | #define EVFILT_SOCK (-13) /* Socket events */ |
81 | #define EVFILT_MEMORYSTATUS (-14) /* Memorystatus events */ |
82 | #define EVFILT_NW_CHANNEL (-16) /* Skywalk channel events */ |
83 | #define EVFILT_WORKLOOP (-17) /* Workloop events */ |
84 | #define EVFILT_EXCLAVES_NOTIFICATION (-18) /* Exclave notification */ |
85 | /* If additional filter types are added, make sure to update |
86 | * EVFILT_SYSCOUNT in event.h!!! |
87 | */ |
88 | |
89 | #ifdef KERNEL_PRIVATE |
90 | |
91 | #pragma pack(4) |
92 | |
93 | struct user64_kevent { |
94 | uint64_t ident; /* identifier for this event */ |
95 | int16_t filter; /* filter for event */ |
96 | uint16_t flags; /* general flags */ |
97 | uint32_t fflags; /* filter-specific flags */ |
98 | int64_t data; /* filter-specific data */ |
99 | user_addr_t udata; /* opaque user data identifier */ |
100 | }; |
101 | |
102 | struct user32_kevent { |
103 | uint32_t ident; /* identifier for this event */ |
104 | int16_t filter; /* filter for event */ |
105 | uint16_t flags; /* general flags */ |
106 | uint32_t fflags; /* filter-specific flags */ |
107 | int32_t data; /* filter-specific data */ |
108 | user32_addr_t udata; /* opaque user data identifier */ |
109 | }; |
110 | |
111 | #pragma pack() |
112 | |
113 | #endif /* KERNEL_PRIVATE */ |
114 | |
115 | struct kevent_qos_s { |
116 | uint64_t ident; /* identifier for this event */ |
117 | int16_t filter; /* filter for event */ |
118 | uint16_t flags; /* general flags */ |
119 | int32_t qos; /* quality of service */ |
120 | uint64_t udata; /* opaque user data identifier */ |
121 | uint32_t fflags; /* filter-specific flags */ |
122 | uint32_t xflags; /* extra filter-specific flags */ |
123 | int64_t data; /* filter-specific data */ |
124 | uint64_t ext[4]; /* filter-specific extensions */ |
125 | }; |
126 | |
127 | /* |
128 | * Type definition for names/ids of dynamically allocated kqueues. |
129 | */ |
130 | typedef uint64_t kqueue_id_t; |
131 | |
132 | /* |
133 | * Rather than provide an EV_SET_QOS macro for kevent_qos_t structure |
134 | * initialization, we encourage use of named field initialization support |
135 | * instead. |
136 | */ |
137 | |
138 | // was KEVENT_FLAG_STACK_EVENTS 0x000004 |
139 | #define KEVENT_FLAG_STACK_DATA 0x000008 /* output data allocated as stack (grows down) */ |
140 | // KEVENT_FLAG_POLL 0x000010 |
141 | #define KEVENT_FLAG_WORKQ 0x000020 /* interact with the default workq kq */ |
142 | // KEVENT_FLAG_LEGACY32 0x000040 |
143 | // KEVENT_FLAG_LEGACY64 0x000080 |
144 | // KEVENT_FLAG_PROC64 0x000100 |
145 | #define KEVENT_FLAG_WORKQ_MANAGER 0x000200 /* obsolete */ |
146 | #define KEVENT_FLAG_WORKLOOP 0x000400 /* interact with the specified workloop kq */ |
147 | #define KEVENT_FLAG_PARKING 0x000800 /* workq thread is parking */ |
148 | // KEVENT_FLAG_KERNEL 0x001000 |
149 | // KEVENT_FLAG_DYNAMIC_KQUEUE 0x002000 |
150 | // KEVENT_FLAG_NEEDS_END_PROCESSING 0x004000 |
151 | #define KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH 0x008000 /* obsolete */ |
152 | #define KEVENT_FLAG_WORKLOOP_SERVICER_DETACH 0x010000 /* obsolete */ |
153 | #define KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST 0x020000 /* kq lookup by id must exist */ |
154 | #define KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST 0x040000 /* kq lookup by id must not exist */ |
155 | #define KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD 0x080000 /* obsolete */ |
156 | |
157 | #ifdef XNU_KERNEL_PRIVATE |
158 | |
159 | #define KEVENT_FLAG_POLL 0x0010 /* Call is for poll() */ |
160 | #define KEVENT_FLAG_LEGACY32 0x0040 /* event data in legacy 32-bit format */ |
161 | #define KEVENT_FLAG_LEGACY64 0x0080 /* event data in legacy 64-bit format */ |
162 | #define KEVENT_FLAG_PROC64 0x0100 /* proc is 64bits */ |
163 | #define KEVENT_FLAG_KERNEL 0x1000 /* caller is in-kernel */ |
164 | #define KEVENT_FLAG_DYNAMIC_KQUEUE 0x2000 /* kqueue is dynamically allocated */ |
165 | #define KEVENT_FLAG_NEEDS_END_PROCESSING 0x4000 /* end processing required before returning */ |
166 | |
167 | #define KEVENT_ID_FLAG_USER (KEVENT_FLAG_WORKLOOP | \ |
168 | KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) |
169 | |
170 | #define KEVENT_FLAG_USER (KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS | \ |
171 | KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP | \ |
172 | KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) |
173 | |
174 | /* |
175 | * Since some filter ops are not part of the standard sysfilt_ops, we use |
176 | * kn_filtid starting from EVFILT_SYSCOUNT to identify these cases. This is to |
177 | * let kn_fops() get the correct fops for all cases. |
178 | */ |
179 | #define EVFILTID_KQREAD (EVFILT_SYSCOUNT) |
180 | #define EVFILTID_PIPE_N (EVFILT_SYSCOUNT + 1) |
181 | #define EVFILTID_PIPE_R (EVFILT_SYSCOUNT + 2) |
182 | #define EVFILTID_PIPE_W (EVFILT_SYSCOUNT + 3) |
183 | #define EVFILTID_PTSD (EVFILT_SYSCOUNT + 4) |
184 | #define EVFILTID_SOREAD (EVFILT_SYSCOUNT + 5) |
185 | #define EVFILTID_SOWRITE (EVFILT_SYSCOUNT + 6) |
186 | #define EVFILTID_SCK (EVFILT_SYSCOUNT + 7) |
187 | #define EVFILTID_SOEXCEPT (EVFILT_SYSCOUNT + 8) |
188 | #define EVFILTID_SPEC (EVFILT_SYSCOUNT + 9) |
189 | #define EVFILTID_BPFREAD (EVFILT_SYSCOUNT + 10) |
190 | #define EVFILTID_NECP_FD (EVFILT_SYSCOUNT + 11) |
191 | #define EVFILTID_SKYWALK_CHANNEL_W (EVFILT_SYSCOUNT + 12) |
192 | #define EVFILTID_SKYWALK_CHANNEL_R (EVFILT_SYSCOUNT + 13) |
193 | #define EVFILTID_SKYWALK_CHANNEL_E (EVFILT_SYSCOUNT + 14) |
194 | #define EVFILTID_FSEVENT (EVFILT_SYSCOUNT + 15) |
195 | #define EVFILTID_VN (EVFILT_SYSCOUNT + 16) |
196 | #define EVFILTID_TTY (EVFILT_SYSCOUNT + 17) |
197 | #define EVFILTID_PTMX (EVFILT_SYSCOUNT + 18) |
198 | #define EVFILTID_MACH_PORT (EVFILT_SYSCOUNT + 19) |
199 | #define EVFILTID_MACH_PORT_SET (EVFILT_SYSCOUNT + 20) |
200 | |
201 | #define EVFILTID_DETACHED (EVFILT_SYSCOUNT + 21) |
202 | #define EVFILTID_MAX (EVFILT_SYSCOUNT + 22) |
203 | |
204 | #endif /* defined(XNU_KERNEL_PRIVATE) */ |
205 | |
206 | #define EV_SET_QOS 0 |
207 | |
208 | /* |
209 | * data/hint fflags for EVFILT_WORKLOOP, shared with userspace |
210 | * |
 * The ident for thread requests should be the dynamic ID of the workloop.
 * The ident for each sync waiter must be unique to that waiter [for this
 * workloop].
 *
215 | * Commands: |
216 | * |
217 | * @const NOTE_WL_THREAD_REQUEST [in/out] |
218 | * The kevent represents asynchronous userspace work and its associated QoS. |
219 | * There can only be a single knote with this flag set per workloop. |
220 | * |
221 | * @const NOTE_WL_SYNC_WAIT [in/out] |
222 | * This bit is set when the caller is waiting to become the owner of a workloop. |
 * If the NOTE_WL_SYNC_WAKE bit is already set then the caller does not block;
 * otherwise it blocks until that bit is set.
225 | * |
226 | * The QoS field of the knote is used to push on other owners or servicers. |
227 | * |
228 | * @const NOTE_WL_SYNC_WAKE [in/out] |
 * Marks the waiter knote as being eligible to become an owner.
 * This bit can only be set once; trying to set it again fails with EALREADY.
231 | * |
232 | * @const NOTE_WL_SYNC_IPC [in/out] |
233 | * The knote is a sync IPC redirected turnstile push. |
234 | * |
235 | * Flags/Modifiers: |
236 | * |
237 | * @const NOTE_WL_UPDATE_QOS [in] (only NOTE_WL_THREAD_REQUEST) |
238 | * For successful updates (EV_ADD only), learn the new userspace async QoS from |
239 | * the kevent qos field. |
240 | * |
241 | * @const NOTE_WL_END_OWNERSHIP [in] |
242 | * If the update is successful (including deletions) or returns ESTALE, and |
243 | * the caller thread or the "suspended" thread is currently owning the workloop, |
244 | * then ownership is forgotten. |
245 | * |
246 | * @const NOTE_WL_DISCOVER_OWNER [in] |
247 | * If the update is successful (including deletions), learn the owner identity |
248 | * from the loaded value during debounce. This requires an address to have been |
249 | * filled in the EV_EXTIDX_WL_ADDR ext field, but doesn't require a mask to have |
250 | * been set in the EV_EXTIDX_WL_MASK. |
251 | * |
252 | * @const NOTE_WL_IGNORE_ESTALE [in] |
253 | * If the operation would fail with ESTALE, mask the error and pretend the |
 * update was successful. However, the operation itself didn't happen, meaning
255 | * that: |
256 | * - attaching a new knote will not happen |
257 | * - dropping an existing knote will not happen |
258 | * - NOTE_WL_UPDATE_QOS or NOTE_WL_DISCOVER_OWNER will have no effect |
259 | * |
260 | * This modifier doesn't affect NOTE_WL_END_OWNERSHIP. |
261 | */ |
262 | #define NOTE_WL_THREAD_REQUEST 0x00000001 |
263 | #define NOTE_WL_SYNC_WAIT 0x00000004 |
264 | #define NOTE_WL_SYNC_WAKE 0x00000008 |
265 | #define NOTE_WL_SYNC_IPC 0x80000000 |
266 | #define NOTE_WL_COMMANDS_MASK 0x8000000f /* Mask of all the [in] commands above */ |
267 | |
268 | #define NOTE_WL_UPDATE_QOS 0x00000010 |
269 | #define NOTE_WL_END_OWNERSHIP 0x00000020 |
270 | #define NOTE_WL_DISCOVER_OWNER 0x00000080 |
271 | #define NOTE_WL_IGNORE_ESTALE 0x00000100 |
272 | #define NOTE_WL_UPDATES_MASK 0x000001f0 /* Mask of all the [in] updates above */ |
273 | |
274 | #define NOTE_WL_UPDATE_OWNER 0 /* ... compatibility define ... */ |
275 | |
276 | /* |
277 | * EVFILT_WORKLOOP ext[] array indexes/meanings. |
278 | */ |
279 | #define EV_EXTIDX_WL_LANE 0 /* lane identifier [in: sync waiter] |
280 | * [out: thread request] */ |
281 | #define EV_EXTIDX_WL_ADDR 1 /* debounce address [in: NULL==no debounce] */ |
282 | #define EV_EXTIDX_WL_MASK 2 /* debounce mask [in] */ |
283 | #define EV_EXTIDX_WL_VALUE 3 /* debounce value [in: not current->ESTALE] |
284 | * [out: new/debounce value] */ |
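
/*
 * An illustrative sketch (not from this header; `wl_id`, `addr`, `mask` and
 * `expected` are hypothetical caller-provided values) of a thread request
 * registration using the debounce ext[] slots described above:
 *
 *     struct kevent_qos_s kev = {
 *         .ident  = wl_id,                      // dynamic workloop ID
 *         .filter = EVFILT_WORKLOOP,
 *         .flags  = EV_ADD | EV_ENABLE,
 *         .fflags = NOTE_WL_THREAD_REQUEST | NOTE_WL_DISCOVER_OWNER,
 *     };
 *     kev.ext[EV_EXTIDX_WL_ADDR]  = addr;      // address loaded for debounce
 *     kev.ext[EV_EXTIDX_WL_MASK]  = mask;      // bits of the loaded value compared
 *     kev.ext[EV_EXTIDX_WL_VALUE] = expected;  // mismatch fails with ESTALE
 */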
285 | |
286 | /* |
287 | * If NOTE_EXIT_MEMORY is present, these bits indicate specific jetsam condition. |
288 | */ |
289 | #define NOTE_EXIT_MEMORY_DETAIL_MASK 0xfe000000 |
290 | #define NOTE_EXIT_MEMORY_VMPAGESHORTAGE 0x80000000 /* jetsam condition: lowest jetsam priority proc killed due to vm page shortage */ |
291 | #define NOTE_EXIT_MEMORY_VMTHRASHING 0x40000000 /* jetsam condition: lowest jetsam priority proc killed due to vm thrashing */ |
292 | #define NOTE_EXIT_MEMORY_HIWAT 0x20000000 /* jetsam condition: process reached its high water mark */ |
293 | #define NOTE_EXIT_MEMORY_PID 0x10000000 /* jetsam condition: special pid kill requested */ |
294 | #define NOTE_EXIT_MEMORY_IDLE 0x08000000 /* jetsam condition: idle process cleaned up */ |
#define NOTE_EXIT_MEMORY_VNODE 0x04000000 /* jetsam condition: virtual node kill */
296 | #define NOTE_EXIT_MEMORY_FCTHRASHING 0x02000000 /* jetsam condition: lowest jetsam priority proc killed due to filecache thrashing */ |
297 | |
298 | /* |
299 | * data/hint fflags for EVFILT_MEMORYSTATUS, shared with userspace. |
300 | */ |
301 | #define NOTE_MEMORYSTATUS_PRESSURE_NORMAL 0x00000001 /* system memory pressure has returned to normal */ |
302 | #define NOTE_MEMORYSTATUS_PRESSURE_WARN 0x00000002 /* system memory pressure has changed to the warning state */ |
303 | #define NOTE_MEMORYSTATUS_PRESSURE_CRITICAL 0x00000004 /* system memory pressure has changed to the critical state */ |
304 | #define NOTE_MEMORYSTATUS_LOW_SWAP 0x00000008 /* system is in a low-swap state */ |
305 | #define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0x00000010 /* process memory limit has hit a warning state */ |
306 | #define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0x00000020 /* process memory limit has hit a critical state - soft limit */ |
307 | #define NOTE_MEMORYSTATUS_MSL_STATUS 0xf0000000 /* bits used to request change to process MSL status */ |
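
/*
 * A hedged userspace sketch (illustrative only; registration may be subject
 * to policy checks not described here): watching for system memory pressure
 * transitions.
 *
 *     struct kevent_qos_s kev = {
 *         .filter = EVFILT_MEMORYSTATUS,
 *         .flags  = EV_ADD | EV_ENABLE,
 *         .fflags = NOTE_MEMORYSTATUS_PRESSURE_WARN |
 *             NOTE_MEMORYSTATUS_PRESSURE_CRITICAL,
 *     };
 */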
308 | |
309 | #ifdef KERNEL_PRIVATE |
310 | /* |
311 | * data/hint fflags for EVFILT_MEMORYSTATUS, but not shared with userspace. |
312 | */ |
313 | #define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE 0x00000040 /* Used to restrict sending a warn event only once, per active limit, soft limits only */ |
314 | #define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE 0x00000080 /* Used to restrict sending a warn event only once, per inactive limit, soft limit only */ |
315 | #define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE 0x00000100 /* Used to restrict sending a critical event only once per active limit, soft limit only */ |
316 | #define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE 0x00000200 /* Used to restrict sending a critical event only once per inactive limit, soft limit only */ |
317 | #define NOTE_MEMORYSTATUS_JETSAM_FG_BAND 0x00000400 /* jetsam is approaching foreground band */ |
318 | |
319 | /* |
320 | * Use this mask to protect the kernel private flags. |
321 | */ |
322 | #define EVFILT_MEMORYSTATUS_ALL_MASK \ |
323 | (NOTE_MEMORYSTATUS_PRESSURE_NORMAL | NOTE_MEMORYSTATUS_PRESSURE_WARN | NOTE_MEMORYSTATUS_PRESSURE_CRITICAL | NOTE_MEMORYSTATUS_LOW_SWAP | \ |
324 | NOTE_MEMORYSTATUS_PROC_LIMIT_WARN | NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL | NOTE_MEMORYSTATUS_MSL_STATUS) |
325 | |
326 | #endif /* KERNEL_PRIVATE */ |
327 | |
328 | typedef enum vm_pressure_level { |
329 | kVMPressureNormal = 0, |
330 | kVMPressureWarning = 1, |
331 | kVMPressureUrgent = 2, |
332 | kVMPressureCritical = 3, |
333 | kVMPressureJetsam = 4, /* jetsam approaching FG bands */ |
334 | } vm_pressure_level_t; |
335 | |
336 | /* |
337 | * data/hint fflags for EVFILT_SOCK, shared with userspace. |
338 | * |
339 | */ |
340 | #define NOTE_CONNRESET 0x00000001 /* Received RST */ |
341 | #define NOTE_READCLOSED 0x00000002 /* Read side is shutdown */ |
342 | #define NOTE_WRITECLOSED 0x00000004 /* Write side is shutdown */ |
343 | #define NOTE_TIMEOUT 0x00000008 /* timeout: rexmt, keep-alive or persist */ |
344 | #define NOTE_NOSRCADDR 0x00000010 /* source address not available */ |
345 | #define NOTE_IFDENIED 0x00000020 /* interface denied connection */ |
346 | #define NOTE_SUSPEND 0x00000040 /* output queue suspended */ |
347 | #define NOTE_RESUME 0x00000080 /* output queue resumed */ |
348 | #define NOTE_KEEPALIVE 0x00000100 /* TCP Keepalive received */ |
349 | #define NOTE_ADAPTIVE_WTIMO 0x00000200 /* TCP adaptive write timeout */ |
350 | #define NOTE_ADAPTIVE_RTIMO 0x00000400 /* TCP adaptive read timeout */ |
351 | #define NOTE_CONNECTED 0x00000800 /* socket is connected */ |
352 | #define NOTE_DISCONNECTED 0x00001000 /* socket is disconnected */ |
353 | #define NOTE_CONNINFO_UPDATED 0x00002000 /* connection info was updated */ |
354 | #define NOTE_NOTIFY_ACK 0x00004000 /* notify acknowledgement */ |
355 | #define NOTE_WAKE_PKT 0x00008000 /* received wake packet */ |
356 | |
357 | #define EVFILT_SOCK_LEVEL_TRIGGER_MASK \ |
358 | (NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_SUSPEND | NOTE_RESUME | \ |
359 | NOTE_CONNECTED | NOTE_DISCONNECTED) |
360 | |
361 | #define EVFILT_SOCK_ALL_MASK \ |
362 | (NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_TIMEOUT | \ |
363 | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | NOTE_RESUME | \ |
364 | NOTE_KEEPALIVE | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO | \ |
365 | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED | \ |
366 | NOTE_NOTIFY_ACK | NOTE_WAKE_PKT) |
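
/*
 * An illustrative sketch (`sock_fd` is a hypothetical connected socket):
 * watching a socket for connection state transitions with EVFILT_SOCK.
 *
 *     struct kevent_qos_s kev = {
 *         .ident  = (uint64_t)sock_fd,
 *         .filter = EVFILT_SOCK,
 *         .flags  = EV_ADD | EV_CLEAR,
 *         .fflags = NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNRESET,
 *     };
 */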
367 | |
368 | /* |
369 | * data/hint fflags for EVFILT_NW_CHANNEL, shared with userspace. |
370 | */ |
371 | #define NOTE_FLOW_ADV_UPDATE 0x00000001 /* flow advisory update */ |
372 | #define NOTE_CHANNEL_EVENT 0x00000002 /* generic channel event */ |
#define NOTE_IF_ADV_UPD 0x00000004 /* interface advisory update */
374 | |
375 | #define EVFILT_NW_CHANNEL_ALL_MASK \ |
376 | (NOTE_FLOW_ADV_UPDATE | NOTE_CHANNEL_EVENT | NOTE_IF_ADV_UPD) |
377 | |
378 | #ifdef KERNEL_PRIVATE |
379 | |
380 | #ifdef XNU_KERNEL_PRIVATE |
381 | LIST_HEAD(knote_list, knote); |
382 | TAILQ_HEAD(kqtailq, knote); /* a list of "queued" events */ |
383 | |
384 | /* index into various kq queues */ |
385 | typedef uint8_t kq_index_t; |
386 | |
387 | /* lskq(1) knows about this type */ |
388 | __options_decl(kn_status_t, uint16_t /* 12 bits really */, { |
389 | KN_ACTIVE = 0x001, /* event has been triggered */ |
390 | KN_QUEUED = 0x002, /* event is on queue */ |
391 | KN_DISABLED = 0x004, /* event is disabled */ |
392 | KN_DROPPING = 0x008, /* knote is being dropped */ |
393 | KN_LOCKED = 0x010, /* knote is locked (kq_knlocks) */ |
394 | KN_POSTING = 0x020, /* f_event() in flight */ |
395 | // was KN_STAYACTIVE = 0x040, |
396 | KN_DEFERDELETE = 0x080, /* defer delete until re-enabled */ |
397 | KN_MERGE_QOS = 0x100, /* f_event() / f_* ran concurrently and overrides must merge */ |
398 | KN_REQVANISH = 0x200, /* requested EV_VANISH */ |
399 | KN_VANISHED = 0x400, /* has vanished */ |
400 | KN_SUPPRESSED = 0x800, /* event is suppressed during delivery */ |
401 | }); |
402 | |
403 | #if CONFIG_EXCLAVES |
404 | /* forward declaration of exclaves_resource */ |
405 | struct exclaves_resource; |
406 | #endif /* CONFIG_EXCLAVES */ |
407 | |
408 | #if __LP64__ |
409 | #define KNOTE_KQ_PACKED_BITS 42 |
410 | #define KNOTE_KQ_PACKED_SHIFT 0 |
411 | #define KNOTE_KQ_PACKED_BASE 0 |
412 | #else |
413 | #define KNOTE_KQ_PACKED_BITS 32 |
414 | #define KNOTE_KQ_PACKED_SHIFT 0 |
415 | #define KNOTE_KQ_PACKED_BASE 0 |
416 | #endif |
417 | |
418 | _Static_assert(!VM_PACKING_IS_BASE_RELATIVE(KNOTE_KQ_PACKED), |
419 | "Make sure the knote pointer packing is based on arithmetic shifts" ); |
420 | |
421 | struct kqueue; |
422 | struct knote { |
423 | TAILQ_ENTRY(knote) kn_tqe; /* linkage for tail queue */ |
424 | SLIST_ENTRY(knote) kn_link; /* linkage for fd search list */ |
425 | SLIST_ENTRY(knote) kn_selnext; /* klist element chain */ |
426 | #define KNOTE_AUTODETACHED ((struct knote *) -1) |
427 | #define KNOTE_IS_AUTODETACHED(kn) ((kn)->kn_selnext.sle_next == KNOTE_AUTODETACHED) |
428 | |
429 | kn_status_t kn_status : 12; |
430 | uintptr_t |
431 | kn_qos_index:4, /* in-use qos index */ |
432 | kn_qos_override:3, /* qos override index */ |
433 | kn_is_fd:1, /* knote is an fd */ |
434 | kn_vnode_kqok:1, |
435 | kn_vnode_use_ofst:1; |
436 | #if __LP64__ |
437 | uintptr_t kn_kq_packed : KNOTE_KQ_PACKED_BITS; |
438 | #else |
439 | uintptr_t kn_kq_packed; |
440 | #endif |
441 | |
442 | /* per filter stash of data (pointer, uint32_t or uint64_t) */ |
443 | union { |
444 | uintptr_t kn_hook; /* Manually PAC-ed, see knote_kn_hook_get_raw() */ |
445 | uint32_t kn_hook32; |
446 | }; |
447 | |
448 | /* per filter pointer to the resource being watched */ |
449 | union { |
		struct fileproc *XNU_PTRAUTH_SIGNED_PTR("knote.fp") kn_fp;
		struct proc *XNU_PTRAUTH_SIGNED_PTR("knote.proc") kn_proc;
		struct ipc_port *XNU_PTRAUTH_SIGNED_PTR("knote.ipc_port") kn_ipc_port;
		struct ipc_pset *XNU_PTRAUTH_SIGNED_PTR("knote.ipc_pset") kn_ipc_pset;
		struct thread_call *XNU_PTRAUTH_SIGNED_PTR("knote.thcall") kn_thcall;
		struct thread *XNU_PTRAUTH_SIGNED_PTR("knote.thread") kn_thread;
#if CONFIG_EXCLAVES
		struct exclaves_resource *XNU_PTRAUTH_SIGNED_PTR("knote.exclaves_resource") kn_exclaves_resource;
#endif /* CONFIG_EXCLAVES */
459 | }; |
460 | |
461 | /* |
462 | * Mimic kevent_qos so that knote_fill_kevent code is not horrid, |
463 | * but with subtleties: |
464 | * |
 * - kevent_qos_s::filter is 16 bits where ours is 8, and we use the top
466 | * bits to store the real specialized filter. |
467 | * knote_fill_kevent* will always force the top bits to 0xff. |
468 | * |
 * - kevent_qos_s::xflags is not kept; kn_sfflags takes its place,
470 | * knote_fill_kevent* will set xflags to 0. |
471 | * |
 * - kevent_qos_s::data is saved as kn_sdata and filters are encouraged
 * to use knote_fill_kevent; knote_fill_kevent_with_sdata will copy
 * kn_sdata as the output value.
 *
 * knote_fill_kevent_with_sdata() programmatically asserts
 * these aliasings are respected.
478 | */ |
479 | struct kevent_internal_s { |
480 | uint64_t kei_ident; /* identifier for this event */ |
481 | #ifdef __LITTLE_ENDIAN__ |
482 | int8_t kei_filter; /* filter for event */ |
483 | uint8_t kei_filtid; /* actual filter for event */ |
484 | #else |
485 | uint8_t kei_filtid; /* actual filter for event */ |
486 | int8_t kei_filter; /* filter for event */ |
487 | #endif |
488 | uint16_t kei_flags; /* general flags */ |
489 | int32_t kei_qos; /* quality of service */ |
490 | uint64_t kei_udata; /* opaque user data identifier */ |
491 | uint32_t kei_fflags; /* filter-specific flags */ |
492 | uint32_t kei_sfflags; /* knote: saved fflags */ |
493 | int64_t kei_sdata; /* knote: filter-specific saved data */ |
494 | uint64_t kei_ext[4]; /* filter-specific extensions */ |
495 | } kn_kevent; |
496 | |
497 | #define kn_id kn_kevent.kei_ident |
498 | #define kn_filtid kn_kevent.kei_filtid |
499 | #define kn_filter kn_kevent.kei_filter |
500 | #define kn_flags kn_kevent.kei_flags |
501 | #define kn_qos kn_kevent.kei_qos |
502 | #define kn_udata kn_kevent.kei_udata |
503 | #define kn_fflags kn_kevent.kei_fflags |
504 | #define kn_sfflags kn_kevent.kei_sfflags |
505 | #define kn_sdata kn_kevent.kei_sdata |
506 | #define kn_ext kn_kevent.kei_ext |
507 | }; |
508 | |
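/*
 * Return the kqueue this knote belongs to, unpacking the pointer stored in
 * the kn_kq_packed bitfield (see the KNOTE_KQ_PACKED_* parameters above).
 */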
509 | static inline struct kqueue * |
510 | knote_get_kq(struct knote *kn) |
511 | { |
512 | vm_offset_t ptr = VM_UNPACK_POINTER(kn->kn_kq_packed, KNOTE_KQ_PACKED); |
513 | return __unsafe_forge_single(struct kqueue *, ptr); |
514 | } |
515 | |
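/*
 * Map a read/write knote to the matching FREAD/FWRITE selector bit;
 * any other filter is a programming error and panics.
 */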
516 | static inline int |
517 | knote_get_seltype(struct knote *kn) |
518 | { |
519 | switch (kn->kn_filter) { |
520 | case EVFILT_READ: |
521 | return FREAD; |
522 | case EVFILT_WRITE: |
523 | return FWRITE; |
524 | default: |
525 | panic("%s(%p): invalid filter %d\n" , |
526 | __func__, kn, kn->kn_filter); |
527 | return 0; |
528 | } |
529 | } |
530 | |
531 | struct kevent_ctx_s { |
532 | uint64_t kec_data_avail; /* address of remaining data size */ |
533 | union { |
534 | user_addr_t kec_data_out; /* extra data pointer */ |
535 | struct pollfd *kec_poll_fds; /* poll fds */ |
536 | }; |
537 | user_size_t kec_data_size; /* total extra data size */ |
538 | user_size_t kec_data_resid; /* residual extra data size */ |
539 | uint64_t kec_deadline; /* wait deadline unless KEVENT_FLAG_IMMEDIATE */ |
540 | struct fileproc *kec_fp; /* fileproc to pass to fp_drop or NULL */ |
541 | int kec_fd; /* fd to pass to fp_drop or -1 */ |
542 | |
543 | /* the fields below are only set during process / scan */ |
544 | int kec_process_nevents; /* user-level event count */ |
545 | int kec_process_noutputs; /* number of events output */ |
546 | unsigned int kec_process_flags; /* kevent flags, only set for process */ |
547 | user_addr_t kec_process_eventlist; /* user-level event list address */ |
548 | }; |
549 | typedef struct kevent_ctx_s *kevent_ctx_t; |
550 | |
551 | kevent_ctx_t |
552 | kevent_get_context(thread_t thread); |
553 | |
554 | /* |
555 | * Filter operators |
556 | * |
 * These routines, provided by each filter, are called to attach and detach
 * knotes, change/update filter registration, and process/deliver events:
559 | * |
560 | * - the f_attach, f_touch, f_process and f_detach callbacks are always |
561 | * serialized with respect to each other for the same knote. |
562 | * |
563 | * - the f_event routine is called with a use-count taken on the knote to |
 * prolong its lifetime and protect against drop, but is not otherwise
565 | * serialized with other routine calls. |
566 | * |
567 | * - the f_detach routine is always called last, and is serialized with all |
568 | * other callbacks, including f_event calls. |
569 | * |
570 | * |
571 | * Here are more details: |
572 | * |
573 | * f_isfd - |
574 | * identifies if the "ident" field in the kevent structure is a file-descriptor. |
575 | * |
576 | * If so, the knote is associated with the file descriptor prior to attach and |
577 | * auto-removed when the file descriptor is closed (this latter behavior may change |
578 | * for EV_DISPATCH2 kevent types to allow delivery of events identifying unintended |
579 | * closes). |
580 | * |
581 | * Otherwise the knote is hashed by the ident and has no auto-close behavior. |
582 | * |
583 | * f_adjusts_qos - |
584 | * identifies if the filter can adjust its QoS during its lifetime. |
585 | * |
586 | * Filters using this facility should request the new overrides they want |
587 | * using the appropriate FILTER_{RESET,ADJUST}_EVENT_QOS extended codes. |
588 | * |
589 | * Currently, EVFILT_MACHPORT is the only filter using this facility. |
590 | * |
591 | * f_extended_codes - |
592 | * identifies if the filter returns extended codes from its routines |
593 | * (see FILTER_ACTIVE, ...) or 0 / 1 values. |
594 | * |
595 | * f_attach - |
596 | * called to attach the knote to the underlying object that will be delivering events |
597 | * through it when EV_ADD is supplied and no existing matching event is found |
598 | * |
 * The filter is provided a knote that is pre-attached to the fd or hashed
 * (see above) but specially marked to avoid concurrent access until the
 * attach is complete. The kevent structure embedded in this knote has been
 * filled in with a sanitized version of the user-supplied kevent data.
 * However, the user-supplied filter-specific flags (fflags) and data fields
 * have been moved into the knote's kn_sfflags and kn_sdata fields
 * respectively. These are usually interpreted as a set of "interest" flags
 * and data by each filter - to be matched against delivered events.
606 | * |
 * The attach operator indicates errors by setting the EV_ERROR flag in the flags field
608 | * embedded in the knote's kevent structure - with the specific error indicated in the |
609 | * corresponding data field. |
610 | * |
611 | * The return value indicates if the knote should already be considered "activated" at |
 * the time of attach (one or more of the interest events has already occurred).
613 | * |
614 | * f_detach - |
 * called to disassociate the knote from the underlying object delivering events.
 * The filter should not attempt to deliver events through this knote after this
617 | * operation returns control to the kq system. |
618 | * |
619 | * f_event - |
620 | * if the knote() function (or KNOTE() macro) is called against a list of knotes, |
621 | * this operator will be called on each knote in the list. |
622 | * |
623 | * The "hint" parameter is completely filter-specific, but usually indicates an |
 * event or set of events that have occurred against the source object associated
625 | * with the list. |
626 | * |
 * The return value indicates if the knote should be considered "activated" as
 * a result of this call (one or more of the interest events has occurred).
629 | * |
630 | * f_process - |
631 | * called when attempting to deliver triggered events to user-space. |
632 | * |
633 | * If the knote was previously activated, this operator will be called when a |
634 | * thread is trying to deliver events to user-space. The filter gets one last |
635 | * chance to determine if the event/events are still interesting for this knote |
636 | * (are the conditions still right to deliver an event). If so, the filter |
637 | * fills in the output kevent structure with the information to be delivered. |
638 | * |
639 | * The input context/data parameter is used during event delivery. Some |
640 | * filters allow additional data delivery as part of event delivery. This |
641 | * context field indicates if space was made available for these additional |
642 | * items and how that space is to be allocated/carved-out. |
643 | * |
644 | * The filter may set EV_CLEAR or EV_ONESHOT in the output flags field to indicate |
645 | * special post-delivery dispositions for the knote. |
646 | * |
647 | * EV_CLEAR - indicates that all matching events have been delivered. Even |
648 | * though there were events to deliver now, there will not be any |
649 | * more until some additional events are delivered to the knote |
650 | * via the f_event operator, or the interest set is changed via |
651 | * the f_touch operator. The knote can remain deactivated after |
652 | * processing this event delivery. |
653 | * |
654 | * EV_ONESHOT - indicates that this is the last event to be delivered via |
655 | * this knote. It will automatically be deleted upon delivery |
656 | * (or if in dispatch-mode, upon re-enablement after this delivery). |
657 | * |
658 | * The return value indicates if the knote has delivered an output event. |
659 | * Unless one of the special output flags was set in the output kevent, a non- |
660 | * zero return value ALSO indicates that the knote should be re-activated |
 * for future event processing (in case it delivers level-based events, or
 * multi-edge type events like message queues where more items already exist).
663 | * |
664 | * NOTE: In the future, the boolean may change to an enum that allows more |
665 | * explicit indication of just delivering a current event vs delivering |
666 | * an event with more events still pending. |
667 | * |
668 | * f_touch - |
669 | * called to update the knote with new state from the user during |
 * EV_ADD/EV_ENABLE/EV_DISABLE on an already-attached knote.
671 | * |
672 | * f_touch should copy relevant new data from the kevent into the knote. |
673 | * |
 * The operator must lock against concurrent f_event operations.
675 | * |
676 | * A return value of 1 indicates that the knote should now be considered |
677 | * 'activated'. |
678 | * |
 * f_touch can set EV_ERROR with a specific error in the data field to
 * return an error to the client. It should return 1 to indicate that
 * the kevent needs to be activated and processed.
682 | * |
683 | * f_allow_drop - |
684 | * |
685 | * [OPTIONAL] If this function is non-null, then it indicates that the |
686 | * filter wants to validate EV_DELETE events. This is necessary if |
687 | * a particular filter needs to synchronize knote deletion with its own |
688 | * filter lock. |
689 | * |
 * When true is returned, the EV_DELETE is allowed and can proceed.
691 | * |
692 | * If false is returned, the EV_DELETE doesn't proceed, and the passed in |
693 | * kevent is used for the copyout to userspace. |
694 | * |
695 | * Currently, EVFILT_WORKLOOP is the only filter using this facility. |
696 | * |
697 | * f_post_register_wait - |
698 | * [OPTIONAL] called when attach or touch return the FILTER_REGISTER_WAIT |
699 | * extended code bit. It is possible to use this facility when the last |
700 | * register command wants to wait. |
701 | * |
702 | * Currently, EVFILT_WORKLOOP is the only filter using this facility. |
703 | * |
704 | * f_sanitized_copyout - |
705 | * [OPTIONAL] If this function is non-null, then it should be used so |
706 | * that the filter can provide a sanitized copy of the current contents |
707 | * of a knote to userspace. This prevents leaking of any sensitive |
708 | * information like kernel pointers which might be stashed in filter |
709 | * specific data. |
710 | * |
711 | * Currently, EVFILT_MACHPORT uses this facility. |
712 | */ |
713 | |
714 | struct _kevent_register; |
715 | struct knote_lock_ctx; |
716 | struct proc; |
717 | struct uthread; |
718 | struct waitq; |
719 | struct thread_group; |
720 | |
721 | struct filterops { |
	bool f_isfd; /* true if ident == file descriptor */
723 | bool f_adjusts_qos; /* true if the filter can override the knote */ |
724 | bool f_extended_codes; /* hooks return extended codes */ |
725 | |
726 | int (*f_attach)(struct knote *kn, struct kevent_qos_s *kev); |
727 | void (*f_detach)(struct knote *kn); |
728 | int (*f_event)(struct knote *kn, long hint); |
729 | int (*f_touch)(struct knote *kn, struct kevent_qos_s *kev); |
730 | int (*f_process)(struct knote *kn, struct kevent_qos_s *kev); |
731 | |
732 | /* optional & advanced */ |
733 | bool (*f_allow_drop)(struct knote *kn, struct kevent_qos_s *kev); |
734 | void (*f_post_register_wait)(struct uthread *uth, struct knote *kn, |
735 | struct _kevent_register *ss_kr); |
736 | void (*f_sanitized_copyout)(struct knote *kn, struct kevent_qos_s *kev); |
737 | }; |
738 | |
739 | /* |
740 | * Extended codes returned by filter routines when f_extended_codes is set. |
741 | * |
742 | * FILTER_ACTIVE |
743 | * The filter is active and a call to f_process() may return an event. |
744 | * |
745 | * For f_process() the meaning is slightly different: the knote will be |
746 | * activated again as long as f_process returns FILTER_ACTIVE, unless |
 * EV_CLEAR is set, which requires a new f_event to reactivate the knote.
748 | * |
749 | * Valid: f_attach, f_event, f_touch, f_process |
750 | * Implicit: - |
751 | * Ignored: - |
752 | * |
753 | * FILTER_REGISTER_WAIT |
754 | * The filter wants its f_post_register_wait() to be called. |
755 | * |
756 | * Note: It is only valid to ask for this behavior for a workloop kqueue, |
757 | * and is really only meant to be used by EVFILT_WORKLOOP. |
758 | * |
759 | * Valid: f_attach, f_touch |
760 | * Implicit: - |
761 | * Ignored: f_event, f_process |
762 | * |
763 | * FILTER_UPDATE_REQ_QOS |
 * The filter wants the passed-in QoS to be used as the new intrinsic QoS
765 | * for this knote. If the kevent `qos` field is 0, no update is performed. |
766 | * |
767 | * This also will reset the event QoS, so FILTER_ADJUST_EVENT_QOS() must |
768 | * also be used if an override should be maintained. |
769 | * |
770 | * Note: when this is used in f_touch, the incoming qos validation |
 * is under the responsibility of the filter.
772 | * |
773 | * Valid: f_touch |
774 | * Implicit: f_attach |
775 | * Ignored: f_event, f_process |
776 | * |
777 | * FILTER_RESET_EVENT_QOS |
778 | * FILTER_ADJUST_EVENT_QOS(qos) |
779 | * The filter wants the QoS of the next event delivery to be overridden |
780 | * at the specified QoS. This allows for the next event QoS to be elevated |
781 | * from the knote requested qos (See FILTER_UPDATE_REQ_QOS). |
782 | * |
783 | * Event QoS Overrides are reset when a particular knote is no longer |
784 | * active. Hence this is ignored if FILTER_ACTIVE isn't also returned. |
785 | * |
786 | * Races between an f_event() and any other f_* routine asking for |
787 | * a specific QoS override are handled generically and the filters do not |
788 | * have to worry about them. |
789 | * |
790 | * To use this facility, filters MUST set their f_adjusts_qos bit to true. |
791 | * |
792 | * It is expected that filters will return the new QoS they expect to be |
 * applied from any f_* callback except for f_process(), where no specific
 * information should be provided. Filters should not try to hide no-ops;
 * kevent will already optimize these away.
796 | * |
797 | * Valid: f_touch, f_attach, f_event, f_process |
798 | * Implicit: - |
799 | * Ignored: - |
800 | * |
801 | * FILTER_THREADREQ_NODEFEER |
802 | * The filter has moved a turnstile priority push away from the current |
803 | * thread, preemption has been disabled, and thread requests need to be |
 * committed before preemption is re-enabled.
 *
807 | * Valid: f_attach, f_touch |
808 | * Implicit: - |
809 | * Invalid: f_event, f_process |
810 | */ |
811 | #define FILTER_ACTIVE 0x00000001 |
812 | #define FILTER_REGISTER_WAIT 0x00000002 |
813 | #define FILTER_UPDATE_REQ_QOS 0x00000004 |
814 | #define FILTER_ADJUST_EVENT_QOS_BIT 0x00000008 |
815 | #define FILTER_ADJUST_EVENT_QOS_MASK 0x00000070 |
816 | #define FILTER_ADJUST_EVENT_QOS_SHIFT 4 |
817 | #define FILTER_ADJUST_EVENT_QOS(qos) \ |
818 | (((qos) << FILTER_ADJUST_EVENT_QOS_SHIFT) | FILTER_ADJUST_EVENT_QOS_BIT) |
819 | #define FILTER_GET_EVENT_QOS(result) \ |
820 | ((result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & THREAD_QOS_LAST) |
821 | #define FILTER_RESET_EVENT_QOS FILTER_ADJUST_EVENT_QOS_BIT |
822 | #define FILTER_THREADREQ_NODEFEER 0x00000080 |
823 | #define FILTER_ADJUST_EVENT_IOTIER_BIT 0x00000100 |
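
/*
 * An illustrative sketch of an f_event() for a filter that sets both
 * f_adjusts_qos and f_extended_codes (qos_from_hint() is a hypothetical
 * helper deriving a thread_qos_t from the filter-specific hint):
 *
 *     static int
 *     filt_myevent(struct knote *kn, long hint)
 *     {
 *         thread_qos_t qos = qos_from_hint(hint);
 *         return FILTER_ACTIVE | FILTER_ADJUST_EVENT_QOS(qos);
 *     }
 */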
824 | |
825 | #define filter_call(_ops, call) \ |
826 | ((_ops)->f_extended_codes ? (_ops)->call : !!((_ops)->call)) |
827 | |
828 | SLIST_HEAD(klist, knote); |
829 | extern void knote_init(void); |
830 | extern void klist_init(struct klist *list); |
831 | |
832 | #define KNOTE(list, hint) knote(list, hint, false) |
833 | #define KNOTE_ATTACH(list, kn) knote_attach(list, kn) |
834 | #define KNOTE_DETACH(list, kn) knote_detach(list, kn) |
835 | |
836 | extern void knote(struct klist *list, long hint, bool autodetach); |
837 | extern int knote_attach(struct klist *list, struct knote *kn); |
838 | extern int knote_detach(struct klist *list, struct knote *kn); |
839 | extern void knote_vanish(struct klist *list, bool make_active); |
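
/*
 * Typical producer-side sketch (hypothetical `struct my_source`, illustrative
 * only): an event source keeps a klist of attached knotes and posts to it on
 * each state change.
 *
 *     struct my_source {
 *         struct klist ms_klist;
 *     };
 *
 *     klist_init(&src->ms_klist);          // once, at source creation
 *     KNOTE(&src->ms_klist, hint);         // on each state change
 *     knote_vanish(&src->ms_klist, true);  // when the source goes away
 */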
840 | |
841 | extern void knote_set_error(struct knote *kn, int error); |
842 | extern int64_t knote_low_watermark(const struct knote *kn) __pure2; |
843 | extern void knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev); |
844 | extern void knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data); |
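
/*
 * A sketch of the intended f_process() pattern (my_source_readable() and
 * my_source_bytes() are hypothetical helpers):
 *
 *     static int
 *     filt_myprocess(struct knote *kn, struct kevent_qos_s *kev)
 *     {
 *         if (!my_source_readable(kn)) {
 *             return 0;                    // nothing to deliver
 *         }
 *         knote_fill_kevent(kn, kev, my_source_bytes(kn));
 *         return FILTER_ACTIVE;
 *     }
 */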
845 | |
846 | extern void *knote_kn_hook_get_raw(struct knote *kn); |
847 | // Must be called after having specified the filtid + filter in the knote |
848 | extern void knote_kn_hook_set_raw(struct knote *kn, void *kn_hook); |
849 | |
850 | extern void knote_fdclose(struct proc *p, int fd); |
851 | extern const struct filterops *knote_fops(struct knote *kn); |
852 | |
853 | extern struct turnstile *kqueue_turnstile(struct kqueue *); |
854 | extern struct turnstile *kqueue_alloc_turnstile(struct kqueue *); |
855 | extern void kqueue_set_iotier_override(struct kqueue *kqu, uint8_t iotier_override); |
856 | extern uint8_t kqueue_get_iotier_override(struct kqueue *kqu); |
857 | |
858 | int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize); |
859 | #if CONFIG_PREADOPT_TG |
860 | extern void kqueue_set_preadopted_thread_group(struct kqueue *kq, struct thread_group *tg, thread_qos_t qos); |
861 | extern bool kqueue_process_preadopt_thread_group(thread_t t, struct kqueue *kq, struct thread_group *tg); |
862 | #endif |
863 | |
864 | int kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, |
865 | uint32_t ubufsize, int32_t *nkqueues_out); |
866 | int kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf, |
867 | uint32_t ubufsize, int32_t *size_out); |
868 | int kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf, |
869 | uint32_t ubufsize, int32_t *nknotes_out); |
870 | |
871 | extern int filt_wlattach_sync_ipc(struct knote *kn); |
872 | extern void filt_wldetach_sync_ipc(struct knote *kn); |
873 | |
874 | extern int kevent_workq_internal(struct proc *p, |
875 | user_addr_t changelist, int nchanges, |
876 | user_addr_t eventlist, int nevents, |
877 | user_addr_t data_out, user_size_t *data_available, |
878 | unsigned int flags, int32_t *retval); |
879 | |
880 | #elif defined(KERNEL_PRIVATE) /* !XNU_KERNEL_PRIVATE: kexts still need a klist structure definition */ |
881 | |
882 | struct proc; |
883 | struct knote; |
884 | SLIST_HEAD(klist, knote); |
885 | |
886 | #endif /* !XNU_KERNEL_PRIVATE && KERNEL_PRIVATE */ |
887 | |
888 | #else /* KERNEL_PRIVATE */ |
889 | |
890 | __BEGIN_DECLS |
891 | int kevent_qos(int kq, |
892 | const struct kevent_qos_s *changelist, int nchanges, |
893 | struct kevent_qos_s *eventlist, int nevents, |
894 | void *data_out, size_t *data_available, |
895 | unsigned int flags); |
896 | |
897 | int kevent_id(kqueue_id_t id, |
898 | const struct kevent_qos_s *changelist, int nchanges, |
899 | struct kevent_qos_s *eventlist, int nevents, |
900 | void *data_out, size_t *data_available, |
901 | unsigned int flags); |
902 | |
903 | __END_DECLS |
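
/*
 * A hedged usage sketch (illustrative only; `my_workloop_id` is a
 * caller-chosen kqueue_id_t, and the flag combination shown is an
 * assumption, not the only valid one): creating a dynamic workloop
 * kqueue by ID while registering a change on it.
 *
 *     struct kevent_qos_s kev = { ... };   // see the sketches above
 *     int nev = kevent_id(my_workloop_id,
 *         &kev, 1,                         // changelist
 *         NULL, 0,                         // no eventlist
 *         NULL, NULL,                      // no extra data
 *         KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS |
 *         KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST);
 */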
904 | |
905 | |
906 | #endif /* KERNEL_PRIVATE */ |
907 | |
/* Flags for pending events notified by the kernel via the return-to-kernel AST */
909 | #define R2K_WORKLOOP_PENDING_EVENTS 0x1 |
910 | #define R2K_WORKQ_PENDING_EVENTS 0x2 |
911 | |
912 | /* Flags for notifying what to do when there is a workqueue quantum expiry */ |
913 | #define PTHREAD_WQ_QUANTUM_EXPIRY_NARROW 0x1 |
914 | #define PTHREAD_WQ_QUANTUM_EXPIRY_SHUFFLE 0x2 |
915 | |
916 | #endif /* !_SYS_EVENT_PRIVATE_H_ */ |
917 | |