1 | /* |
2 | * Copyright (c) 2000-2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | * |
28 | */ |
29 | /*- |
30 | * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> |
31 | * All rights reserved. |
32 | * |
33 | * Redistribution and use in source and binary forms, with or without |
34 | * modification, are permitted provided that the following conditions |
35 | * are met: |
36 | * 1. Redistributions of source code must retain the above copyright |
37 | * notice, this list of conditions and the following disclaimer. |
38 | * 2. Redistributions in binary form must reproduce the above copyright |
39 | * notice, this list of conditions and the following disclaimer in the |
40 | * documentation and/or other materials provided with the distribution. |
41 | * |
42 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
43 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
44 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
45 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
46 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
47 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
48 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
49 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
50 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
51 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
52 | * SUCH DAMAGE. |
53 | */ |
54 | /* |
55 | * @(#)kern_event.c 1.0 (3/31/2000) |
56 | */ |
57 | #include <stdint.h> |
58 | #include <machine/atomic.h> |
59 | |
60 | #include <sys/param.h> |
61 | #include <sys/systm.h> |
62 | #include <sys/filedesc.h> |
63 | #include <sys/kernel.h> |
64 | #include <sys/proc_internal.h> |
65 | #include <sys/kauth.h> |
66 | #include <sys/malloc.h> |
67 | #include <sys/unistd.h> |
68 | #include <sys/file_internal.h> |
69 | #include <sys/fcntl.h> |
70 | #include <sys/select.h> |
71 | #include <sys/queue.h> |
72 | #include <sys/event.h> |
73 | #include <sys/eventvar.h> |
74 | #include <sys/protosw.h> |
75 | #include <sys/socket.h> |
76 | #include <sys/socketvar.h> |
77 | #include <sys/stat.h> |
78 | #include <sys/syscall.h> // SYS_* constants |
79 | #include <sys/sysctl.h> |
80 | #include <sys/uio.h> |
81 | #include <sys/sysproto.h> |
82 | #include <sys/user.h> |
83 | #include <sys/vnode_internal.h> |
84 | #include <string.h> |
85 | #include <sys/proc_info.h> |
86 | #include <sys/codesign.h> |
87 | #include <sys/pthread_shims.h> |
88 | #include <sys/kdebug.h> |
89 | #include <os/base.h> |
90 | #include <pexpert/pexpert.h> |
91 | |
92 | #include <kern/thread_group.h> |
93 | #include <kern/locks.h> |
94 | #include <kern/clock.h> |
95 | #include <kern/cpu_data.h> |
96 | #include <kern/policy_internal.h> |
97 | #include <kern/thread_call.h> |
98 | #include <kern/sched_prim.h> |
99 | #include <kern/waitq.h> |
100 | #include <kern/zalloc.h> |
101 | #include <kern/kalloc.h> |
102 | #include <kern/assert.h> |
103 | #include <kern/ast.h> |
104 | #include <kern/thread.h> |
105 | #include <kern/kcdata.h> |
106 | #include <kern/work_interval.h> |
107 | |
108 | #include <pthread/priority_private.h> |
109 | #include <pthread/workqueue_syscalls.h> |
110 | #include <pthread/workqueue_internal.h> |
111 | #include <libkern/libkern.h> |
112 | |
113 | #include <os/log.h> |
114 | |
115 | #include "net/net_str_id.h" |
116 | |
117 | #if SKYWALK && defined(XNU_TARGET_OS_OSX) |
118 | #include <skywalk/lib/net_filter_event.h> |
119 | |
120 | extern bool net_check_compatible_alf(void); |
121 | #endif /* SKYWALK && XNU_TARGET_OS_OSX */ |
122 | |
123 | #include <mach/task.h> |
124 | #include <libkern/section_keywords.h> |
125 | |
126 | #if CONFIG_MEMORYSTATUS |
127 | #include <sys/kern_memorystatus.h> |
128 | #endif |
129 | |
130 | #if DEVELOPMENT || DEBUG |
131 | #define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK (1U << 0) |
132 | #define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS (1U << 1) |
TUNABLE(uint32_t, kevent_debug_flags, "kevent_debug", 0);
134 | #endif |
135 | |
static LCK_GRP_DECLARE(kq_lck_grp, "kqueue");
137 | SECURITY_READ_ONLY_EARLY(vm_packing_params_t) kn_kq_packing_params = |
138 | VM_PACKING_PARAMS(KNOTE_KQ_PACKED); |
139 | |
140 | extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */ |
141 | extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); /* bsd/kern/kern_sig.c */ |
142 | |
143 | #define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code)) |
144 | |
145 | static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id, |
146 | vfs_context_t ctx); |
147 | static int kqueue_close(struct fileglob *fg, vfs_context_t ctx); |
148 | static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn, |
149 | struct kevent_qos_s *kev); |
150 | static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx); |
151 | |
152 | static const struct fileops kqueueops = { |
153 | .fo_type = DTYPE_KQUEUE, |
154 | .fo_read = fo_no_read, |
155 | .fo_write = fo_no_write, |
156 | .fo_ioctl = fo_no_ioctl, |
157 | .fo_select = kqueue_select, |
158 | .fo_close = kqueue_close, |
159 | .fo_drain = kqueue_drain, |
160 | .fo_kqfilter = kqueue_kqfilter, |
161 | }; |
162 | |
163 | static inline int kevent_modern_copyout(struct kevent_qos_s *, user_addr_t *); |
164 | static int kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int result); |
165 | static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread, |
166 | thread_continue_t cont, struct _kevent_register *cont_args) __dead2; |
167 | static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2; |
168 | static void kevent_register_wait_cleanup(struct knote *kn); |
169 | |
170 | static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn); |
171 | static void kqueue_threadreq_initiate(struct kqueue *kq, workq_threadreq_t, kq_index_t qos, int flags); |
172 | |
173 | static void kqworkq_unbind(proc_t p, workq_threadreq_t); |
174 | static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq, workq_threadreq_t, thread_t thread); |
175 | static workq_threadreq_t kqworkq_get_request(struct kqworkq *kqwq, kq_index_t qos_index); |
176 | static void kqueue_update_iotier_override(kqueue_t kqu); |
177 | |
178 | static void kqworkloop_unbind(struct kqworkloop *kwql); |
179 | |
180 | enum kqwl_unbind_locked_mode { |
181 | KQWL_OVERRIDE_DROP_IMMEDIATELY, |
182 | KQWL_OVERRIDE_DROP_DELAYED, |
183 | }; |
184 | static void kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread, |
185 | enum kqwl_unbind_locked_mode how); |
186 | static void kqworkloop_unbind_delayed_override_drop(thread_t thread); |
187 | static kq_index_t kqworkloop_override(struct kqworkloop *kqwl); |
188 | static void kqworkloop_set_overcommit(struct kqworkloop *kqwl); |
189 | enum { |
190 | KQWL_UTQ_NONE, |
191 | /* |
192 | * The wakeup qos is the qos of QUEUED knotes. |
193 | * |
194 | * This QoS is accounted for with the events override in the |
195 | * kqr_override_index field. It is raised each time a new knote is queued at |
 * a given QoS. The kqwl_wakeup_qos field is a superset of the non-empty
197 | * knote buckets and is recomputed after each event delivery. |
198 | */ |
199 | KQWL_UTQ_UPDATE_WAKEUP_QOS, |
200 | KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, |
201 | KQWL_UTQ_UNBINDING, /* attempt to rebind */ |
202 | KQWL_UTQ_PARKING, |
203 | /* |
204 | * The wakeup override is for suppressed knotes that have fired again at |
205 | * a higher QoS than the one for which they are suppressed already. |
206 | * This override is cleared when the knote suppressed list becomes empty. |
207 | */ |
208 | KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE, |
209 | KQWL_UTQ_RESET_WAKEUP_OVERRIDE, |
210 | /* |
211 | * The QoS is the maximum QoS of an event enqueued on this workloop in |
 * userland. It is copied from the only EVFILT_WORKLOOP knote with the
 * NOTE_WL_THREAD_REQUEST bit set that is allowed on this workloop. If there
 * is no such knote, this QoS is 0.
215 | */ |
216 | KQWL_UTQ_SET_QOS_INDEX, |
217 | KQWL_UTQ_REDRIVE_EVENTS, |
218 | }; |
219 | static void kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos); |
220 | static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags); |
221 | |
222 | static struct knote *knote_alloc(void); |
223 | static void knote_free(struct knote *kn); |
224 | static int kq_add_knote(struct kqueue *kq, struct knote *kn, |
225 | struct knote_lock_ctx *knlc, struct proc *p); |
226 | static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq, |
227 | struct kevent_qos_s *kev, bool is_fd, struct proc *p); |
228 | |
229 | static void knote_activate(kqueue_t kqu, struct knote *kn, int result); |
230 | static void knote_dequeue(kqueue_t kqu, struct knote *kn); |
231 | |
232 | static void knote_apply_touch(kqueue_t kqu, struct knote *kn, |
233 | struct kevent_qos_s *kev, int result); |
234 | static void knote_suppress(kqueue_t kqu, struct knote *kn); |
235 | static void knote_unsuppress(kqueue_t kqu, struct knote *kn); |
236 | static void knote_drop(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc); |
237 | |
238 | // both these functions may dequeue the knote and it is up to the caller |
239 | // to enqueue the knote back |
240 | static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result); |
241 | static void knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp); |
242 | |
static ZONE_DEFINE(knote_zone, "knote zone",
    sizeof(struct knote), ZC_CACHING | ZC_ZFREE_CLEARMEM);
static ZONE_DEFINE(kqfile_zone, "kqueue file zone",
    sizeof(struct kqfile), ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
static ZONE_DEFINE(kqworkq_zone, "kqueue workq zone",
    sizeof(struct kqworkq), ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
static ZONE_DEFINE(kqworkloop_zone, "kqueue workloop zone",
    sizeof(struct kqworkloop), ZC_CACHING | ZC_ZFREE_CLEARMEM | ZC_NOTBITAG);
251 | |
252 | #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) |
253 | |
254 | static int filt_no_attach(struct knote *kn, struct kevent_qos_s *kev); |
255 | static void filt_no_detach(struct knote *kn); |
256 | static int filt_bad_event(struct knote *kn, long hint); |
257 | static int filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev); |
258 | static int filt_bad_process(struct knote *kn, struct kevent_qos_s *kev); |
259 | |
260 | SECURITY_READ_ONLY_EARLY(static struct filterops) bad_filtops = { |
261 | .f_attach = filt_no_attach, |
262 | .f_detach = filt_no_detach, |
263 | .f_event = filt_bad_event, |
264 | .f_touch = filt_bad_touch, |
265 | .f_process = filt_bad_process, |
266 | }; |
267 | |
268 | #if CONFIG_MEMORYSTATUS |
269 | extern const struct filterops memorystatus_filtops; |
270 | #endif /* CONFIG_MEMORYSTATUS */ |
271 | extern const struct filterops fs_filtops; |
272 | extern const struct filterops sig_filtops; |
273 | extern const struct filterops machport_attach_filtops; |
274 | extern const struct filterops mach_port_filtops; |
275 | extern const struct filterops mach_port_set_filtops; |
276 | extern const struct filterops pipe_nfiltops; |
277 | extern const struct filterops pipe_rfiltops; |
278 | extern const struct filterops pipe_wfiltops; |
279 | extern const struct filterops ptsd_kqops; |
280 | extern const struct filterops ptmx_kqops; |
281 | extern const struct filterops soread_filtops; |
282 | extern const struct filterops sowrite_filtops; |
283 | extern const struct filterops sock_filtops; |
284 | extern const struct filterops soexcept_filtops; |
285 | extern const struct filterops spec_filtops; |
286 | extern const struct filterops bpfread_filtops; |
287 | extern const struct filterops necp_fd_rfiltops; |
288 | #if SKYWALK |
289 | extern const struct filterops skywalk_channel_rfiltops; |
290 | extern const struct filterops skywalk_channel_wfiltops; |
291 | extern const struct filterops skywalk_channel_efiltops; |
292 | #endif /* SKYWALK */ |
293 | extern const struct filterops fsevent_filtops; |
294 | extern const struct filterops vnode_filtops; |
295 | extern const struct filterops tty_filtops; |
296 | |
297 | const static struct filterops file_filtops; |
298 | const static struct filterops kqread_filtops; |
299 | const static struct filterops proc_filtops; |
300 | const static struct filterops timer_filtops; |
301 | const static struct filterops user_filtops; |
302 | const static struct filterops workloop_filtops; |
303 | #if CONFIG_EXCLAVES |
304 | extern const struct filterops exclaves_notification_filtops; |
305 | #endif /* CONFIG_EXCLAVES */ |
306 | |
307 | /* |
308 | * |
309 | * Rules for adding new filters to the system: |
310 | * Public filters: |
311 | * - Add a new "EVFILT_" option value to bsd/sys/event.h (typically a negative value) |
312 | * in the exported section of the header |
313 | * - Update the EVFILT_SYSCOUNT value to reflect the new addition |
314 | * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end |
315 | * of the Public Filters section in the array. |
316 | * Private filters: |
317 | * - Add a new "EVFILT_" value to bsd/sys/event_private.h (typically a positive value) |
318 | * - Update the EVFILTID_MAX value to reflect the new addition |
319 | * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of |
320 | * the Private filters section of the array. |
321 | */ |
static_assert(EVFILTID_MAX < UINT8_MAX, "kn_filtid expects this to be true");
323 | static const struct filterops * const sysfilt_ops[EVFILTID_MAX] = { |
324 | /* Public Filters */ |
325 | [~EVFILT_READ] = &file_filtops, |
326 | [~EVFILT_WRITE] = &file_filtops, |
327 | [~EVFILT_AIO] = &bad_filtops, |
328 | [~EVFILT_VNODE] = &file_filtops, |
329 | [~EVFILT_PROC] = &proc_filtops, |
330 | [~EVFILT_SIGNAL] = &sig_filtops, |
331 | [~EVFILT_TIMER] = &timer_filtops, |
332 | [~EVFILT_MACHPORT] = &machport_attach_filtops, |
333 | [~EVFILT_FS] = &fs_filtops, |
334 | [~EVFILT_USER] = &user_filtops, |
335 | [~EVFILT_UNUSED_11] = &bad_filtops, |
336 | [~EVFILT_VM] = &bad_filtops, |
337 | [~EVFILT_SOCK] = &file_filtops, |
338 | #if CONFIG_MEMORYSTATUS |
339 | [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops, |
340 | #else |
341 | [~EVFILT_MEMORYSTATUS] = &bad_filtops, |
342 | #endif |
343 | [~EVFILT_EXCEPT] = &file_filtops, |
344 | #if SKYWALK |
345 | [~EVFILT_NW_CHANNEL] = &file_filtops, |
346 | #else /* !SKYWALK */ |
347 | [~EVFILT_NW_CHANNEL] = &bad_filtops, |
348 | #endif /* !SKYWALK */ |
349 | [~EVFILT_WORKLOOP] = &workloop_filtops, |
350 | #if CONFIG_EXCLAVES |
351 | [~EVFILT_EXCLAVES_NOTIFICATION] = &exclaves_notification_filtops, |
352 | #else /* !CONFIG_EXCLAVES */ |
353 | [~EVFILT_EXCLAVES_NOTIFICATION] = &bad_filtops, |
354 | #endif /* CONFIG_EXCLAVES*/ |
355 | |
356 | /* Private filters */ |
357 | [EVFILTID_KQREAD] = &kqread_filtops, |
358 | [EVFILTID_PIPE_N] = &pipe_nfiltops, |
359 | [EVFILTID_PIPE_R] = &pipe_rfiltops, |
360 | [EVFILTID_PIPE_W] = &pipe_wfiltops, |
361 | [EVFILTID_PTSD] = &ptsd_kqops, |
362 | [EVFILTID_SOREAD] = &soread_filtops, |
363 | [EVFILTID_SOWRITE] = &sowrite_filtops, |
364 | [EVFILTID_SCK] = &sock_filtops, |
365 | [EVFILTID_SOEXCEPT] = &soexcept_filtops, |
366 | [EVFILTID_SPEC] = &spec_filtops, |
367 | [EVFILTID_BPFREAD] = &bpfread_filtops, |
368 | [EVFILTID_NECP_FD] = &necp_fd_rfiltops, |
369 | #if SKYWALK |
370 | [EVFILTID_SKYWALK_CHANNEL_W] = &skywalk_channel_wfiltops, |
371 | [EVFILTID_SKYWALK_CHANNEL_R] = &skywalk_channel_rfiltops, |
372 | [EVFILTID_SKYWALK_CHANNEL_E] = &skywalk_channel_efiltops, |
373 | #else /* !SKYWALK */ |
374 | [EVFILTID_SKYWALK_CHANNEL_W] = &bad_filtops, |
375 | [EVFILTID_SKYWALK_CHANNEL_R] = &bad_filtops, |
376 | [EVFILTID_SKYWALK_CHANNEL_E] = &bad_filtops, |
377 | #endif /* !SKYWALK */ |
378 | [EVFILTID_FSEVENT] = &fsevent_filtops, |
379 | [EVFILTID_VN] = &vnode_filtops, |
380 | [EVFILTID_TTY] = &tty_filtops, |
381 | [EVFILTID_PTMX] = &ptmx_kqops, |
382 | [EVFILTID_MACH_PORT] = &mach_port_filtops, |
383 | [EVFILTID_MACH_PORT_SET] = &mach_port_set_filtops, |
384 | |
385 | /* fake filter for detached knotes, keep last */ |
386 | [EVFILTID_DETACHED] = &bad_filtops, |
387 | }; |
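/*
 * Illustrative sketch only (EVFILTID_FOO and foo_filtops are hypothetical,
 * not real kernel symbols): following the rules above, a new private filter
 * would add an EVFILTID_FOO value before EVFILTID_MAX in
 * bsd/sys/event_private.h, provide its filterops, and register it at the end
 * of the Private filters section of the sysfilt_ops array just closed:
 *
 *     extern const struct filterops foo_filtops;
 *     ...
 *     [EVFILTID_FOO] = &foo_filtops,
 *
 * The EVFILTID_DETACHED entry must remain the last slot in the array.
 */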
388 | |
389 | static inline bool |
390 | kqr_thread_bound(workq_threadreq_t kqr) |
391 | { |
392 | return kqr->tr_state == WORKQ_TR_STATE_BOUND; |
393 | } |
394 | |
395 | static inline bool |
396 | kqr_thread_requested_pending(workq_threadreq_t kqr) |
397 | { |
398 | workq_tr_state_t tr_state = kqr->tr_state; |
399 | return tr_state > WORKQ_TR_STATE_IDLE && tr_state < WORKQ_TR_STATE_BOUND; |
400 | } |
401 | |
402 | static inline bool |
403 | kqr_thread_requested(workq_threadreq_t kqr) |
404 | { |
405 | return kqr->tr_state != WORKQ_TR_STATE_IDLE; |
406 | } |
407 | |
408 | static inline thread_t |
409 | kqr_thread_fast(workq_threadreq_t kqr) |
410 | { |
411 | assert(kqr_thread_bound(kqr)); |
412 | return kqr->tr_thread; |
413 | } |
414 | |
415 | static inline thread_t |
416 | kqr_thread(workq_threadreq_t kqr) |
417 | { |
418 | return kqr_thread_bound(kqr) ? kqr->tr_thread : THREAD_NULL; |
419 | } |
420 | |
421 | static inline struct kqworkloop * |
422 | kqr_kqworkloop(workq_threadreq_t kqr) |
423 | { |
424 | if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) { |
425 | return __container_of(kqr, struct kqworkloop, kqwl_request); |
426 | } |
427 | return NULL; |
428 | } |
429 | |
430 | static inline kqueue_t |
431 | kqr_kqueue(proc_t p, workq_threadreq_t kqr) |
432 | { |
433 | kqueue_t kqu; |
434 | if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) { |
435 | kqu.kqwl = kqr_kqworkloop(kqr); |
436 | } else { |
437 | kqu.kqwq = p->p_fd.fd_wqkqueue; |
438 | assert(kqr >= kqu.kqwq->kqwq_request && |
439 | kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS); |
440 | } |
441 | return kqu; |
442 | } |
443 | |
444 | #if CONFIG_PREADOPT_TG |
445 | /* There are no guarantees about which locks are held when this is called */ |
446 | inline thread_group_qos_t |
447 | kqr_preadopt_thread_group(workq_threadreq_t req) |
448 | { |
struct kqworkloop *kqwl = kqr_kqworkloop(req);
450 | return kqwl ? os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed) : NULL; |
451 | } |
452 | |
453 | /* There are no guarantees about which locks are held when this is called */ |
454 | inline _Atomic(thread_group_qos_t) * |
455 | kqr_preadopt_thread_group_addr(workq_threadreq_t req) |
456 | { |
struct kqworkloop *kqwl = kqr_kqworkloop(req);
458 | return kqwl ? (&kqwl->kqwl_preadopt_tg) : NULL; |
459 | } |
460 | #endif |
461 | |
462 | /* |
463 | * kqueue/note lock implementations |
464 | * |
465 | * The kqueue lock guards the kq state, the state of its queues, |
466 | * and the kqueue-aware status and locks of individual knotes. |
467 | * |
468 | * The kqueue workq lock is used to protect state guarding the |
469 | * interaction of the kqueue with the workq. This state cannot |
470 | * be guarded by the kq lock - as it needs to be taken when we |
471 | * already have the waitq set lock held (during the waitq hook |
472 | * callback). It might be better to use the waitq lock itself |
 * for this, but the IRQ requirements make that difficult.
474 | * |
475 | * Knote flags, filter flags, and associated data are protected |
476 | * by the underlying object lock - and are only ever looked at |
477 | * by calling the filter to get a [consistent] snapshot of that |
478 | * data. |
479 | */ |
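/*
 * As a rough sketch of how the layers interact (obj, obj->lock and
 * obj->klist are placeholders, not real kernel symbols): the subsystem that
 * owns a monitored object holds its own object lock while posting, and
 * knote_post() below then takes the kq lock only long enough to update the
 * knote status and activate it:
 *
 *     lck_mtx_lock(&obj->lock);          object lock, owned by the filter
 *     obj->state |= OBJ_READY;
 *     KNOTE(&obj->klist, hint);          ends up in knote_post()
 *     lck_mtx_unlock(&obj->lock);
 */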
480 | |
481 | static inline void |
482 | kqlock(kqueue_t kqu) |
483 | { |
lck_spin_lock(&kqu.kq->kq_lock);
485 | } |
486 | |
487 | static inline void |
488 | kqlock_held(__assert_only kqueue_t kqu) |
489 | { |
490 | LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED); |
491 | } |
492 | |
493 | static inline void |
494 | kqunlock(kqueue_t kqu) |
495 | { |
lck_spin_unlock(&kqu.kq->kq_lock);
497 | } |
498 | |
499 | static inline void |
500 | knhash_lock(struct filedesc *fdp) |
501 | { |
lck_mtx_lock(&fdp->fd_knhashlock);
503 | } |
504 | |
505 | static inline void |
506 | knhash_unlock(struct filedesc *fdp) |
507 | { |
lck_mtx_unlock(&fdp->fd_knhashlock);
509 | } |
510 | |
511 | /* wait event for knote locks */ |
512 | static inline event_t |
513 | knote_lock_wev(struct knote *kn) |
514 | { |
515 | return (event_t)(&kn->kn_hook); |
516 | } |
517 | |
518 | /* wait event for kevent_register_wait_* */ |
519 | static inline event64_t |
520 | knote_filt_wev64(struct knote *kn) |
521 | { |
522 | /* kdp_workloop_sync_wait_find_owner knows about this */ |
523 | return CAST_EVENT64_T(kn); |
524 | } |
525 | |
526 | /* wait event for knote_post/knote_drop */ |
527 | static inline event_t |
528 | knote_post_wev(struct knote *kn) |
529 | { |
530 | return &kn->kn_kevent; |
531 | } |
532 | |
533 | /*! |
534 | * @function knote_has_qos |
535 | * |
536 | * @brief |
537 | * Whether the knote has a regular QoS. |
538 | * |
539 | * @discussion |
540 | * kn_qos_override is: |
541 | * - 0 on kqfiles |
542 | * - THREAD_QOS_LAST for special buckets (manager) |
543 | * |
 * Other values mean the knote participates in QoS propagation.
545 | */ |
546 | static inline bool |
547 | knote_has_qos(struct knote *kn) |
548 | { |
549 | return kn->kn_qos_override > 0 && kn->kn_qos_override < THREAD_QOS_LAST; |
550 | } |
551 | |
552 | #pragma mark knote locks |
553 | |
554 | /* |
555 | * Enum used by the knote_lock_* functions. |
556 | * |
557 | * KNOTE_KQ_LOCK_ALWAYS |
558 | * The function will always return with the kq lock held. |
559 | * |
560 | * KNOTE_KQ_LOCK_ON_SUCCESS |
561 | * The function will return with the kq lock held if it was successful |
562 | * (knote_lock() is the only function that can fail). |
563 | * |
564 | * KNOTE_KQ_LOCK_ON_FAILURE |
565 | * The function will return with the kq lock held if it was unsuccessful |
566 | * (knote_lock() is the only function that can fail). |
567 | * |
568 | * KNOTE_KQ_UNLOCK: |
569 | * The function returns with the kq unlocked. |
570 | */ |
571 | enum kqlocking { |
572 | KNOTE_KQ_LOCK_ALWAYS, |
573 | KNOTE_KQ_LOCK_ON_SUCCESS, |
574 | KNOTE_KQ_LOCK_ON_FAILURE, |
575 | KNOTE_KQ_UNLOCK, |
576 | }; |
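/*
 * Typical usage sketch (simplified, knlc is a caller-owned
 * struct knote_lock_ctx): a caller that only needs the kq lock while it
 * still owns the knote would pair the modes like this:
 *
 *     kqlock(kq);
 *     if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) {
 *         return;                        dropped concurrently, kq unlocked
 *     }
 *     ... operate on the locked knote ...
 *     knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK);
 */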
577 | |
578 | static struct knote_lock_ctx * |
579 | knote_lock_ctx_find(kqueue_t kqu, struct knote *kn) |
580 | { |
581 | struct knote_lock_ctx *ctx; |
582 | LIST_FOREACH(ctx, &kqu.kq->kq_knlocks, knlc_link) { |
583 | if (ctx->knlc_knote == kn) { |
584 | return ctx; |
585 | } |
586 | } |
587 | panic("knote lock context not found: %p" , kn); |
588 | __builtin_trap(); |
589 | } |
590 | |
591 | /* slowpath of knote_lock() */ |
592 | __attribute__((noinline)) |
593 | static bool __result_use_check |
594 | knote_lock_slow(kqueue_t kqu, struct knote *kn, |
595 | struct knote_lock_ctx *knlc, int kqlocking) |
596 | { |
597 | struct knote_lock_ctx *owner_lc; |
598 | struct uthread *uth = current_uthread(); |
599 | wait_result_t wr; |
600 | |
601 | kqlock_held(kqu); |
602 | |
603 | owner_lc = knote_lock_ctx_find(kqu, kn); |
604 | #if DEBUG || DEVELOPMENT |
605 | knlc->knlc_state = KNOTE_LOCK_CTX_WAITING; |
606 | #endif |
607 | owner_lc->knlc_waiters++; |
608 | |
609 | /* |
610 | * Make our lock context visible to knote_unlock() |
611 | */ |
612 | uth->uu_knlock = knlc; |
613 | |
wr = lck_spin_sleep_with_inheritor(&kqu.kq->kq_lock, LCK_SLEEP_UNLOCK,
    knote_lock_wev(kn), owner_lc->knlc_thread,
616 | THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER); |
617 | |
618 | if (wr == THREAD_RESTART) { |
619 | /* |
 * We were not woken up by knote_unlock() but by knote_unlock_cancel().
 * We need to clean up the state since no one else did.
622 | */ |
623 | uth->uu_knlock = NULL; |
624 | #if DEBUG || DEVELOPMENT |
625 | assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING); |
626 | knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED; |
627 | #endif |
628 | |
629 | if (kqlocking == KNOTE_KQ_LOCK_ALWAYS || |
630 | kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { |
631 | kqlock(kqu); |
632 | } |
633 | return false; |
634 | } else { |
635 | if (kqlocking == KNOTE_KQ_LOCK_ALWAYS || |
636 | kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) { |
637 | kqlock(kqu); |
638 | #if DEBUG || DEVELOPMENT |
639 | /* |
640 | * This state is set under the lock so we can't |
641 | * really assert this unless we hold the lock. |
642 | */ |
643 | assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED); |
644 | #endif |
645 | } |
646 | return true; |
647 | } |
648 | } |
649 | |
650 | /* |
651 | * Attempts to take the "knote" lock. |
652 | * |
653 | * Called with the kqueue lock held. |
654 | * |
655 | * Returns true if the knote lock is acquired, false if it has been dropped |
656 | */ |
657 | static bool __result_use_check |
658 | knote_lock(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *knlc, |
659 | enum kqlocking kqlocking) |
660 | { |
661 | kqlock_held(kqu); |
662 | |
663 | #if DEBUG || DEVELOPMENT |
664 | assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED); |
665 | #endif |
666 | knlc->knlc_knote = kn; |
667 | knlc->knlc_thread = current_thread(); |
668 | knlc->knlc_waiters = 0; |
669 | |
670 | if (__improbable(kn->kn_status & KN_LOCKED)) { |
671 | return knote_lock_slow(kqu, kn, knlc, kqlocking); |
672 | } |
673 | |
674 | /* |
675 | * When the knote will be dropped, the knote lock is taken before |
676 | * KN_DROPPING is set, and then the knote will be removed from any |
677 | * hash table that references it before the lock is canceled. |
678 | */ |
679 | assert((kn->kn_status & KN_DROPPING) == 0); |
680 | LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, knlc, knlc_link); |
681 | kn->kn_status |= KN_LOCKED; |
682 | #if DEBUG || DEVELOPMENT |
683 | knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED; |
684 | #endif |
685 | |
686 | if (kqlocking == KNOTE_KQ_UNLOCK || |
687 | kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { |
688 | kqunlock(kqu); |
689 | } |
690 | return true; |
691 | } |
692 | |
693 | /* |
694 | * Unlocks a knote successfully locked with knote_lock(). |
695 | * |
696 | * Called with the kqueue lock held. |
697 | * |
698 | * Returns with the kqueue lock held according to KNOTE_KQ_* mode. |
699 | */ |
700 | static void |
701 | knote_unlock(kqueue_t kqu, struct knote *kn, |
702 | struct knote_lock_ctx *knlc, enum kqlocking kqlocking) |
703 | { |
704 | kqlock_held(kqu); |
705 | |
706 | assert(knlc->knlc_knote == kn); |
707 | assert(kn->kn_status & KN_LOCKED); |
708 | #if DEBUG || DEVELOPMENT |
709 | assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED); |
710 | #endif |
711 | |
712 | LIST_REMOVE(knlc, knlc_link); |
713 | |
714 | if (knlc->knlc_waiters) { |
715 | thread_t thread = THREAD_NULL; |
716 | |
wakeup_one_with_inheritor(knote_lock_wev(kn), THREAD_AWAKENED,
    LCK_WAKE_DEFAULT, &thread);
719 | |
720 | /* |
721 | * knote_lock_slow() publishes the lock context of waiters |
722 | * in uthread::uu_knlock. |
723 | * |
724 | * Reach out and make this context the new owner. |
725 | */ |
726 | struct uthread *ut = get_bsdthread_info(thread); |
727 | struct knote_lock_ctx *next_owner_lc = ut->uu_knlock; |
728 | |
729 | assert(next_owner_lc->knlc_knote == kn); |
730 | next_owner_lc->knlc_waiters = knlc->knlc_waiters - 1; |
731 | LIST_INSERT_HEAD(&kqu.kq->kq_knlocks, next_owner_lc, knlc_link); |
732 | #if DEBUG || DEVELOPMENT |
733 | next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED; |
734 | #endif |
735 | ut->uu_knlock = NULL; |
736 | thread_deallocate_safe(thread); |
737 | } else { |
738 | kn->kn_status &= ~KN_LOCKED; |
739 | } |
740 | |
741 | if ((kn->kn_status & KN_MERGE_QOS) && !(kn->kn_status & KN_POSTING)) { |
742 | /* |
743 | * No f_event() in flight anymore, we can leave QoS "Merge" mode |
744 | * |
745 | * See knote_adjust_qos() |
746 | */ |
747 | kn->kn_status &= ~KN_MERGE_QOS; |
748 | } |
749 | if (kqlocking == KNOTE_KQ_UNLOCK) { |
750 | kqunlock(kqu); |
751 | } |
752 | #if DEBUG || DEVELOPMENT |
753 | knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED; |
754 | #endif |
755 | } |
756 | |
757 | /* |
 * Aborts all waiters for a knote lock, and unlocks the knote.
759 | * |
760 | * Called with the kqueue lock held. |
761 | * |
762 | * Returns with the kqueue unlocked. |
763 | */ |
764 | static void |
765 | knote_unlock_cancel(struct kqueue *kq, struct knote *kn, |
766 | struct knote_lock_ctx *knlc) |
767 | { |
kqlock_held(kq);
769 | |
770 | assert(knlc->knlc_knote == kn); |
771 | assert(kn->kn_status & KN_LOCKED); |
772 | assert(kn->kn_status & KN_DROPPING); |
773 | |
774 | LIST_REMOVE(knlc, knlc_link); |
775 | kn->kn_status &= ~KN_LOCKED; |
kqunlock(kq);
777 | |
778 | if (knlc->knlc_waiters) { |
wakeup_all_with_inheritor(knote_lock_wev(kn), THREAD_RESTART);
780 | } |
781 | #if DEBUG || DEVELOPMENT |
782 | knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED; |
783 | #endif |
784 | } |
785 | |
786 | /* |
787 | * Call the f_event hook of a given filter. |
788 | * |
789 | * Takes a use count to protect against concurrent drops. |
790 | * Called with the object lock held. |
791 | */ |
792 | static void |
793 | knote_post(struct knote *kn, long hint) |
794 | { |
795 | struct kqueue *kq = knote_get_kq(kn); |
796 | int dropping, result; |
797 | |
kqlock(kq);
799 | |
800 | if (__improbable(kn->kn_status & (KN_DROPPING | KN_VANISHED))) { |
return kqunlock(kq);
802 | } |
803 | |
804 | if (__improbable(kn->kn_status & KN_POSTING)) { |
805 | panic("KNOTE() called concurrently on knote %p" , kn); |
806 | } |
807 | |
808 | kn->kn_status |= KN_POSTING; |
809 | |
kqunlock(kq);
result = filter_call(knote_fops(kn), f_event(kn, hint));
kqlock(kq);
813 | |
814 | /* Someone dropped the knote/the monitored object vanished while we |
815 | * were in f_event, swallow the side effects of the post. |
816 | */ |
817 | dropping = (kn->kn_status & (KN_DROPPING | KN_VANISHED)); |
818 | |
819 | if (!dropping && (result & FILTER_ADJUST_EVENT_IOTIER_BIT)) { |
kqueue_update_iotier_override(kq);
821 | } |
822 | |
823 | if (!dropping && (result & FILTER_ACTIVE)) { |
knote_activate(kq, kn, result);
825 | } |
826 | |
827 | if ((kn->kn_status & KN_LOCKED) == 0) { |
828 | /* |
829 | * There's no other f_* call in flight, we can leave QoS "Merge" mode. |
830 | * |
831 | * See knote_adjust_qos() |
832 | */ |
833 | kn->kn_status &= ~(KN_POSTING | KN_MERGE_QOS); |
834 | } else { |
835 | kn->kn_status &= ~KN_POSTING; |
836 | } |
837 | |
838 | if (__improbable(dropping)) { |
839 | thread_wakeup(knote_post_wev(kn)); |
840 | } |
841 | |
kqunlock(kq);
843 | } |
844 | |
845 | /* |
846 | * Called by knote_drop() and knote_fdclose() to wait for the last f_event() |
847 | * caller to be done. |
848 | * |
849 | * - kq locked at entry |
850 | * - kq unlocked at exit |
851 | */ |
852 | static void |
853 | knote_wait_for_post(struct kqueue *kq, struct knote *kn) |
854 | { |
kqlock_held(kq);
856 | |
857 | assert(kn->kn_status & (KN_DROPPING | KN_VANISHED)); |
858 | |
859 | if (kn->kn_status & KN_POSTING) { |
lck_spin_sleep(&kq->kq_lock, LCK_SLEEP_UNLOCK, knote_post_wev(kn),
861 | THREAD_UNINT | THREAD_WAIT_NOREPORT); |
862 | } else { |
kqunlock(kq);
864 | } |
865 | } |
866 | |
867 | #pragma mark knote helpers for filters |
868 | |
869 | OS_ALWAYS_INLINE |
870 | void * |
871 | knote_kn_hook_get_raw(struct knote *kn) |
872 | { |
873 | uintptr_t *addr = &kn->kn_hook; |
874 | |
875 | void *hook = (void *) *addr; |
876 | #if __has_feature(ptrauth_calls) |
877 | if (hook) { |
878 | uint16_t blend = kn->kn_filter; |
879 | blend |= (kn->kn_filtid << 8); |
blend ^= OS_PTRAUTH_DISCRIMINATOR("kn.kn_hook");
881 | |
882 | hook = ptrauth_auth_data(hook, ptrauth_key_process_independent_data, |
883 | ptrauth_blend_discriminator(addr, blend)); |
884 | } |
885 | #endif |
886 | |
887 | return hook; |
888 | } |
889 | |
890 | OS_ALWAYS_INLINE void |
891 | knote_kn_hook_set_raw(struct knote *kn, void *kn_hook) |
892 | { |
893 | uintptr_t *addr = &kn->kn_hook; |
894 | #if __has_feature(ptrauth_calls) |
895 | if (kn_hook) { |
896 | uint16_t blend = kn->kn_filter; |
897 | blend |= (kn->kn_filtid << 8); |
blend ^= OS_PTRAUTH_DISCRIMINATOR("kn.kn_hook");
899 | |
900 | kn_hook = ptrauth_sign_unauthenticated(kn_hook, |
901 | ptrauth_key_process_independent_data, |
902 | ptrauth_blend_discriminator(addr, blend)); |
903 | } |
904 | #endif |
905 | *addr = (uintptr_t) kn_hook; |
906 | } |
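/*
 * A filter can keep the signing/authentication above in one place by
 * wrapping the raw accessors with a typed helper, e.g. (sketch only,
 * struct foo and filt_foo_hook are hypothetical):
 *
 *     static inline struct foo *
 *     filt_foo_hook(struct knote *kn)
 *     {
 *         return (struct foo *)knote_kn_hook_get_raw(kn);
 *     }
 *
 * Because kn_filter/kn_filtid are blended into the discriminator, a hook
 * signed on behalf of one filter does not authenticate through another.
 */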
907 | |
908 | OS_ALWAYS_INLINE |
909 | void |
910 | knote_set_error(struct knote *kn, int error) |
911 | { |
912 | kn->kn_flags |= EV_ERROR; |
913 | kn->kn_sdata = error; |
914 | } |
915 | |
916 | OS_ALWAYS_INLINE |
917 | int64_t |
918 | knote_low_watermark(const struct knote *kn) |
919 | { |
920 | return (kn->kn_sfflags & NOTE_LOWAT) ? kn->kn_sdata : 1; |
921 | } |
922 | |
923 | /*! |
924 | * @function knote_fill_kevent_with_sdata |
925 | * |
926 | * @brief |
927 | * Fills in a kevent from the current content of a knote. |
928 | * |
929 | * @discussion |
930 | * This is meant to be called from filter's f_process hooks. |
931 | * The kevent data is filled with kn->kn_sdata. |
932 | * |
933 | * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set. |
934 | * |
935 | * Using knote_fill_kevent is typically preferred. |
936 | */ |
937 | OS_ALWAYS_INLINE |
938 | void |
939 | knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev) |
940 | { |
941 | #define knote_assert_aliases(name1, offs1, name2) \ |
942 | static_assert(offsetof(struct kevent_qos_s, name1) + offs1 == \ |
943 | offsetof(struct kevent_internal_s, name2), \ |
944 | "kevent_qos_s::" #name1 " and kevent_internal_s::" #name2 "need to alias") |
945 | /* |
 * All the code makes assumptions about this aliasing,
 * so make sure we fail the build if we ever ever ever break it.
948 | */ |
949 | knote_assert_aliases(ident, 0, kei_ident); |
950 | #ifdef __LITTLE_ENDIAN__ |
951 | knote_assert_aliases(filter, 0, kei_filter); // non trivial overlap |
952 | knote_assert_aliases(filter, 1, kei_filtid); // non trivial overlap |
953 | #else |
954 | knote_assert_aliases(filter, 0, kei_filtid); // non trivial overlap |
955 | knote_assert_aliases(filter, 1, kei_filter); // non trivial overlap |
956 | #endif |
957 | knote_assert_aliases(flags, 0, kei_flags); |
958 | knote_assert_aliases(qos, 0, kei_qos); |
959 | knote_assert_aliases(udata, 0, kei_udata); |
960 | knote_assert_aliases(fflags, 0, kei_fflags); |
961 | knote_assert_aliases(xflags, 0, kei_sfflags); // non trivial overlap |
962 | knote_assert_aliases(data, 0, kei_sdata); // non trivial overlap |
963 | knote_assert_aliases(ext, 0, kei_ext); |
964 | #undef knote_assert_aliases |
965 | |
966 | /* |
967 | * Fix the differences between kevent_qos_s and kevent_internal_s: |
968 | * - xflags is where kn_sfflags lives, we need to zero it |
969 | * - fixup the high bits of `filter` where kn_filtid lives |
970 | */ |
971 | *kev = *(struct kevent_qos_s *)&kn->kn_kevent; |
972 | kev->xflags = 0; |
973 | kev->filter |= 0xff00; |
974 | if (kn->kn_flags & EV_CLEAR) { |
975 | kn->kn_fflags = 0; |
976 | } |
977 | } |
978 | |
979 | /*! |
980 | * @function knote_fill_kevent |
981 | * |
982 | * @brief |
983 | * Fills in a kevent from the current content of a knote. |
984 | * |
985 | * @discussion |
986 | * This is meant to be called from filter's f_process hooks. |
987 | * The kevent data is filled with the passed in data. |
988 | * |
989 | * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set. |
990 | */ |
991 | OS_ALWAYS_INLINE |
992 | void |
993 | knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data) |
994 | { |
995 | knote_fill_kevent_with_sdata(kn, kev); |
996 | kev->filter = kn->kn_filter; |
997 | kev->data = data; |
998 | } |
999 | |
1000 | |
1001 | #pragma mark file_filtops |
1002 | |
1003 | static int |
1004 | filt_fileattach(struct knote *kn, struct kevent_qos_s *kev) |
1005 | { |
return fo_kqfilter(kn->kn_fp, kn, kev);
1007 | } |
1008 | |
1009 | SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = { |
1010 | .f_isfd = 1, |
1011 | .f_attach = filt_fileattach, |
1012 | }; |
1013 | |
1014 | #pragma mark kqread_filtops |
1015 | |
1016 | #define f_flag fp_glob->fg_flag |
1017 | #define f_ops fp_glob->fg_ops |
1018 | #define f_lflags fp_glob->fg_lflags |
1019 | |
1020 | static void |
1021 | filt_kqdetach(struct knote *kn) |
1022 | { |
struct kqfile *kqf = (struct kqfile *)fp_get_data(kn->kn_fp);
1024 | struct kqueue *kq = &kqf->kqf_kqueue; |
1025 | |
kqlock(kq);
KNOTE_DETACH(&kqf->kqf_sel.si_note, kn);
kqunlock(kq);
1029 | } |
1030 | |
1031 | static int |
1032 | filt_kqueue(struct knote *kn, __unused long hint) |
1033 | { |
struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1035 | |
1036 | return kq->kq_count > 0; |
1037 | } |
1038 | |
1039 | static int |
1040 | filt_kqtouch(struct knote *kn, struct kevent_qos_s *kev) |
1041 | { |
1042 | #pragma unused(kev) |
struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1044 | int res; |
1045 | |
kqlock(kq);
res = (kq->kq_count > 0);
kqunlock(kq);
1049 | |
1050 | return res; |
1051 | } |
1052 | |
1053 | static int |
1054 | filt_kqprocess(struct knote *kn, struct kevent_qos_s *kev) |
1055 | { |
struct kqueue *kq = (struct kqueue *)fp_get_data(kn->kn_fp);
1057 | int res = 0; |
1058 | |
kqlock(kq);
if (kq->kq_count) {
	knote_fill_kevent(kn, kev, kq->kq_count);
	res = 1;
}
kqunlock(kq);
1065 | |
1066 | return res; |
1067 | } |
1068 | |
1069 | SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = { |
1070 | .f_isfd = 1, |
1071 | .f_detach = filt_kqdetach, |
1072 | .f_event = filt_kqueue, |
1073 | .f_touch = filt_kqtouch, |
1074 | .f_process = filt_kqprocess, |
1075 | }; |
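/*
 * Userspace sketch of what this filter services: a kqueue file descriptor
 * can itself be monitored with EVFILT_READ and becomes readable once
 * kq_count is non-zero, with kq_count reported in kev.data by
 * filt_kqprocess() above. Roughly (error handling elided):
 *
 *     int inner = kqueue();
 *     int outer = kqueue();
 *     struct kevent kev;
 *     EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *     kevent(outer, &kev, 1, NULL, 0, NULL);
 */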
1076 | |
1077 | #pragma mark proc_filtops |
1078 | |
1079 | static int |
1080 | filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev) |
1081 | { |
1082 | struct proc *p; |
1083 | |
1084 | assert(PID_MAX < NOTE_PDATAMASK); |
1085 | |
1086 | if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) { |
1087 | knote_set_error(kn, ENOTSUP); |
1088 | return 0; |
1089 | } |
1090 | |
p = proc_find((int)kn->kn_id);
1092 | if (p == NULL) { |
1093 | knote_set_error(kn, ESRCH); |
1094 | return 0; |
1095 | } |
1096 | |
1097 | const uint32_t NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS; |
1098 | |
1099 | if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) { |
1100 | do { |
1101 | pid_t selfpid = proc_selfpid(); |
1102 | |
1103 | if (p->p_ppid == selfpid) { |
1104 | break; /* parent => ok */ |
1105 | } |
1106 | if ((p->p_lflag & P_LTRACED) != 0 && |
1107 | (p->p_oppid == selfpid)) { |
1108 | break; /* parent-in-waiting => ok */ |
1109 | } |
1110 | if (cansignal(current_proc(), kauth_cred_get(), p, SIGKILL)) { |
1111 | break; /* allowed to signal => ok */ |
1112 | } |
1113 | proc_rele(p); |
1114 | knote_set_error(kn, EACCES); |
1115 | return 0; |
1116 | } while (0); |
1117 | } |
1118 | |
1119 | kn->kn_proc = p; |
1120 | kn->kn_flags |= EV_CLEAR; /* automatically set */ |
1121 | kn->kn_sdata = 0; /* incoming data is ignored */ |
1122 | |
1123 | proc_klist_lock(); |
1124 | |
1125 | KNOTE_ATTACH(&p->p_klist, kn); |
1126 | |
1127 | proc_klist_unlock(); |
1128 | |
1129 | proc_rele(p); |
1130 | |
1131 | /* |
1132 | * only captures edge-triggered events after this point |
1133 | * so it can't already be fired. |
1134 | */ |
1135 | return 0; |
1136 | } |
1137 | |
1138 | |
1139 | /* |
1140 | * The knote may be attached to a different process, which may exit, |
1141 | * leaving nothing for the knote to be attached to. In that case, |
1142 | * the pointer to the process will have already been nulled out. |
1143 | */ |
1144 | static void |
1145 | filt_procdetach(struct knote *kn) |
1146 | { |
1147 | struct proc *p; |
1148 | |
1149 | proc_klist_lock(); |
1150 | |
1151 | p = kn->kn_proc; |
1152 | if (p != PROC_NULL) { |
1153 | kn->kn_proc = PROC_NULL; |
1154 | KNOTE_DETACH(&p->p_klist, kn); |
1155 | } |
1156 | |
1157 | proc_klist_unlock(); |
1158 | } |
1159 | |
1160 | static int |
1161 | filt_procevent(struct knote *kn, long hint) |
1162 | { |
1163 | u_int event; |
1164 | |
1165 | /* ALWAYS CALLED WITH proc_klist_lock */ |
1166 | |
1167 | /* |
1168 | * Note: a lot of bits in hint may be obtained from the knote |
1169 | * To free some of those bits, see <rdar://problem/12592988> Freeing up |
1170 | * bits in hint for filt_procevent |
1171 | * |
1172 | * mask off extra data |
1173 | */ |
1174 | event = (u_int)hint & NOTE_PCTRLMASK; |
1175 | |
1176 | /* |
1177 | * termination lifecycle events can happen while a debugger |
1178 | * has reparented a process, in which case notifications |
1179 | * should be quashed except to the tracing parent. When |
1180 | * the debugger reaps the child (either via wait4(2) or |
1181 | * process exit), the child will be reparented to the original |
1182 | * parent and these knotes re-fired. |
1183 | */ |
1184 | if (event & NOTE_EXIT) { |
1185 | if ((kn->kn_proc->p_oppid != 0) |
1186 | && (proc_getpid(knote_get_kq(kn)->kq_p) != kn->kn_proc->p_ppid)) { |
1187 | /* |
1188 | * This knote is not for the current ptrace(2) parent, ignore. |
1189 | */ |
1190 | return 0; |
1191 | } |
1192 | } |
1193 | |
1194 | /* |
1195 | * if the user is interested in this event, record it. |
1196 | */ |
1197 | if (kn->kn_sfflags & event) { |
1198 | kn->kn_fflags |= event; |
1199 | } |
1200 | |
1201 | #pragma clang diagnostic push |
1202 | #pragma clang diagnostic ignored "-Wdeprecated-declarations" |
1203 | if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) { |
1204 | kn->kn_flags |= (EV_EOF | EV_ONESHOT); |
1205 | } |
1206 | #pragma clang diagnostic pop |
1207 | |
1208 | |
1209 | /* |
1210 | * The kernel has a wrapper in place that returns the same data |
1211 | * as is collected here, in kn_hook32. Any changes to how |
1212 | * NOTE_EXITSTATUS and NOTE_EXIT_DETAIL are collected |
1213 | * should also be reflected in the proc_pidnoteexit() wrapper. |
1214 | */ |
1215 | if (event == NOTE_EXIT) { |
1216 | kn->kn_hook32 = 0; |
1217 | if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) { |
1218 | kn->kn_fflags |= NOTE_EXITSTATUS; |
1219 | kn->kn_hook32 |= (hint & NOTE_PDATAMASK); |
1220 | } |
1221 | if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) { |
1222 | kn->kn_fflags |= NOTE_EXIT_DETAIL; |
1223 | if ((kn->kn_proc->p_lflag & |
1224 | P_LTERM_DECRYPTFAIL) != 0) { |
1225 | kn->kn_hook32 |= NOTE_EXIT_DECRYPTFAIL; |
1226 | } |
1227 | if ((kn->kn_proc->p_lflag & |
1228 | P_LTERM_JETSAM) != 0) { |
1229 | kn->kn_hook32 |= NOTE_EXIT_MEMORY; |
1230 | switch (kn->kn_proc->p_lflag & P_JETSAM_MASK) { |
1231 | case P_JETSAM_VMPAGESHORTAGE: |
1232 | kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE; |
1233 | break; |
1234 | case P_JETSAM_VMTHRASHING: |
1235 | kn->kn_hook32 |= NOTE_EXIT_MEMORY_VMTHRASHING; |
1236 | break; |
1237 | case P_JETSAM_FCTHRASHING: |
1238 | kn->kn_hook32 |= NOTE_EXIT_MEMORY_FCTHRASHING; |
1239 | break; |
1240 | case P_JETSAM_VNODE: |
1241 | kn->kn_hook32 |= NOTE_EXIT_MEMORY_VNODE; |
1242 | break; |
1243 | case P_JETSAM_HIWAT: |
1244 | kn->kn_hook32 |= NOTE_EXIT_MEMORY_HIWAT; |
1245 | break; |
1246 | case P_JETSAM_PID: |
1247 | kn->kn_hook32 |= NOTE_EXIT_MEMORY_PID; |
1248 | break; |
1249 | case P_JETSAM_IDLEEXIT: |
1250 | kn->kn_hook32 |= NOTE_EXIT_MEMORY_IDLE; |
1251 | break; |
1252 | } |
1253 | } |
1254 | if ((proc_getcsflags(kn->kn_proc) & |
1255 | CS_KILLED) != 0) { |
1256 | kn->kn_hook32 |= NOTE_EXIT_CSERROR; |
1257 | } |
1258 | } |
1259 | } |
1260 | |
1261 | /* if we have any matching state, activate the knote */ |
1262 | return kn->kn_fflags != 0; |
1263 | } |
1264 | |
1265 | static int |
1266 | filt_proctouch(struct knote *kn, struct kevent_qos_s *kev) |
1267 | { |
1268 | int res; |
1269 | |
1270 | proc_klist_lock(); |
1271 | |
/* accept new filter flags and mask off output events no longer interesting */
1273 | kn->kn_sfflags = kev->fflags; |
1274 | |
1275 | /* restrict the current results to the (smaller?) set of new interest */ |
1276 | /* |
1277 | * For compatibility with previous implementations, we leave kn_fflags |
1278 | * as they were before. |
1279 | */ |
1280 | //kn->kn_fflags &= kn->kn_sfflags; |
1281 | |
1282 | res = (kn->kn_fflags != 0); |
1283 | |
1284 | proc_klist_unlock(); |
1285 | |
1286 | return res; |
1287 | } |
1288 | |
1289 | static int |
1290 | filt_procprocess(struct knote *kn, struct kevent_qos_s *kev) |
1291 | { |
1292 | int res = 0; |
1293 | |
1294 | proc_klist_lock(); |
1295 | if (kn->kn_fflags) { |
knote_fill_kevent(kn, kev, kn->kn_hook32);
1297 | kn->kn_hook32 = 0; |
1298 | res = 1; |
1299 | } |
1300 | proc_klist_unlock(); |
1301 | return res; |
1302 | } |
1303 | |
1304 | SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = { |
1305 | .f_attach = filt_procattach, |
1306 | .f_detach = filt_procdetach, |
1307 | .f_event = filt_procevent, |
1308 | .f_touch = filt_proctouch, |
1309 | .f_process = filt_procprocess, |
1310 | }; |
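/*
 * Userspace sketch of the attach checks above: asking for
 * NOTE_EXIT | NOTE_EXITSTATUS requires being the (ptrace) parent of the
 * target or being allowed to send it SIGKILL, otherwise the attach fails
 * with EACCES. A minimal registration might look like:
 *
 *     struct kevent kev;
 *     EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD,
 *         NOTE_EXIT | NOTE_EXITSTATUS, 0, NULL);
 *     kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * On delivery, filt_procprocess() returns the accumulated kn_hook32 value
 * in kev.data, which with NOTE_EXITSTATUS carries the exit status (the same
 * data the proc_pidnoteexit() wrapper exposes).
 */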
1311 | |
1312 | #pragma mark timer_filtops |
1313 | |
1314 | struct filt_timer_params { |
1315 | uint64_t deadline; /* deadline in abs/cont time |
1316 | * (or 0 if NOTE_ABSOLUTE and deadline is in past) */ |
1317 | uint64_t leeway; /* leeway in abstime, or 0 if none */ |
1318 | uint64_t interval; /* interval in abstime or 0 if non-repeating timer */ |
1319 | }; |
1320 | |
1321 | /* |
1322 | * Values stored in the knote at rest (using Mach absolute time units) |
1323 | * |
1324 | * kn->kn_thcall where the thread_call object is stored |
1325 | * kn->kn_ext[0] next deadline or 0 if immediate expiration |
1326 | * kn->kn_ext[1] leeway value |
1327 | * kn->kn_sdata interval timer: the interval |
1328 | * absolute/deadline timer: 0 |
1329 | * kn->kn_hook32 timer state (with gencount) |
1330 | * |
1331 | * TIMER_IDLE: |
1332 | * The timer has either never been scheduled or been cancelled. |
1333 | * It is safe to schedule a new one in this state. |
1334 | * |
1335 | * TIMER_ARMED: |
1336 | * The timer has been scheduled |
1337 | * |
1338 | * TIMER_FIRED |
1339 | * The timer has fired and an event needs to be delivered. |
1340 | * When in this state, the callout may still be running. |
1341 | * |
1342 | * TIMER_IMMEDIATE |
1343 | * The timer has fired at registration time, and the callout was never |
1344 | * dispatched. |
1345 | */ |
1346 | #define TIMER_IDLE 0x0 |
1347 | #define TIMER_ARMED 0x1 |
1348 | #define TIMER_FIRED 0x2 |
1349 | #define TIMER_IMMEDIATE 0x3 |
1350 | #define TIMER_STATE_MASK 0x3 |
1351 | #define TIMER_GEN_INC 0x4 |
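/*
 * kn_hook32 is thus a generation counter in the upper bits with the 2-bit
 * state in the low bits. With illustrative values only:
 *
 *     0x9 == gen 2, TIMER_ARMED        original arming
 *     0xd == gen 3, TIMER_ARMED        after a re-arm in filt_timerarm()
 *
 * A late filt_timerexpire() still carrying the gen-2 value (0x9) fails its
 * compare-and-exchange against 0xd, so the stale firing is ignored.
 */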
1352 | |
1353 | static void |
1354 | filt_timer_set_params(struct knote *kn, struct filt_timer_params *params) |
1355 | { |
1356 | kn->kn_ext[0] = params->deadline; |
1357 | kn->kn_ext[1] = params->leeway; |
1358 | kn->kn_sdata = params->interval; |
1359 | } |
1360 | |
1361 | /* |
1362 | * filt_timervalidate - process data from user |
1363 | * |
1364 | * Sets up the deadline, interval, and leeway from the provided user data |
1365 | * |
1366 | * Input: |
1367 | * kn_sdata timer deadline or interval time |
1368 | * kn_sfflags style of timer, unit of measurement |
1369 | * |
1370 | * Output: |
1371 | * struct filter_timer_params to apply to the filter with |
 * filt_timer_set_params when changes are ready to be committed.
1373 | * |
1374 | * Returns: |
1375 | * EINVAL Invalid user data parameters |
1376 | * ERANGE Various overflows with the parameters |
1377 | * |
1378 | * Called with timer filter lock held. |
1379 | */ |
1380 | static int |
1381 | filt_timervalidate(const struct kevent_qos_s *kev, |
1382 | struct filt_timer_params *params) |
1383 | { |
1384 | /* |
1385 | * There are 5 knobs that need to be chosen for a timer registration: |
1386 | * |
1387 | * A) Units of time (what is the time duration of the specified number) |
1388 | * Absolute and interval take: |
1389 | * NOTE_SECONDS, NOTE_USECONDS, NOTE_NSECONDS, NOTE_MACHTIME |
1390 | * Defaults to milliseconds if not specified |
1391 | * |
1392 | * B) Clock epoch (what is the zero point of the specified number) |
1393 | * For interval, there is none |
1394 | * For absolute, defaults to the gettimeofday/calendar epoch |
1395 | * With NOTE_MACHTIME, uses mach_absolute_time() |
1396 | * With NOTE_MACHTIME and NOTE_MACH_CONTINUOUS_TIME, uses mach_continuous_time() |
1397 | * |
1398 | * C) The knote's behavior on delivery |
1399 | * Interval timer causes the knote to arm for the next interval unless one-shot is set |
1400 | * Absolute is a forced one-shot timer which deletes on delivery |
1401 | * TODO: Add a way for absolute to be not forced one-shot |
1402 | * |
1403 | * D) Whether the time duration is relative to now or absolute |
1404 | * Interval fires at now + duration when it is set up |
1405 | * Absolute fires at now + difference between now walltime and passed in walltime |
1406 | * With NOTE_MACHTIME it fires at an absolute MAT or MCT. |
1407 | * |
1408 | * E) Whether the timer continues to tick across sleep |
1409 | * By default all three do not. |
1410 | * For interval and absolute, NOTE_MACH_CONTINUOUS_TIME causes them to tick across sleep |
1411 | * With NOTE_ABSOLUTE | NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME: |
1412 | * expires when mach_continuous_time() is > the passed in value. |
1413 | */ |
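/*
 * For instance (illustrative only), a repeating 500ms timer that keeps
 * ticking across sleep could be registered from userspace roughly as:
 *
 *     struct kevent64_s kev = {
 *         .ident  = 1,
 *         .filter = EVFILT_TIMER,
 *         .flags  = EV_ADD,
 *         .fflags = NOTE_USECONDS | NOTE_MACH_CONTINUOUS_TIME,
 *         .data   = 500000,
 *     };
 *     kevent64(kq, &kev, 1, NULL, 0, 0, NULL);
 *
 * which reaches this function with kev->data = 500000 and those fflags.
 */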
1414 | |
1415 | uint64_t multiplier; |
1416 | |
1417 | boolean_t use_abstime = FALSE; |
1418 | |
1419 | switch (kev->fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS | NOTE_MACHTIME)) { |
1420 | case NOTE_SECONDS: |
1421 | multiplier = NSEC_PER_SEC; |
1422 | break; |
1423 | case NOTE_USECONDS: |
1424 | multiplier = NSEC_PER_USEC; |
1425 | break; |
1426 | case NOTE_NSECONDS: |
1427 | multiplier = 1; |
1428 | break; |
1429 | case NOTE_MACHTIME: |
1430 | multiplier = 0; |
1431 | use_abstime = TRUE; |
1432 | break; |
1433 | case 0: /* milliseconds (default) */ |
1434 | multiplier = NSEC_PER_SEC / 1000; |
1435 | break; |
1436 | default: |
1437 | return EINVAL; |
1438 | } |
1439 | |
1440 | /* transform the leeway in kn_ext[1] to same time scale */ |
1441 | if (kev->fflags & NOTE_LEEWAY) { |
1442 | uint64_t leeway_abs; |
1443 | |
1444 | if (use_abstime) { |
1445 | leeway_abs = (uint64_t)kev->ext[1]; |
1446 | } else { |
1447 | uint64_t leeway_ns; |
1448 | if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) { |
1449 | return ERANGE; |
1450 | } |
1451 | |
nanoseconds_to_absolutetime(leeway_ns, &leeway_abs);
1453 | } |
1454 | |
1455 | params->leeway = leeway_abs; |
1456 | } else { |
1457 | params->leeway = 0; |
1458 | } |
1459 | |
1460 | if (kev->fflags & NOTE_ABSOLUTE) { |
1461 | uint64_t deadline_abs; |
1462 | |
1463 | if (use_abstime) { |
1464 | deadline_abs = (uint64_t)kev->data; |
1465 | } else { |
1466 | uint64_t calendar_deadline_ns; |
1467 | |
1468 | if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) { |
1469 | return ERANGE; |
1470 | } |
1471 | |
1472 | /* calendar_deadline_ns is in nanoseconds since the epoch */ |
1473 | |
1474 | clock_sec_t seconds; |
1475 | clock_nsec_t nanoseconds; |
1476 | |
1477 | /* |
1478 | * Note that the conversion through wall-time is only done once. |
1479 | * |
1480 | * If the relationship between MAT and gettimeofday changes, |
1481 | * the underlying timer does not update. |
1482 | * |
1483 | * TODO: build a wall-time denominated timer_call queue |
1484 | * and a flag to request DTRTing with wall-time timers |
1485 | */ |
clock_get_calendar_nanotime(&seconds, &nanoseconds);
1487 | |
1488 | uint64_t calendar_now_ns = (uint64_t)seconds * NSEC_PER_SEC + nanoseconds; |
1489 | |
1490 | /* if deadline is in the future */ |
1491 | if (calendar_now_ns < calendar_deadline_ns) { |
1492 | uint64_t interval_ns = calendar_deadline_ns - calendar_now_ns; |
1493 | uint64_t interval_abs; |
1494 | |
nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1496 | |
1497 | /* |
1498 | * Note that the NOTE_MACH_CONTINUOUS_TIME flag here only |
1499 | * causes the timer to keep ticking across sleep, but |
1500 | * it does not change the calendar timebase. |
1501 | */ |
1502 | |
1503 | if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) { |
1504 | clock_continuoustime_interval_to_deadline(abstime: interval_abs, |
1505 | result: &deadline_abs); |
1506 | } else { |
1507 | clock_absolutetime_interval_to_deadline(abstime: interval_abs, |
1508 | result: &deadline_abs); |
1509 | } |
1510 | } else { |
1511 | deadline_abs = 0; /* cause immediate expiration */ |
1512 | } |
1513 | } |
1514 | |
1515 | params->deadline = deadline_abs; |
1516 | params->interval = 0; /* NOTE_ABSOLUTE is non-repeating */ |
1517 | } else if (kev->data < 0) { |
1518 | /* |
1519 | * Negative interval timers fire immediately, once. |
1520 | * |
1521 | * Ideally a negative interval would be an error, but certain clients |
 * pass negative values by accident, and expect an event back.
1523 | * |
1524 | * In the old implementation the timer would repeat with no delay |
1525 | * N times until mach_absolute_time() + (N * interval) underflowed, |
1526 | * then it would wait ~forever by accidentally arming a timer for the far future. |
1527 | * |
1528 | * We now skip the power-wasting hot spin phase and go straight to the idle phase. |
1529 | */ |
1530 | |
1531 | params->deadline = 0; /* expire immediately */ |
1532 | params->interval = 0; /* non-repeating */ |
1533 | } else { |
1534 | uint64_t interval_abs = 0; |
1535 | |
1536 | if (use_abstime) { |
1537 | interval_abs = (uint64_t)kev->data; |
1538 | } else { |
1539 | uint64_t interval_ns; |
1540 | if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) { |
1541 | return ERANGE; |
1542 | } |
1543 | |
nanoseconds_to_absolutetime(interval_ns, &interval_abs);
1545 | } |
1546 | |
1547 | uint64_t deadline = 0; |
1548 | |
1549 | if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) { |
1550 | clock_continuoustime_interval_to_deadline(abstime: interval_abs, result: &deadline); |
1551 | } else { |
1552 | clock_absolutetime_interval_to_deadline(abstime: interval_abs, result: &deadline); |
1553 | } |
1554 | |
1555 | params->deadline = deadline; |
1556 | params->interval = interval_abs; |
1557 | } |
1558 | |
1559 | return 0; |
1560 | } |
1561 | |
1562 | /* |
1563 | * filt_timerexpire - the timer callout routine |
1564 | */ |
1565 | static void |
1566 | filt_timerexpire(void *knx, void *state_on_arm) |
1567 | { |
1568 | struct knote *kn = knx; |
1569 | |
1570 | uint32_t state = (uint32_t)(uintptr_t)state_on_arm; |
1571 | uint32_t fired_state = state ^ TIMER_ARMED ^ TIMER_FIRED; |
1572 | |
1573 | if (os_atomic_cmpxchg(&kn->kn_hook32, state, fired_state, relaxed)) { |
1574 | // our f_event always would say FILTER_ACTIVE, |
1575 | // so be leaner and just do it. |
1576 | struct kqueue *kq = knote_get_kq(kn); |
kqlock(kq);
knote_activate(kq, kn, FILTER_ACTIVE);
kqunlock(kq);
1580 | } else { |
1581 | /* |
		 * The timer has been reprogrammed or canceled since it was armed,
		 * and this is a late firing for the timer; just ignore it.
1584 | */ |
1585 | } |
1586 | } |
1587 | |
1588 | /* |
 * Does this deadline need a timer armed for it, or has it already expired?
1590 | */ |
1591 | static bool |
1592 | filt_timer_is_ready(struct knote *kn) |
1593 | { |
1594 | uint64_t now, deadline = kn->kn_ext[0]; |
1595 | |
1596 | if (deadline == 0) { |
1597 | return true; |
1598 | } |
1599 | |
1600 | if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) { |
1601 | now = mach_continuous_time(); |
1602 | } else { |
1603 | now = mach_absolute_time(); |
1604 | } |
1605 | return deadline <= now; |
1606 | } |
1607 | |
1608 | /* |
1609 | * Arm a timer |
1610 | * |
1611 | * It is the responsibility of the caller to make sure the timer call |
1612 | * has completed or been cancelled properly prior to arming it. |
1613 | */ |
1614 | static void |
1615 | filt_timerarm(struct knote *kn) |
1616 | { |
1617 | uint64_t deadline = kn->kn_ext[0]; |
1618 | uint64_t leeway = kn->kn_ext[1]; |
1619 | uint32_t state; |
1620 | |
1621 | int filter_flags = kn->kn_sfflags; |
1622 | unsigned int timer_flags = 0; |
1623 | |
1624 | if (filter_flags & NOTE_CRITICAL) { |
1625 | timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL; |
1626 | } else if (filter_flags & NOTE_BACKGROUND) { |
1627 | timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND; |
1628 | } else { |
1629 | timer_flags |= THREAD_CALL_DELAY_USER_NORMAL; |
1630 | } |
1631 | |
1632 | if (filter_flags & NOTE_LEEWAY) { |
1633 | timer_flags |= THREAD_CALL_DELAY_LEEWAY; |
1634 | } |
1635 | |
1636 | if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) { |
1637 | timer_flags |= THREAD_CALL_CONTINUOUS; |
1638 | } |
1639 | |
1640 | /* |
1641 | * Move to ARMED. |
1642 | * |
	 * We increase the gencount and set up the thread call with this expected
	 * state. It means that if there was a previous generation of the timer in
	 * flight that needs to be ignored, then 3 things are possible:
	 *
	 * - the timer fires first: filt_timerexpire() runs and sets the state to
	 *   FIRED, but we clobber it with ARMED and a new gencount. The knote
	 *   will still be activated, but filt_timerprocess(), which is serialized
	 *   with this call, will not see the FIRED bit set and will not deliver
	 *   an event.
	 *
	 * - this code runs first, but filt_timerexpire() comes second. Because it
	 *   carries an old gencount, it will debounce and not activate the knote.
1654 | * |
1655 | * - filt_timerexpire() wasn't in flight yet, and thread_call_enter below |
1656 | * will just cancel it properly. |
1657 | * |
1658 | * This is important as userspace expects to never be woken up for past |
1659 | * timers after filt_timertouch ran. |
1660 | */ |
1661 | state = os_atomic_load(&kn->kn_hook32, relaxed); |
1662 | state &= ~TIMER_STATE_MASK; |
1663 | state += TIMER_GEN_INC + TIMER_ARMED; |
1664 | os_atomic_store(&kn->kn_hook32, state, relaxed); |
1665 | |
1666 | thread_call_enter_delayed_with_leeway(call: kn->kn_thcall, |
1667 | param1: (void *)(uintptr_t)state, deadline, leeway, flags: timer_flags); |
1668 | } |
1669 | |
1670 | /* |
1671 | * Mark a timer as "already fired" when it is being reprogrammed |
1672 | * |
1673 | * If there is a timer in flight, this will do a best effort at canceling it, |
1674 | * but will not wait. If the thread call was in flight, having set the |
1675 | * TIMER_IMMEDIATE bit will debounce a filt_timerexpire() racing with this |
1676 | * cancelation. |
1677 | */ |
1678 | static void |
1679 | filt_timerfire_immediate(struct knote *kn) |
1680 | { |
1681 | uint32_t state; |
1682 | |
1683 | static_assert(TIMER_IMMEDIATE == TIMER_STATE_MASK, |
1684 | "validate that this atomic or will transition to IMMEDIATE" ); |
1685 | state = os_atomic_or_orig(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed); |
1686 | |
1687 | if ((state & TIMER_STATE_MASK) == TIMER_ARMED) { |
1688 | thread_call_cancel(call: kn->kn_thcall); |
1689 | } |
1690 | } |
1691 | |
1692 | /* |
1693 | * Allocate a thread call for the knote's lifetime, and kick off the timer. |
1694 | */ |
1695 | static int |
1696 | filt_timerattach(struct knote *kn, struct kevent_qos_s *kev) |
1697 | { |
1698 | thread_call_t callout; |
1699 | struct filt_timer_params params; |
1700 | int error; |
1701 | |
1702 | if ((error = filt_timervalidate(kev, params: ¶ms)) != 0) { |
1703 | knote_set_error(kn, error); |
1704 | return 0; |
1705 | } |
1706 | |
1707 | callout = thread_call_allocate_with_options(func: filt_timerexpire, |
1708 | param0: (thread_call_param_t)kn, pri: THREAD_CALL_PRIORITY_HIGH, |
1709 | options: THREAD_CALL_OPTIONS_ONCE); |
1710 | |
1711 | if (NULL == callout) { |
1712 | knote_set_error(kn, ENOMEM); |
1713 | return 0; |
1714 | } |
1715 | |
1716 | filt_timer_set_params(kn, params: ¶ms); |
1717 | kn->kn_thcall = callout; |
1718 | kn->kn_flags |= EV_CLEAR; |
1719 | os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed); |
1720 | |
1721 | /* NOTE_ABSOLUTE implies EV_ONESHOT */ |
1722 | if (kn->kn_sfflags & NOTE_ABSOLUTE) { |
1723 | kn->kn_flags |= EV_ONESHOT; |
1724 | } |
1725 | |
1726 | if (filt_timer_is_ready(kn)) { |
1727 | os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed); |
1728 | return FILTER_ACTIVE; |
1729 | } else { |
1730 | filt_timerarm(kn); |
1731 | return 0; |
1732 | } |
1733 | } |
1734 | |
1735 | /* |
1736 | * Shut down the timer if it's running, and free the callout. |
1737 | */ |
1738 | static void |
1739 | filt_timerdetach(struct knote *kn) |
1740 | { |
1741 | __assert_only boolean_t freed; |
1742 | |
1743 | /* |
1744 | * Unconditionally cancel to make sure there can't be any filt_timerexpire() |
1745 | * running anymore. |
1746 | */ |
1747 | thread_call_cancel_wait(call: kn->kn_thcall); |
1748 | freed = thread_call_free(call: kn->kn_thcall); |
1749 | assert(freed); |
1750 | } |
1751 | |
1752 | /* |
1753 | * filt_timertouch - update timer knote with new user input |
1754 | * |
1755 | * Cancel and restart the timer based on new user data. When |
1756 | * the user picks up a knote, clear the count of how many timer |
1757 | * pops have gone off (in kn_data). |
1758 | */ |
1759 | static int |
1760 | filt_timertouch(struct knote *kn, struct kevent_qos_s *kev) |
1761 | { |
1762 | struct filt_timer_params params; |
1763 | uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags); |
1764 | int error; |
1765 | |
1766 | if (kev->qos && (knote_get_kq(kn)->kq_state & KQ_WORKLOOP) && |
1767 | !_pthread_priority_thread_qos(pp: kev->qos)) { |
1768 | /* validate usage of FILTER_UPDATE_REQ_QOS */ |
1769 | kev->flags |= EV_ERROR; |
1770 | kev->data = ERANGE; |
1771 | return 0; |
1772 | } |
1773 | |
1774 | if (changed_flags & NOTE_ABSOLUTE) { |
1775 | kev->flags |= EV_ERROR; |
1776 | kev->data = EINVAL; |
1777 | return 0; |
1778 | } |
1779 | |
1780 | if ((error = filt_timervalidate(kev, params: ¶ms)) != 0) { |
1781 | kev->flags |= EV_ERROR; |
1782 | kev->data = error; |
1783 | return 0; |
1784 | } |
1785 | |
1786 | /* capture the new values used to compute deadline */ |
1787 | filt_timer_set_params(kn, params: ¶ms); |
1788 | kn->kn_sfflags = kev->fflags; |
1789 | |
1790 | if (filt_timer_is_ready(kn)) { |
1791 | filt_timerfire_immediate(kn); |
1792 | return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS; |
1793 | } else { |
1794 | filt_timerarm(kn); |
1795 | return FILTER_UPDATE_REQ_QOS; |
1796 | } |
1797 | } |
1798 | |
1799 | /* |
1800 | * filt_timerprocess - query state of knote and snapshot event data |
1801 | * |
1802 | * Determine if the timer has fired in the past, snapshot the state |
1803 | * of the kevent for returning to user-space, and clear pending event |
1804 | * counters for the next time. |
1805 | */ |
1806 | static int |
1807 | filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev) |
1808 | { |
1809 | uint32_t state = os_atomic_load(&kn->kn_hook32, relaxed); |
1810 | |
1811 | /* |
1812 | * filt_timerprocess is serialized with any filter routine except for |
1813 | * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED |
1814 | * transition, and on success, activates the knote. |
1815 | * |
1816 | * Hence, we don't need atomic modifications of the state, only to peek at |
1817 | * whether we see any of the "FIRED" state, and if we do, it is safe to |
1818 | * do simple state machine transitions. |
1819 | */ |
1820 | switch (state & TIMER_STATE_MASK) { |
1821 | case TIMER_IDLE: |
1822 | case TIMER_ARMED: |
1823 | /* |
1824 | * This can happen if a touch resets a timer that had fired |
1825 | * without being processed |
1826 | */ |
1827 | return 0; |
1828 | } |
1829 | |
1830 | os_atomic_store(&kn->kn_hook32, state & ~TIMER_STATE_MASK, relaxed); |
1831 | |
1832 | /* |
1833 | * Copy out the interesting kevent state, |
1834 | * but don't leak out the raw time calculations. |
1835 | * |
1836 | * TODO: potential enhancements - tell the user about: |
1837 | * - deadline to which this timer thought it was expiring |
1838 | * - return kn_sfflags in the fflags field so the client can know |
1839 | * under what flags the timer fired |
1840 | */ |
1841 | knote_fill_kevent(kn, kev, data: 1); |
1842 | kev->ext[0] = 0; |
1843 | /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */ |
1844 | |
1845 | if (kn->kn_sdata != 0) { |
1846 | /* |
1847 | * This is a 'repeating' timer, so we have to emit |
1848 | * how many intervals expired between the arm |
1849 | * and the process. |
1850 | * |
1851 | * A very strange style of interface, because |
1852 | * this could easily be done in the client... |
1853 | */ |
1854 | |
1855 | uint64_t now; |
1856 | |
1857 | if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) { |
1858 | now = mach_continuous_time(); |
1859 | } else { |
1860 | now = mach_absolute_time(); |
1861 | } |
1862 | |
1863 | uint64_t first_deadline = kn->kn_ext[0]; |
1864 | uint64_t interval_abs = kn->kn_sdata; |
1865 | uint64_t orig_arm_time = first_deadline - interval_abs; |
1866 | |
1867 | assert(now > orig_arm_time); |
1868 | assert(now > first_deadline); |
1869 | |
1870 | uint64_t elapsed = now - orig_arm_time; |
1871 | |
1872 | uint64_t num_fired = elapsed / interval_abs; |
1873 | |
1874 | /* |
1875 | * To reach this code, we must have seen the timer pop |
1876 | * and be in repeating mode, so therefore it must have been |
1877 | * more than 'interval' time since the attach or last |
1878 | * successful touch. |
1879 | */ |
1880 | assert(num_fired > 0); |
1881 | |
1882 | /* report how many intervals have elapsed to the user */ |
1883 | kev->data = (int64_t)num_fired; |
1884 | |
1885 | /* We only need to re-arm the timer if it's not about to be destroyed */ |
1886 | if ((kn->kn_flags & EV_ONESHOT) == 0) { |
1887 | /* fire at the end of the next interval */ |
1888 | uint64_t new_deadline = first_deadline + num_fired * interval_abs; |
1889 | |
1890 | assert(new_deadline > now); |
1891 | |
1892 | kn->kn_ext[0] = new_deadline; |
1893 | |
1894 | /* |
1895 | * This can't shortcut setting up the thread call, because |
			 * knote_process deactivates EV_CLEAR knotes unconditionally.
1897 | */ |
1898 | filt_timerarm(kn); |
1899 | } |
1900 | } |
1901 | |
1902 | return FILTER_ACTIVE; |
1903 | } |
1904 | |
1905 | SECURITY_READ_ONLY_EARLY(static struct filterops) timer_filtops = { |
1906 | .f_extended_codes = true, |
1907 | .f_attach = filt_timerattach, |
1908 | .f_detach = filt_timerdetach, |
1909 | .f_event = filt_bad_event, |
1910 | .f_touch = filt_timertouch, |
1911 | .f_process = filt_timerprocess, |
1912 | }; |
1913 | |
1914 | #pragma mark user_filtops |
1915 | |
1916 | static int |
1917 | filt_userattach(struct knote *kn, __unused struct kevent_qos_s *kev) |
1918 | { |
1919 | if (kn->kn_sfflags & NOTE_TRIGGER) { |
1920 | kn->kn_hook32 = FILTER_ACTIVE; |
1921 | } else { |
1922 | kn->kn_hook32 = 0; |
1923 | } |
1924 | return kn->kn_hook32; |
1925 | } |
1926 | |
1927 | static int |
1928 | filt_usertouch(struct knote *kn, struct kevent_qos_s *kev) |
1929 | { |
1930 | uint32_t ffctrl; |
1931 | int fflags; |
1932 | |
1933 | ffctrl = kev->fflags & NOTE_FFCTRLMASK; |
1934 | fflags = kev->fflags & NOTE_FFLAGSMASK; |
1935 | switch (ffctrl) { |
1936 | case NOTE_FFNOP: |
1937 | break; |
1938 | case NOTE_FFAND: |
1939 | kn->kn_sfflags &= fflags; |
1940 | break; |
1941 | case NOTE_FFOR: |
1942 | kn->kn_sfflags |= fflags; |
1943 | break; |
1944 | case NOTE_FFCOPY: |
1945 | kn->kn_sfflags = fflags; |
1946 | break; |
1947 | } |
1948 | kn->kn_sdata = kev->data; |
1949 | |
1950 | if (kev->fflags & NOTE_TRIGGER) { |
1951 | kn->kn_hook32 = FILTER_ACTIVE; |
1952 | } |
1953 | return (int)kn->kn_hook32; |
1954 | } |
1955 | |
1956 | static int |
1957 | filt_userprocess(struct knote *kn, struct kevent_qos_s *kev) |
1958 | { |
1959 | int result = (int)kn->kn_hook32; |
1960 | |
1961 | if (result) { |
1962 | /* EVFILT_USER returns the data that was passed in */ |
1963 | knote_fill_kevent_with_sdata(kn, kev); |
1964 | kev->fflags = kn->kn_sfflags; |
1965 | if (kn->kn_flags & EV_CLEAR) { |
1966 | /* knote_fill_kevent cleared kn_fflags */ |
1967 | kn->kn_hook32 = 0; |
1968 | } |
1969 | } |
1970 | |
1971 | return result; |
1972 | } |
1973 | |
1974 | SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = { |
1975 | .f_extended_codes = true, |
1976 | .f_attach = filt_userattach, |
1977 | .f_detach = filt_no_detach, |
1978 | .f_event = filt_bad_event, |
1979 | .f_touch = filt_usertouch, |
1980 | .f_process = filt_userprocess, |
1981 | }; |
1982 | |
1983 | #pragma mark workloop_filtops |
1984 | |
1985 | #define EPREEMPTDISABLED (-1) |
1986 | |
1987 | static inline void |
1988 | filt_wllock(struct kqworkloop *kqwl) |
1989 | { |
1990 | lck_spin_lock(lck: &kqwl->kqwl_statelock); |
1991 | } |
1992 | |
1993 | static inline void |
1994 | filt_wlunlock(struct kqworkloop *kqwl) |
1995 | { |
1996 | lck_spin_unlock(lck: &kqwl->kqwl_statelock); |
1997 | } |
1998 | |
1999 | /* |
2000 | * Returns true when the interlock for the turnstile is the workqueue lock |
2001 | * |
2002 | * When this is the case, all turnstiles operations are delegated |
2003 | * to the workqueue subsystem. |
2004 | * |
2005 | * This is required because kqueue_threadreq_bind_prepost only holds the |
2006 | * workqueue lock but needs to move the inheritor from the workloop turnstile |
2007 | * away from the creator thread, so that this now fulfilled request cannot be |
2008 | * picked anymore by other threads. |
2009 | */ |
2010 | static inline bool |
2011 | filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl) |
2012 | { |
2013 | return kqr_thread_requested_pending(kqr: &kqwl->kqwl_request); |
2014 | } |
2015 | |
2016 | static void |
2017 | filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts, |
2018 | turnstile_update_flags_t flags) |
2019 | { |
2020 | turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL; |
2021 | workq_threadreq_t kqr = &kqwl->kqwl_request; |
2022 | |
2023 | /* |
2024 | * binding to the workq should always happen through |
2025 | * workq_kern_threadreq_update_inheritor() |
2026 | */ |
2027 | assert(!filt_wlturnstile_interlock_is_workq(kqwl)); |
2028 | |
2029 | if ((inheritor = kqwl->kqwl_owner)) { |
2030 | flags |= TURNSTILE_INHERITOR_THREAD; |
2031 | } else if ((inheritor = kqr_thread(kqr))) { |
2032 | flags |= TURNSTILE_INHERITOR_THREAD; |
2033 | } |
2034 | |
2035 | turnstile_update_inheritor(turnstile: ts, new_inheritor: inheritor, flags); |
2036 | } |
2037 | |
2038 | #define EVFILT_WORKLOOP_EFAULT_RETRY_COUNT 100 |
2039 | #define FILT_WLATTACH 0 |
2040 | #define FILT_WLTOUCH 1 |
2041 | #define FILT_WLDROP 2 |
2042 | |
2043 | __result_use_check |
2044 | static int |
2045 | filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn, |
2046 | struct kevent_qos_s *kev, kq_index_t qos_index, int op) |
2047 | { |
2048 | user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]); |
2049 | workq_threadreq_t kqr = &kqwl->kqwl_request; |
	thread_t cur_owner, new_owner, extra_thread_ref = THREAD_NULL;
2051 | kq_index_t cur_override = THREAD_QOS_UNSPECIFIED; |
2052 | int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT; |
2053 | int action = KQWL_UTQ_NONE, error = 0; |
2054 | bool wl_inheritor_updated = false, needs_wake = false; |
2055 | uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE]; |
2056 | uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK]; |
2057 | uint64_t udata = 0; |
2058 | struct turnstile *ts = TURNSTILE_NULL; |
2059 | |
2060 | filt_wllock(kqwl); |
2061 | |
2062 | again: |
2063 | new_owner = cur_owner = kqwl->kqwl_owner; |
2064 | |
2065 | /* |
2066 | * Phase 1: |
2067 | * |
2068 | * If asked, load the uint64 value at the user provided address and compare |
2069 | * it against the passed in mask and expected value. |
2070 | * |
2071 | * If NOTE_WL_DISCOVER_OWNER is specified, translate the loaded name as |
2072 | * a thread reference. |
2073 | * |
2074 | * If NOTE_WL_END_OWNERSHIP is specified and the currently known owner is |
2075 | * the current thread, then end ownership. |
2076 | * |
2077 | * Lastly decide whether we need to perform a QoS update. |
2078 | */ |
2079 | if (uaddr) { |
2080 | /* |
2081 | * Until <rdar://problem/24999882> exists, |
		 * disabling preemption around the copyin forces any
		 * vm_fault we encounter to fail.
2084 | */ |
2085 | error = copyin_atomic64(user_addr: uaddr, u64: &udata); |
2086 | |
2087 | /* |
2088 | * If we get EFAULT, drop locks, and retry. |
		 * If we still get an error, report it,
2090 | * else assume the memory has been faulted |
2091 | * and attempt to copyin under lock again. |
2092 | */ |
2093 | switch (error) { |
2094 | case 0: |
2095 | break; |
2096 | case EFAULT: |
2097 | if (efault_retry-- > 0) { |
2098 | filt_wlunlock(kqwl); |
2099 | error = copyin_atomic64(user_addr: uaddr, u64: &udata); |
2100 | filt_wllock(kqwl); |
2101 | if (error == 0) { |
2102 | goto again; |
2103 | } |
2104 | } |
2105 | OS_FALLTHROUGH; |
2106 | default: |
2107 | goto out; |
2108 | } |
2109 | |
2110 | /* Update state as copied in. */ |
2111 | kev->ext[EV_EXTIDX_WL_VALUE] = udata; |
2112 | |
2113 | if ((udata & mask) != (kdata & mask)) { |
2114 | error = ESTALE; |
2115 | } else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) { |
2116 | /* |
2117 | * Decipher the owner port name, and translate accordingly. |
2118 | * The low 2 bits were borrowed for other flags, so mask them off. |
2119 | * |
2120 | * Then attempt translation to a thread reference or fail. |
2121 | */ |
2122 | mach_port_name_t name = (mach_port_name_t)udata & ~0x3; |
2123 | if (name != MACH_PORT_NULL) { |
2124 | name = ipc_entry_name_mask(name); |
2125 | extra_thread_ref = port_name_to_thread(port_name: name, |
2126 | options: PORT_INTRANS_THREAD_IN_CURRENT_TASK); |
2127 | if (extra_thread_ref == THREAD_NULL) { |
2128 | error = EOWNERDEAD; |
2129 | goto out; |
2130 | } |
2131 | new_owner = extra_thread_ref; |
2132 | } |
2133 | } |
2134 | } |
2135 | |
2136 | if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) { |
2137 | new_owner = THREAD_NULL; |
2138 | } |
2139 | |
2140 | if (error == 0) { |
2141 | if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) { |
2142 | action = KQWL_UTQ_SET_QOS_INDEX; |
2143 | } else if (qos_index && kqr->tr_kq_qos_index != qos_index) { |
2144 | action = KQWL_UTQ_SET_QOS_INDEX; |
2145 | } |
2146 | |
2147 | if (op == FILT_WLTOUCH) { |
2148 | /* |
			 * Save off any additional fflags/data we just accepted.
			 * But only keep the last round of "update" bits we acted on,
			 * which helps debugging a lot.
2152 | */ |
2153 | kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK; |
2154 | kn->kn_sfflags |= kev->fflags; |
2155 | if (kev->fflags & NOTE_WL_SYNC_WAKE) { |
2156 | needs_wake = (kn->kn_thread != THREAD_NULL); |
2157 | } |
2158 | } else if (op == FILT_WLDROP) { |
2159 | if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) == |
2160 | NOTE_WL_SYNC_WAIT) { |
2161 | /* |
2162 | * When deleting a SYNC_WAIT knote that hasn't been woken up |
2163 | * explicitly, issue a wake up. |
2164 | */ |
2165 | kn->kn_sfflags |= NOTE_WL_SYNC_WAKE; |
2166 | needs_wake = (kn->kn_thread != THREAD_NULL); |
2167 | } |
2168 | } |
2169 | } |
2170 | |
2171 | /* |
2172 | * Phase 2: |
2173 | * |
2174 | * Commit ownership and QoS changes if any, possibly wake up waiters |
2175 | */ |
2176 | |
2177 | if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) { |
2178 | goto out; |
2179 | } |
2180 | |
2181 | kqlock(kqu: kqwl); |
2182 | |
2183 | /* If already tracked as servicer, don't track as owner */ |
2184 | if (new_owner == kqr_thread(kqr)) { |
2185 | new_owner = THREAD_NULL; |
2186 | } |
2187 | |
2188 | if (cur_owner != new_owner) { |
2189 | kqwl->kqwl_owner = new_owner; |
2190 | if (new_owner == extra_thread_ref) { |
			/* we just transferred this ref to kqwl_owner */
2192 | extra_thread_ref = THREAD_NULL; |
2193 | } |
2194 | cur_override = kqworkloop_override(kqwl); |
2195 | |
2196 | if (new_owner) { |
2197 | /* override it before we drop the old */ |
2198 | if (cur_override != THREAD_QOS_UNSPECIFIED) { |
2199 | thread_add_kevent_override(thread: new_owner, qos_override: cur_override); |
2200 | } |
2201 | if (kqr_thread_requested_pending(kqr)) { |
2202 | if (action == KQWL_UTQ_NONE) { |
2203 | action = KQWL_UTQ_REDRIVE_EVENTS; |
2204 | } |
2205 | } |
2206 | } else if (action == KQWL_UTQ_NONE && |
2207 | !kqr_thread_requested(kqr) && |
2208 | kqwl->kqwl_wakeup_qos) { |
2209 | action = KQWL_UTQ_REDRIVE_EVENTS; |
2210 | } |
2211 | } |
2212 | |
2213 | if (action != KQWL_UTQ_NONE) { |
2214 | kqworkloop_update_threads_qos(kqwl, op: action, qos: qos_index); |
2215 | } |
2216 | |
2217 | ts = kqwl->kqwl_turnstile; |
2218 | if (cur_owner != new_owner && ts) { |
2219 | if (action == KQWL_UTQ_REDRIVE_EVENTS) { |
2220 | /* |
2221 | * Note that when action is KQWL_UTQ_REDRIVE_EVENTS, |
2222 | * the code went through workq_kern_threadreq_initiate() |
2223 | * and the workqueue has set the inheritor already |
2224 | */ |
2225 | assert(filt_wlturnstile_interlock_is_workq(kqwl)); |
2226 | } else if (filt_wlturnstile_interlock_is_workq(kqwl)) { |
2227 | workq_kern_threadreq_lock(p: kqwl->kqwl_p); |
2228 | workq_kern_threadreq_update_inheritor(p: kqwl->kqwl_p, kqr, owner: new_owner, |
2229 | ts, flags: TURNSTILE_IMMEDIATE_UPDATE); |
2230 | workq_kern_threadreq_unlock(p: kqwl->kqwl_p); |
2231 | if (!filt_wlturnstile_interlock_is_workq(kqwl)) { |
2232 | /* |
2233 | * If the workq is no longer the interlock, then |
2234 | * workq_kern_threadreq_update_inheritor() has finished a bind |
2235 | * and we need to fallback to the regular path. |
2236 | */ |
2237 | filt_wlupdate_inheritor(kqwl, ts, flags: TURNSTILE_IMMEDIATE_UPDATE); |
2238 | } |
2239 | wl_inheritor_updated = true; |
2240 | } else { |
2241 | filt_wlupdate_inheritor(kqwl, ts, flags: TURNSTILE_IMMEDIATE_UPDATE); |
2242 | wl_inheritor_updated = true; |
2243 | } |
2244 | |
2245 | /* |
2246 | * We need a turnstile reference because we are dropping the interlock |
2247 | * and the caller has not called turnstile_prepare. |
2248 | */ |
2249 | if (wl_inheritor_updated) { |
2250 | turnstile_reference(turnstile: ts); |
2251 | } |
2252 | } |
2253 | |
2254 | if (needs_wake && ts) { |
2255 | waitq_wakeup64_thread(waitq: &ts->ts_waitq, wake_event: knote_filt_wev64(kn), |
2256 | thread: kn->kn_thread, THREAD_AWAKENED); |
2257 | if (op == FILT_WLATTACH || op == FILT_WLTOUCH) { |
2258 | disable_preemption(); |
2259 | error = EPREEMPTDISABLED; |
2260 | } |
2261 | } |
2262 | |
2263 | kqunlock(kqu: kqwl); |
2264 | |
2265 | out: |
2266 | /* |
2267 | * Phase 3: |
2268 | * |
	 * Unlock and clean up various lingering references and things.
2270 | */ |
2271 | filt_wlunlock(kqwl); |
2272 | |
2273 | #if CONFIG_WORKLOOP_DEBUG |
2274 | KQWL_HISTORY_WRITE_ENTRY(kqwl, { |
2275 | .updater = current_thread(), |
2276 | .servicer = kqr_thread(kqr), /* Note: racy */ |
2277 | .old_owner = cur_owner, |
2278 | .new_owner = new_owner, |
2279 | |
2280 | .kev_ident = kev->ident, |
2281 | .error = (int16_t)error, |
2282 | .kev_flags = kev->flags, |
2283 | .kev_fflags = kev->fflags, |
2284 | |
2285 | .kev_mask = mask, |
2286 | .kev_value = kdata, |
2287 | .in_value = udata, |
2288 | }); |
2289 | #endif // CONFIG_WORKLOOP_DEBUG |
2290 | |
2291 | if (wl_inheritor_updated) { |
2292 | turnstile_update_inheritor_complete(turnstile: ts, flags: TURNSTILE_INTERLOCK_NOT_HELD); |
2293 | turnstile_deallocate_safe(turnstile: ts); |
2294 | } |
2295 | |
2296 | if (cur_owner && new_owner != cur_owner) { |
2297 | if (cur_override != THREAD_QOS_UNSPECIFIED) { |
2298 | thread_drop_kevent_override(thread: cur_owner); |
2299 | } |
2300 | thread_deallocate_safe(thread: cur_owner); |
2301 | } |
2302 | if (extra_thread_ref) { |
2303 | thread_deallocate_safe(thread: extra_thread_ref); |
2304 | } |
2305 | return error; |
2306 | } |
2307 | |
2308 | /* |
 * Remembers the last update that came in from userspace, for debugging purposes.
2310 | * - fflags is mirrored from the userspace kevent |
2311 | * - ext[i, i != VALUE] is mirrored from the userspace kevent |
2312 | * - ext[VALUE] is set to what the kernel loaded atomically |
2313 | * - data is set to the error if any |
2314 | */ |
2315 | static inline void |
2316 | filt_wlremember_last_update(struct knote *kn, struct kevent_qos_s *kev, |
2317 | int error) |
2318 | { |
2319 | kn->kn_fflags = kev->fflags; |
2320 | kn->kn_sdata = error; |
2321 | memcpy(dst: kn->kn_ext, src: kev->ext, n: sizeof(kev->ext)); |
2322 | } |
2323 | |
2324 | static int |
2325 | filt_wlupdate_sync_ipc(struct kqworkloop *kqwl, struct knote *kn, |
2326 | struct kevent_qos_s *kev, int op) |
2327 | { |
2328 | user_addr_t uaddr = (user_addr_t) kev->ext[EV_EXTIDX_WL_ADDR]; |
2329 | uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE]; |
2330 | uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK]; |
2331 | uint64_t udata = 0; |
2332 | int efault_retry = EVFILT_WORKLOOP_EFAULT_RETRY_COUNT; |
2333 | int error = 0; |
2334 | |
2335 | if (op == FILT_WLATTACH) { |
2336 | (void)kqueue_alloc_turnstile(&kqwl->kqwl_kqueue); |
2337 | } else if (uaddr == 0) { |
2338 | return 0; |
2339 | } |
2340 | |
2341 | filt_wllock(kqwl); |
2342 | |
2343 | again: |
2344 | |
2345 | /* |
	 * Do the debounce thing; the lock serializing the state is the knote lock.
2347 | */ |
2348 | if (uaddr) { |
2349 | /* |
2350 | * Until <rdar://problem/24999882> exists, |
		 * disabling preemption around the copyin forces any
		 * vm_fault we encounter to fail.
2353 | */ |
2354 | error = copyin_atomic64(user_addr: uaddr, u64: &udata); |
2355 | |
2356 | /* |
2357 | * If we get EFAULT, drop locks, and retry. |
		 * If we still get an error, report it,
2359 | * else assume the memory has been faulted |
2360 | * and attempt to copyin under lock again. |
2361 | */ |
2362 | switch (error) { |
2363 | case 0: |
2364 | break; |
2365 | case EFAULT: |
2366 | if (efault_retry-- > 0) { |
2367 | filt_wlunlock(kqwl); |
2368 | error = copyin_atomic64(user_addr: uaddr, u64: &udata); |
2369 | filt_wllock(kqwl); |
2370 | if (error == 0) { |
2371 | goto again; |
2372 | } |
2373 | } |
2374 | OS_FALLTHROUGH; |
2375 | default: |
2376 | goto out; |
2377 | } |
2378 | |
2379 | kev->ext[EV_EXTIDX_WL_VALUE] = udata; |
2380 | kn->kn_ext[EV_EXTIDX_WL_VALUE] = udata; |
2381 | |
2382 | if ((udata & mask) != (kdata & mask)) { |
2383 | error = ESTALE; |
2384 | goto out; |
2385 | } |
2386 | } |
2387 | |
2388 | if (op == FILT_WLATTACH) { |
2389 | error = filt_wlattach_sync_ipc(kn); |
2390 | if (error == 0) { |
2391 | disable_preemption(); |
2392 | error = EPREEMPTDISABLED; |
2393 | } |
2394 | } |
2395 | |
2396 | out: |
2397 | filt_wlunlock(kqwl); |
2398 | return error; |
2399 | } |
2400 | |
2401 | static int |
2402 | filt_wlattach(struct knote *kn, struct kevent_qos_s *kev) |
2403 | { |
2404 | struct kqueue *kq = knote_get_kq(kn); |
2405 | struct kqworkloop *kqwl = (struct kqworkloop *)kq; |
2406 | int error = 0, result = 0; |
2407 | kq_index_t qos_index = 0; |
2408 | |
2409 | if (__improbable((kq->kq_state & KQ_WORKLOOP) == 0)) { |
2410 | error = ENOTSUP; |
2411 | goto out; |
2412 | } |
2413 | |
2414 | uint32_t command = (kn->kn_sfflags & NOTE_WL_COMMANDS_MASK); |
2415 | switch (command) { |
2416 | case NOTE_WL_THREAD_REQUEST: |
2417 | if (kn->kn_id != kqwl->kqwl_dynamicid) { |
2418 | error = EINVAL; |
2419 | goto out; |
2420 | } |
2421 | qos_index = _pthread_priority_thread_qos(pp: kn->kn_qos); |
2422 | if (qos_index == THREAD_QOS_UNSPECIFIED) { |
2423 | error = ERANGE; |
2424 | goto out; |
2425 | } |
2426 | if (kqwl->kqwl_request.tr_kq_qos_index) { |
2427 | /* |
2428 | * There already is a thread request, and well, you're only allowed |
2429 | * one per workloop, so fail the attach. |
2430 | */ |
2431 | error = EALREADY; |
2432 | goto out; |
2433 | } |
2434 | break; |
2435 | case NOTE_WL_SYNC_WAIT: |
2436 | case NOTE_WL_SYNC_WAKE: |
2437 | if (kn->kn_id == kqwl->kqwl_dynamicid) { |
2438 | error = EINVAL; |
2439 | goto out; |
2440 | } |
2441 | if ((kn->kn_flags & EV_DISABLE) == 0) { |
2442 | error = EINVAL; |
2443 | goto out; |
2444 | } |
2445 | if (kn->kn_sfflags & NOTE_WL_END_OWNERSHIP) { |
2446 | error = EINVAL; |
2447 | goto out; |
2448 | } |
2449 | break; |
2450 | |
2451 | case NOTE_WL_SYNC_IPC: |
2452 | if ((kn->kn_flags & EV_DISABLE) == 0) { |
2453 | error = EINVAL; |
2454 | goto out; |
2455 | } |
2456 | if (kn->kn_sfflags & (NOTE_WL_UPDATE_QOS | NOTE_WL_DISCOVER_OWNER)) { |
2457 | error = EINVAL; |
2458 | goto out; |
2459 | } |
2460 | break; |
2461 | default: |
2462 | error = EINVAL; |
2463 | goto out; |
2464 | } |
2465 | |
2466 | if (command == NOTE_WL_SYNC_IPC) { |
2467 | error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLATTACH); |
2468 | } else { |
2469 | error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH); |
2470 | } |
2471 | |
2472 | if (error == EPREEMPTDISABLED) { |
2473 | error = 0; |
2474 | result = FILTER_THREADREQ_NODEFEER; |
2475 | } |
2476 | out: |
2477 | if (error) { |
2478 | /* If userland wants ESTALE to be hidden, fail the attach anyway */ |
2479 | if (error == ESTALE && (kn->kn_sfflags & NOTE_WL_IGNORE_ESTALE)) { |
2480 | error = 0; |
2481 | } |
2482 | knote_set_error(kn, error); |
2483 | return result; |
2484 | } |
2485 | if (command == NOTE_WL_SYNC_WAIT) { |
2486 | return kevent_register_wait_prepare(kn, kev, result); |
2487 | } |
2488 | /* Just attaching the thread request successfully will fire it */ |
2489 | if (command == NOTE_WL_THREAD_REQUEST) { |
2490 | /* |
2491 | * Thread Request knotes need an explicit touch to be active again, |
2492 | * so delivering an event needs to also consume it. |
2493 | */ |
2494 | kn->kn_flags |= EV_CLEAR; |
2495 | return result | FILTER_ACTIVE; |
2496 | } |
2497 | return result; |
2498 | } |
2499 | |
2500 | static void __dead2 |
2501 | filt_wlwait_continue(void *parameter, wait_result_t wr) |
2502 | { |
2503 | struct _kevent_register *cont_args = parameter; |
2504 | struct kqworkloop *kqwl = cont_args->kqwl; |
2505 | |
2506 | kqlock(kqu: kqwl); |
2507 | if (filt_wlturnstile_interlock_is_workq(kqwl)) { |
2508 | workq_kern_threadreq_lock(p: kqwl->kqwl_p); |
2509 | turnstile_complete(proprietor: (uintptr_t)kqwl, tstore: &kqwl->kqwl_turnstile, NULL, type: TURNSTILE_WORKLOOPS); |
2510 | workq_kern_threadreq_unlock(p: kqwl->kqwl_p); |
2511 | } else { |
2512 | turnstile_complete(proprietor: (uintptr_t)kqwl, tstore: &kqwl->kqwl_turnstile, NULL, type: TURNSTILE_WORKLOOPS); |
2513 | } |
2514 | kqunlock(kqu: kqwl); |
2515 | |
2516 | turnstile_cleanup(); |
2517 | |
2518 | if (wr == THREAD_INTERRUPTED) { |
2519 | cont_args->kev.flags |= EV_ERROR; |
2520 | cont_args->kev.data = EINTR; |
2521 | } else if (wr != THREAD_AWAKENED) { |
2522 | panic("Unexpected wait result: %d" , wr); |
2523 | } |
2524 | |
2525 | kevent_register_wait_return(cont_args); |
2526 | } |
2527 | |
2528 | /* |
 * Called with the workloop mutex held; most of the time this never returns,
 * as it calls filt_wlwait_continue through a continuation.
2531 | */ |
2532 | static void __dead2 |
2533 | filt_wlpost_register_wait(struct uthread *uth, struct knote *kn, |
2534 | struct _kevent_register *cont_args) |
2535 | { |
2536 | struct kqworkloop *kqwl = cont_args->kqwl; |
2537 | workq_threadreq_t kqr = &kqwl->kqwl_request; |
2538 | struct turnstile *ts; |
2539 | bool workq_locked = false; |
2540 | |
2541 | kqlock_held(kqu: kqwl); |
2542 | |
2543 | if (filt_wlturnstile_interlock_is_workq(kqwl)) { |
2544 | workq_kern_threadreq_lock(p: kqwl->kqwl_p); |
2545 | workq_locked = true; |
2546 | } |
2547 | |
2548 | ts = turnstile_prepare(proprietor: (uintptr_t)kqwl, tstore: &kqwl->kqwl_turnstile, |
2549 | TURNSTILE_NULL, type: TURNSTILE_WORKLOOPS); |
2550 | |
2551 | if (workq_locked) { |
2552 | workq_kern_threadreq_update_inheritor(p: kqwl->kqwl_p, |
2553 | kqr: &kqwl->kqwl_request, owner: kqwl->kqwl_owner, ts, |
2554 | flags: TURNSTILE_DELAYED_UPDATE); |
2555 | if (!filt_wlturnstile_interlock_is_workq(kqwl)) { |
2556 | /* |
2557 | * if the interlock is no longer the workqueue lock, |
2558 | * then we don't need to hold it anymore. |
2559 | */ |
2560 | workq_kern_threadreq_unlock(p: kqwl->kqwl_p); |
2561 | workq_locked = false; |
2562 | } |
2563 | } |
2564 | if (!workq_locked) { |
2565 | /* |
2566 | * If the interlock is the workloop's, then it's our responsibility to |
2567 | * call update_inheritor, so just do it. |
2568 | */ |
2569 | filt_wlupdate_inheritor(kqwl, ts, flags: TURNSTILE_DELAYED_UPDATE); |
2570 | } |
2571 | |
2572 | thread_set_pending_block_hint(thread: get_machthread(uth), block_hint: kThreadWaitWorkloopSyncWait); |
2573 | waitq_assert_wait64(waitq: &ts->ts_waitq, wait_event: knote_filt_wev64(kn), |
2574 | THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER); |
2575 | |
2576 | if (workq_locked) { |
2577 | workq_kern_threadreq_unlock(p: kqwl->kqwl_p); |
2578 | } |
2579 | |
2580 | thread_t thread = kqwl->kqwl_owner ?: kqr_thread(kqr); |
2581 | if (thread) { |
2582 | thread_reference(thread); |
2583 | } |
2584 | |
2585 | kevent_register_wait_block(ts, handoff_thread: thread, cont: filt_wlwait_continue, cont_args); |
2586 | } |
2587 | |
2588 | /* called in stackshot context to report the thread responsible for blocking this thread */ |
2589 | void |
2590 | kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread, |
2591 | event64_t event, thread_waitinfo_t *waitinfo) |
2592 | { |
2593 | struct knote *kn = (struct knote *)event; |
2594 | |
2595 | zone_require(zone: knote_zone, addr: kn); |
2596 | |
2597 | assert(kn->kn_thread == thread); |
2598 | |
2599 | struct kqueue *kq = knote_get_kq(kn); |
2600 | |
2601 | zone_require(zone: kqworkloop_zone, addr: kq); |
2602 | assert(kq->kq_state & KQ_WORKLOOP); |
2603 | |
2604 | struct kqworkloop *kqwl = (struct kqworkloop *)kq; |
2605 | workq_threadreq_t kqr = &kqwl->kqwl_request; |
2606 | |
2607 | thread_t kqwl_owner = kqwl->kqwl_owner; |
2608 | |
2609 | if (kqwl_owner != THREAD_NULL) { |
2610 | thread_require(thread: kqwl_owner); |
2611 | waitinfo->owner = thread_tid(thread: kqwl->kqwl_owner); |
2612 | } else if ((kqr->tr_state >= WORKQ_TR_STATE_BINDING) && (kqr->tr_thread != NULL)) { |
2613 | thread_require(thread: kqr->tr_thread); |
2614 | waitinfo->owner = thread_tid(thread: kqr->tr_thread); |
2615 | } else if (kqr_thread_requested_pending(kqr)) { /* > idle, < bound */ |
2616 | waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED; |
2617 | } else { |
2618 | waitinfo->owner = 0; |
2619 | } |
2620 | |
2621 | waitinfo->context = kqwl->kqwl_dynamicid; |
2622 | } |
2623 | |
2624 | static void |
2625 | filt_wldetach(struct knote *kn) |
2626 | { |
2627 | if (kn->kn_sfflags & NOTE_WL_SYNC_IPC) { |
2628 | filt_wldetach_sync_ipc(kn); |
2629 | } else if (kn->kn_thread) { |
2630 | kevent_register_wait_cleanup(kn); |
2631 | } |
2632 | } |
2633 | |
2634 | static int |
2635 | filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_qos_s *kev, |
2636 | thread_qos_t *qos_index) |
2637 | { |
2638 | uint32_t new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK; |
2639 | uint32_t sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK; |
2640 | |
2641 | if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) { |
2642 | return EINVAL; |
2643 | } |
2644 | if (kev->fflags & NOTE_WL_UPDATE_QOS) { |
2645 | if (kev->flags & EV_DELETE) { |
2646 | return EINVAL; |
2647 | } |
2648 | if (sav_commands != NOTE_WL_THREAD_REQUEST) { |
2649 | return EINVAL; |
2650 | } |
2651 | if (!(*qos_index = _pthread_priority_thread_qos(pp: kev->qos))) { |
2652 | return ERANGE; |
2653 | } |
2654 | } |
2655 | |
2656 | switch (new_commands) { |
2657 | case NOTE_WL_THREAD_REQUEST: |
2658 | /* thread requests can only update themselves */ |
2659 | if (sav_commands != NOTE_WL_THREAD_REQUEST) { |
2660 | return EINVAL; |
2661 | } |
2662 | break; |
2663 | |
2664 | case NOTE_WL_SYNC_WAIT: |
2665 | if (kev->fflags & NOTE_WL_END_OWNERSHIP) { |
2666 | return EINVAL; |
2667 | } |
2668 | goto sync_checks; |
2669 | |
2670 | case NOTE_WL_SYNC_WAKE: |
2671 | sync_checks: |
2672 | if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) { |
2673 | return EINVAL; |
2674 | } |
2675 | if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) { |
2676 | return EINVAL; |
2677 | } |
2678 | break; |
2679 | |
2680 | case NOTE_WL_SYNC_IPC: |
2681 | if (sav_commands != NOTE_WL_SYNC_IPC) { |
2682 | return EINVAL; |
2683 | } |
2684 | if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) { |
2685 | return EINVAL; |
2686 | } |
2687 | break; |
2688 | |
2689 | default: |
2690 | return EINVAL; |
2691 | } |
2692 | return 0; |
2693 | } |
2694 | |
2695 | static int |
2696 | filt_wltouch(struct knote *kn, struct kevent_qos_s *kev) |
2697 | { |
2698 | struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn); |
2699 | thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED; |
2700 | int result = 0; |
2701 | |
2702 | int error = filt_wlvalidate_kev_flags(kn, kev, qos_index: &qos_index); |
2703 | if (error) { |
2704 | goto out; |
2705 | } |
2706 | |
2707 | uint32_t command = kev->fflags & NOTE_WL_COMMANDS_MASK; |
2708 | if (command == NOTE_WL_SYNC_IPC) { |
2709 | error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLTOUCH); |
2710 | } else { |
2711 | error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH); |
2712 | filt_wlremember_last_update(kn, kev, error); |
2713 | } |
2714 | if (error == EPREEMPTDISABLED) { |
2715 | error = 0; |
2716 | result = FILTER_THREADREQ_NODEFEER; |
2717 | } |
2718 | |
2719 | out: |
2720 | if (error) { |
2721 | if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) { |
2722 | /* If userland wants ESTALE to be hidden, do not activate */ |
2723 | return result; |
2724 | } |
2725 | kev->flags |= EV_ERROR; |
2726 | kev->data = error; |
2727 | return result; |
2728 | } |
2729 | if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) { |
2730 | return kevent_register_wait_prepare(kn, kev, result); |
2731 | } |
2732 | /* Just touching the thread request successfully will fire it */ |
2733 | if (command == NOTE_WL_THREAD_REQUEST) { |
2734 | if (kev->fflags & NOTE_WL_UPDATE_QOS) { |
2735 | result |= FILTER_UPDATE_REQ_QOS; |
2736 | } |
2737 | result |= FILTER_ACTIVE; |
2738 | } |
2739 | return result; |
2740 | } |
2741 | |
2742 | static bool |
2743 | filt_wlallow_drop(struct knote *kn, struct kevent_qos_s *kev) |
2744 | { |
2745 | struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn); |
2746 | |
2747 | int error = filt_wlvalidate_kev_flags(kn, kev, NULL); |
2748 | if (error) { |
2749 | goto out; |
2750 | } |
2751 | |
2752 | uint32_t command = (kev->fflags & NOTE_WL_COMMANDS_MASK); |
2753 | if (command == NOTE_WL_SYNC_IPC) { |
2754 | error = filt_wlupdate_sync_ipc(kqwl, kn, kev, FILT_WLDROP); |
2755 | } else { |
2756 | error = filt_wlupdate(kqwl, kn, kev, qos_index: 0, FILT_WLDROP); |
2757 | filt_wlremember_last_update(kn, kev, error); |
2758 | } |
2759 | assert(error != EPREEMPTDISABLED); |
2760 | |
2761 | out: |
2762 | if (error) { |
2763 | if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) { |
2764 | return false; |
2765 | } |
2766 | kev->flags |= EV_ERROR; |
2767 | kev->data = error; |
2768 | return false; |
2769 | } |
2770 | return true; |
2771 | } |
2772 | |
2773 | static int |
2774 | filt_wlprocess(struct knote *kn, struct kevent_qos_s *kev) |
2775 | { |
2776 | struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn); |
2777 | int rc = 0; |
2778 | |
2779 | assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST); |
2780 | |
2781 | kqlock(kqu: kqwl); |
2782 | |
2783 | if (kqwl->kqwl_owner) { |
2784 | /* |
		 * <rdar://problem/33584321> userspace can sometimes cause the
		 * thread request knote to be processed, because events were
		 * delivered without triggering a drain session.
2788 | * |
2789 | * When that happens, the automatic deactivation due to process |
2790 | * would swallow the event, so we have to activate the knote again. |
2791 | */ |
2792 | knote_activate(kqu: kqwl, kn, FILTER_ACTIVE); |
2793 | } else { |
2794 | #if DEBUG || DEVELOPMENT |
2795 | if (kevent_debug_flags & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) { |
2796 | /* |
2797 | * see src/queue_internal.h in libdispatch |
2798 | */ |
2799 | #define DISPATCH_QUEUE_ENQUEUED 0x1ull |
2800 | user_addr_t addr = CAST_USER_ADDR_T(kn->kn_ext[EV_EXTIDX_WL_ADDR]); |
2801 | task_t t = current_task(); |
2802 | uint64_t val; |
2803 | if (addr && task_is_active(t) && !task_is_halting(t) && |
2804 | copyin_atomic64(addr, &val) == 0 && |
2805 | val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 && |
2806 | (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) { |
2807 | panic("kevent: workloop %#016llx is not enqueued " |
2808 | "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)" , |
2809 | kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]); |
2810 | } |
2811 | } |
2812 | #endif |
2813 | knote_fill_kevent(kn, kev, data: 0); |
2814 | kev->fflags = kn->kn_sfflags; |
2815 | rc |= FILTER_ACTIVE; |
2816 | } |
2817 | |
2818 | kqunlock(kqu: kqwl); |
2819 | |
2820 | if (rc & FILTER_ACTIVE) { |
2821 | workq_thread_set_max_qos(p: kqwl->kqwl_p, kqr: &kqwl->kqwl_request); |
2822 | } |
2823 | return rc; |
2824 | } |
2825 | |
2826 | SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = { |
2827 | .f_extended_codes = true, |
2828 | .f_attach = filt_wlattach, |
2829 | .f_detach = filt_wldetach, |
2830 | .f_event = filt_bad_event, |
2831 | .f_touch = filt_wltouch, |
2832 | .f_process = filt_wlprocess, |
2833 | .f_allow_drop = filt_wlallow_drop, |
2834 | .f_post_register_wait = filt_wlpost_register_wait, |
2835 | }; |
2836 | |
2837 | #pragma mark - kqueues allocation and deallocation |
2838 | |
2839 | OS_NOINLINE |
2840 | static void |
2841 | kqworkloop_dealloc(struct kqworkloop *, bool hash_remove); |
2842 | |
2843 | static inline bool |
2844 | kqworkloop_try_retain(struct kqworkloop *kqwl) |
2845 | { |
2846 | return os_ref_retain_try_raw(&kqwl->kqwl_retains, NULL); |
2847 | } |
2848 | |
2849 | static inline void |
2850 | kqworkloop_retain(struct kqworkloop *kqwl) |
2851 | { |
2852 | return os_ref_retain_raw(&kqwl->kqwl_retains, NULL); |
2853 | } |
2854 | |
2855 | OS_ALWAYS_INLINE |
2856 | static inline void |
2857 | kqueue_retain(kqueue_t kqu) |
2858 | { |
2859 | if (kqu.kq->kq_state & KQ_DYNAMIC) { |
2860 | kqworkloop_retain(kqwl: kqu.kqwl); |
2861 | } |
2862 | } |
2863 | |
2864 | OS_ALWAYS_INLINE |
2865 | static inline void |
2866 | kqworkloop_release_live(struct kqworkloop *kqwl) |
2867 | { |
2868 | os_ref_release_live_raw(&kqwl->kqwl_retains, NULL); |
2869 | } |
2870 | |
2871 | OS_ALWAYS_INLINE |
2872 | static inline void |
2873 | kqueue_release_live(kqueue_t kqu) |
2874 | { |
2875 | if (kqu.kq->kq_state & KQ_DYNAMIC) { |
2876 | kqworkloop_release_live(kqwl: kqu.kqwl); |
2877 | } |
2878 | } |
2879 | |
2880 | OS_ALWAYS_INLINE |
2881 | static inline void |
2882 | kqworkloop_release(struct kqworkloop *kqwl) |
2883 | { |
2884 | if (os_ref_release_raw(&kqwl->kqwl_retains, NULL) == 0) { |
2885 | kqworkloop_dealloc(kqwl, true); |
2886 | } |
2887 | } |
2888 | |
2889 | OS_ALWAYS_INLINE |
2890 | static inline void |
2891 | kqueue_release(kqueue_t kqu) |
2892 | { |
2893 | if (kqu.kq->kq_state & KQ_DYNAMIC) { |
2894 | kqworkloop_release(kqwl: kqu.kqwl); |
2895 | } |
2896 | } |
2897 | |
2898 | /*! |
2899 | * @function kqueue_destroy |
2900 | * |
2901 | * @brief |
2902 | * Common part to all kqueue dealloc functions. |
2903 | */ |
2904 | OS_NOINLINE |
2905 | static void |
2906 | kqueue_destroy(kqueue_t kqu, zone_t zone) |
2907 | { |
2908 | lck_spin_destroy(lck: &kqu.kq->kq_lock, grp: &kq_lck_grp); |
2909 | |
2910 | zfree(zone, kqu.kq); |
2911 | } |
2912 | |
2913 | /*! |
2914 | * @function kqueue_init |
2915 | * |
2916 | * @brief |
2917 | * Common part to all kqueue alloc functions. |
2918 | */ |
2919 | static kqueue_t |
2920 | kqueue_init(kqueue_t kqu) |
2921 | { |
2922 | lck_spin_init(lck: &kqu.kq->kq_lock, grp: &kq_lck_grp, LCK_ATTR_NULL); |
2923 | return kqu; |
2924 | } |
2925 | |
2926 | #pragma mark kqfile allocation and deallocation |
2927 | |
2928 | /*! |
2929 | * @function kqueue_dealloc |
2930 | * |
2931 | * @brief |
2932 | * Detach all knotes from a kqfile and free it. |
2933 | * |
2934 | * @discussion |
 * We walk each list looking for knotes referencing this
 * kqueue. If we find one, we try to drop it. But
2937 | * if we fail to get a drop reference, that will wait |
2938 | * until it is dropped. So, we can just restart again |
2939 | * safe in the assumption that the list will eventually |
2940 | * not contain any more references to this kqueue (either |
2941 | * we dropped them all, or someone else did). |
2942 | * |
2943 | * Assumes no new events are being added to the kqueue. |
2944 | * Nothing locked on entry or exit. |
2945 | */ |
2946 | void |
2947 | kqueue_dealloc(struct kqueue *kq) |
2948 | { |
2949 | KNOTE_LOCK_CTX(knlc); |
2950 | struct proc *p = kq->kq_p; |
2951 | struct filedesc *fdp = &p->p_fd; |
2952 | struct knote *kn; |
2953 | |
2954 | assert(kq && (kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0); |
2955 | |
2956 | proc_fdlock(p); |
2957 | for (int i = 0; i < fdp->fd_knlistsize; i++) { |
2958 | kn = SLIST_FIRST(&fdp->fd_knlist[i]); |
2959 | while (kn != NULL) { |
2960 | if (kq == knote_get_kq(kn)) { |
2961 | kqlock(kqu: kq); |
2962 | proc_fdunlock(p); |
2963 | if (knote_lock(kqu: kq, kn, knlc: &knlc, kqlocking: KNOTE_KQ_LOCK_ON_SUCCESS)) { |
2964 | knote_drop(kqu: kq, kn, knlc: &knlc); |
2965 | } |
2966 | proc_fdlock(p); |
2967 | /* start over at beginning of list */ |
2968 | kn = SLIST_FIRST(&fdp->fd_knlist[i]); |
2969 | continue; |
2970 | } |
2971 | kn = SLIST_NEXT(kn, kn_link); |
2972 | } |
2973 | } |
2974 | |
2975 | knhash_lock(fdp); |
2976 | proc_fdunlock(p); |
2977 | |
2978 | if (fdp->fd_knhashmask != 0) { |
2979 | for (int i = 0; i < (int)fdp->fd_knhashmask + 1; i++) { |
2980 | kn = SLIST_FIRST(&fdp->fd_knhash[i]); |
2981 | while (kn != NULL) { |
2982 | if (kq == knote_get_kq(kn)) { |
2983 | kqlock(kqu: kq); |
2984 | knhash_unlock(fdp); |
2985 | if (knote_lock(kqu: kq, kn, knlc: &knlc, kqlocking: KNOTE_KQ_LOCK_ON_SUCCESS)) { |
2986 | knote_drop(kqu: kq, kn, knlc: &knlc); |
2987 | } |
2988 | knhash_lock(fdp); |
2989 | /* start over at beginning of list */ |
2990 | kn = SLIST_FIRST(&fdp->fd_knhash[i]); |
2991 | continue; |
2992 | } |
2993 | kn = SLIST_NEXT(kn, kn_link); |
2994 | } |
2995 | } |
2996 | } |
2997 | knhash_unlock(fdp); |
2998 | |
2999 | kqueue_destroy(kqu: kq, zone: kqfile_zone); |
3000 | } |
3001 | |
3002 | /*! |
3003 | * @function kqueue_alloc |
3004 | * |
3005 | * @brief |
3006 | * Allocate a kqfile. |
3007 | */ |
3008 | struct kqueue * |
3009 | kqueue_alloc(struct proc *p) |
3010 | { |
3011 | struct kqfile *kqf; |
3012 | |
3013 | /* |
3014 | * kqfiles are created with kqueue() so we need to wait for |
3015 | * the first kevent syscall to know which bit among |
3016 | * KQ_KEV_{32,64,QOS} will be set in kqf_state |
3017 | */ |
3018 | kqf = zalloc_flags(kqfile_zone, Z_WAITOK | Z_ZERO); |
3019 | kqf->kqf_p = p; |
3020 | TAILQ_INIT_AFTER_BZERO(&kqf->kqf_queue); |
3021 | TAILQ_INIT_AFTER_BZERO(&kqf->kqf_suppressed); |
3022 | |
3023 | return kqueue_init(kqu: kqf).kq; |
3024 | } |
3025 | |
3026 | /*! |
3027 | * @function kqueue_internal |
3028 | * |
3029 | * @brief |
3030 | * Core implementation for kqueue and guarded_kqueue_np() |
3031 | */ |
3032 | int |
3033 | kqueue_internal(struct proc *p, fp_initfn_t fp_init, void *initarg, int32_t *retval) |
3034 | { |
3035 | struct kqueue *kq; |
3036 | struct fileproc *fp; |
3037 | int fd, error; |
3038 | |
3039 | error = falloc_withinit(p, p_cred: current_cached_proc_cred(p), |
3040 | ctx: vfs_context_current(), resultfp: &fp, resultfd: &fd, fp_init, initarg); |
3041 | if (error) { |
3042 | return error; |
3043 | } |
3044 | |
3045 | kq = kqueue_alloc(p); |
3046 | if (kq == NULL) { |
3047 | fp_free(p, fd, fp); |
3048 | return ENOMEM; |
3049 | } |
3050 | |
3051 | fp->fp_flags |= FP_CLOEXEC | FP_CLOFORK; |
3052 | fp->f_flag = FREAD | FWRITE; |
3053 | fp->f_ops = &kqueueops; |
3054 | fp_set_data(fp, fg_data: kq); |
3055 | fp->f_lflags |= FG_CONFINED; |
3056 | |
3057 | proc_fdlock(p); |
3058 | procfdtbl_releasefd(p, fd, NULL); |
3059 | fp_drop(p, fd, fp, locked: 1); |
3060 | proc_fdunlock(p); |
3061 | |
3062 | *retval = fd; |
3063 | return error; |
3064 | } |
3065 | |
3066 | /*! |
3067 | * @function kqueue |
3068 | * |
3069 | * @brief |
3070 | * The kqueue syscall. |
3071 | */ |
3072 | int |
3073 | kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval) |
3074 | { |
3075 | return kqueue_internal(p, NULL, NULL, retval); |
3076 | } |
3077 | |
3078 | #pragma mark kqworkq allocation and deallocation |
3079 | |
3080 | /*! |
3081 | * @function kqworkq_dealloc |
3082 | * |
3083 | * @brief |
3084 | * Deallocates a workqueue kqueue. |
3085 | * |
3086 | * @discussion |
3087 | * This only happens at process death, or for races with concurrent |
 * kevent_get_kqwq calls, hence we don't have to care about knotes referencing
 * this kqueue: either there are none, or someone else took care of them.
3090 | */ |
3091 | void |
3092 | kqworkq_dealloc(struct kqworkq *kqwq) |
3093 | { |
3094 | kqueue_destroy(kqu: kqwq, zone: kqworkq_zone); |
3095 | } |
3096 | |
3097 | /*! |
3098 | * @function kqworkq_alloc |
3099 | * |
3100 | * @brief |
3101 | * Allocates a workqueue kqueue. |
3102 | * |
3103 | * @discussion |
3104 | * This is the slow path of kevent_get_kqwq. |
3105 | * This takes care of making sure procs have a single workq kqueue. |
3106 | */ |
3107 | OS_NOINLINE |
3108 | static struct kqworkq * |
3109 | kqworkq_alloc(struct proc *p, unsigned int flags) |
3110 | { |
3111 | struct kqworkq *kqwq, *tmp; |
3112 | |
3113 | kqwq = zalloc_flags(kqworkq_zone, Z_WAITOK | Z_ZERO); |
3114 | |
3115 | assert((flags & KEVENT_FLAG_LEGACY32) == 0); |
3116 | if (flags & KEVENT_FLAG_LEGACY64) { |
3117 | kqwq->kqwq_state = KQ_WORKQ | KQ_KEV64; |
3118 | } else { |
3119 | kqwq->kqwq_state = KQ_WORKQ | KQ_KEV_QOS; |
3120 | } |
3121 | kqwq->kqwq_p = p; |
3122 | |
3123 | for (int i = 0; i < KQWQ_NBUCKETS; i++) { |
3124 | TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_queue[i]); |
3125 | TAILQ_INIT_AFTER_BZERO(&kqwq->kqwq_suppressed[i]); |
3126 | } |
3127 | for (int i = 0; i < KQWQ_NBUCKETS; i++) { |
3128 | /* |
		 * Because of how the bucketized system works, we mix overcommit
		 * sources with non-overcommit ones: each time we move a knote from
		 * one bucket to the next due to overrides, we'd have to track
		 * overcommitness, and it's really not worth it in a workloop-enabled
		 * world that tracks this faithfully.
3134 | * |
3135 | * Incidentally, this behaves like the original manager-based |
3136 | * kqwq where event delivery always happened (hence is |
3137 | * "overcommit") |
3138 | */ |
3139 | kqwq->kqwq_request[i].tr_state = WORKQ_TR_STATE_IDLE; |
3140 | kqwq->kqwq_request[i].tr_flags = WORKQ_TR_FLAG_KEVENT; |
3141 | if (i != KQWQ_QOS_MANAGER) { |
3142 | kqwq->kqwq_request[i].tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT; |
3143 | } |
3144 | kqwq->kqwq_request[i].tr_kq_qos_index = (kq_index_t)i + 1; |
3145 | } |
3146 | |
3147 | kqueue_init(kqu: kqwq); |
3148 | |
3149 | if (!os_atomic_cmpxchgv(&p->p_fd.fd_wqkqueue, NULL, kqwq, &tmp, release)) { |
3150 | kqworkq_dealloc(kqwq); |
3151 | return tmp; |
3152 | } |
3153 | |
3154 | return kqwq; |
3155 | } |
3156 | |
3157 | #pragma mark kqworkloop allocation and deallocation |
3158 | |
3159 | #define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) |
3160 | #define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE |
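
/*
 * Example (illustrative only): with fd_kqhashmask == 0xff, a dynamic kqueue
 * id of 0x12345 hashes to ((0x12345 ^ 0x123) & 0xff) == 0x66, folding the
 * second byte of the id into the bucket index.
 */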
3161 | |
3162 | OS_ALWAYS_INLINE |
3163 | static inline void |
3164 | kqhash_lock(struct filedesc *fdp) |
3165 | { |
3166 | lck_mtx_lock_spin_always(lck: &fdp->fd_kqhashlock); |
3167 | } |
3168 | |
3169 | OS_ALWAYS_INLINE |
3170 | static inline void |
3171 | kqhash_unlock(struct filedesc *fdp) |
3172 | { |
3173 | lck_mtx_unlock(lck: &fdp->fd_kqhashlock); |
3174 | } |
3175 | |
3176 | OS_ALWAYS_INLINE |
3177 | static inline void |
3178 | kqworkloop_hash_insert_locked(struct filedesc *fdp, kqueue_id_t id, |
3179 | struct kqworkloop *kqwl) |
3180 | { |
3181 | struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)]; |
3182 | LIST_INSERT_HEAD(list, kqwl, kqwl_hashlink); |
3183 | } |
3184 | |
3185 | OS_ALWAYS_INLINE |
3186 | static inline struct kqworkloop * |
3187 | kqworkloop_hash_lookup_locked(struct filedesc *fdp, kqueue_id_t id) |
3188 | { |
3189 | struct kqwllist *list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)]; |
3190 | struct kqworkloop *kqwl; |
3191 | |
3192 | LIST_FOREACH(kqwl, list, kqwl_hashlink) { |
3193 | if (kqwl->kqwl_dynamicid == id) { |
3194 | return kqwl; |
3195 | } |
3196 | } |
3197 | return NULL; |
3198 | } |
3199 | |
3200 | static struct kqworkloop * |
3201 | kqworkloop_hash_lookup_and_retain(struct filedesc *fdp, kqueue_id_t kq_id) |
3202 | { |
3203 | struct kqworkloop *kqwl = NULL; |
3204 | |
3205 | kqhash_lock(fdp); |
3206 | if (__probable(fdp->fd_kqhash)) { |
3207 | kqwl = kqworkloop_hash_lookup_locked(fdp, id: kq_id); |
3208 | if (kqwl && !kqworkloop_try_retain(kqwl)) { |
3209 | kqwl = NULL; |
3210 | } |
3211 | } |
3212 | kqhash_unlock(fdp); |
3213 | return kqwl; |
3214 | } |
3215 | |
3216 | OS_NOINLINE |
3217 | static void |
3218 | kqworkloop_hash_init(struct filedesc *fdp) |
3219 | { |
3220 | struct kqwllist *alloc_hash; |
3221 | u_long alloc_mask; |
3222 | |
3223 | kqhash_unlock(fdp); |
3224 | alloc_hash = hashinit(CONFIG_KQ_HASHSIZE, M_KQUEUE, hashmask: &alloc_mask); |
3225 | kqhash_lock(fdp); |
3226 | |
3227 | /* See if we won the race */ |
3228 | if (__probable(fdp->fd_kqhashmask == 0)) { |
3229 | fdp->fd_kqhash = alloc_hash; |
3230 | fdp->fd_kqhashmask = alloc_mask; |
3231 | } else { |
3232 | kqhash_unlock(fdp); |
3233 | hashdestroy(alloc_hash, M_KQUEUE, hashmask: alloc_mask); |
3234 | kqhash_lock(fdp); |
3235 | } |
3236 | } |
3237 | |
3238 | /* |
 * kqueue iotier override is only supported for a kqueue that has
 * a single port attached as a mach port source. Updating the iotier
 * override on the mach port source updates the override
 * on the kqueue as well. Since a kqueue with an iotier override
 * only has one port attached, there is no saturation logic as there
 * is for the QoS override; the iotier override of the mach port source
 * is reflected directly in the kevent iotier override.
3246 | */ |
3247 | void |
3248 | kqueue_set_iotier_override(kqueue_t kqu, uint8_t iotier_override) |
3249 | { |
3250 | if (!(kqu.kq->kq_state & KQ_WORKLOOP)) { |
3251 | return; |
3252 | } |
3253 | |
3254 | struct kqworkloop *kqwl = kqu.kqwl; |
3255 | os_atomic_store(&kqwl->kqwl_iotier_override, iotier_override, relaxed); |
3256 | } |
3257 | |
3258 | uint8_t |
3259 | kqueue_get_iotier_override(kqueue_t kqu) |
3260 | { |
3261 | if (!(kqu.kq->kq_state & KQ_WORKLOOP)) { |
3262 | return THROTTLE_LEVEL_END; |
3263 | } |
3264 | |
3265 | struct kqworkloop *kqwl = kqu.kqwl; |
3266 | return os_atomic_load(&kqwl->kqwl_iotier_override, relaxed); |
3267 | } |
3268 | |
3269 | #if CONFIG_PREADOPT_TG |
3270 | /* |
 * This function is called with a borrowed reference on the thread group,
 * without the kq lock held but with the mqueue lock held. It may or may not
 * hold the knote lock (it is called from both f_event as well as
 * f_attach/f_touch). Upon success, an additional reference on the TG is taken
3275 | */ |
3276 | void |
3277 | kqueue_set_preadopted_thread_group(kqueue_t kqu, struct thread_group *tg, thread_qos_t qos) |
3278 | { |
3279 | if (!(kqu.kq->kq_state & KQ_WORKLOOP)) { |
3280 | KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_PREADOPT_NA), |
3281 | (uintptr_t)thread_tid(current_thread()), 0, 0, 0); |
3282 | return; |
3283 | } |
3284 | |
3285 | struct kqworkloop *kqwl = kqu.kqwl; |
3286 | |
3287 | assert(qos < THREAD_QOS_LAST); |
3288 | |
3289 | thread_group_retain(tg); |
3290 | |
3291 | thread_group_qos_t old_tg; thread_group_qos_t new_tg; |
3292 | int ret = os_atomic_rmw_loop(&kqwl->kqwl_preadopt_tg, old_tg, new_tg, relaxed, { |
3293 | if (!KQWL_CAN_ADOPT_PREADOPT_TG(old_tg)) { |
3294 | os_atomic_rmw_loop_give_up(break); |
3295 | } |
3296 | |
3297 | if (old_tg != KQWL_PREADOPTED_TG_NULL) { |
3298 | /* |
3299 | * Note that old_tg could be a NULL TG pointer but with a QoS |
3300 | * set. See also workq_thread_reset_pri. |
3301 | * |
3302 | * Compare the QoS of existing preadopted tg with new one and |
3303 | * only overwrite the thread group if we have one with a higher |
3304 | * QoS. |
3305 | */ |
3306 | thread_qos_t existing_qos = KQWL_GET_PREADOPTED_TG_QOS(old_tg); |
3307 | if (existing_qos >= qos) { |
3308 | os_atomic_rmw_loop_give_up(break); |
3309 | } |
3310 | } |
3311 | |
3312 | // Transfer the ref taken earlier in the function to the kqwl |
3313 | new_tg = KQWL_ENCODE_PREADOPTED_TG_QOS(tg, qos); |
3314 | }); |
3315 | |
3316 | if (ret) { |
3317 | KQWL_PREADOPT_TG_HISTORY_WRITE_ENTRY(kqwl, KQWL_PREADOPT_OP_INCOMING_IPC, old_tg, tg); |
3318 | |
3319 | if (KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) { |
3320 | thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(old_tg)); |
3321 | } |
3322 | |
3323 | os_atomic_store(&kqwl->kqwl_preadopt_tg_needs_redrive, KQWL_PREADOPT_TG_NEEDS_REDRIVE, release); |
3324 | } else { |
3325 | // We failed to write to the kqwl_preadopt_tg, drop the ref we took |
3326 | // earlier in the function |
3327 | thread_group_deallocate_safe(tg); |
3328 | } |
3329 | } |
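/*
 * Illustrative sketch (not part of this file): the reference-transfer shape
 * used above -- retain up front, publish through an atomic loop only if the
 * new value has a higher priority, and drop the retain when the publish is
 * skipped.  Names and the 3-bit priority tag are hypothetical; C11 atomics
 * stand in for os_atomic_rmw_loop().
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

struct group { _Atomic int refs; };

static void group_retain(struct group *g)  { atomic_fetch_add(&g->refs, 1); }
static void group_release(struct group *g) { atomic_fetch_sub(&g->refs, 1); }

/* Pointer tagged with a priority (0-7) in the low bits; groups are 8-byte aligned. */
#define TAG(g, pri)  ((uintptr_t)(g) | (uintptr_t)(pri))
#define TAG_PRI(v)   ((int)((v) & 0x7))
#define TAG_PTR(v)   ((struct group *)((v) & ~(uintptr_t)0x7))

static _Atomic uintptr_t published;   /* 0 when nothing is published */

static void
publish_if_higher(struct group *g, int pri)
{
	group_retain(g);                          /* the ref we intend to hand over */
	uintptr_t old = atomic_load(&published);
	for (;;) {
		if (old != 0 && TAG_PRI(old) >= pri) {
			group_release(g);                 /* publish skipped: drop our ref */
			return;
		}
		if (atomic_compare_exchange_weak(&published, &old, TAG(g, pri))) {
			if (old != 0) {
				group_release(TAG_PTR(old));  /* displaced entry loses its ref */
			}
			return;
		}
		/* CAS failed: 'old' was reloaded, loop and re-evaluate */
	}
}
#endif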
3330 | |
3331 | /* |
3332 | * Called from fprocess of EVFILT_MACHPORT without the kqueue lock held. |
3333 | */ |
3334 | bool |
3335 | kqueue_process_preadopt_thread_group(thread_t thread, struct kqueue *kq, struct thread_group *tg) |
3336 | { |
3337 | bool success = false; |
3338 | if (kq->kq_state & KQ_WORKLOOP) { |
3339 | struct kqworkloop *kqwl = (struct kqworkloop *) kq; |
3340 | thread_group_qos_t old_tg; |
3341 | success = os_atomic_cmpxchgv(&kqwl->kqwl_preadopt_tg, |
3342 | KQWL_PREADOPTED_TG_SENTINEL, KQWL_PREADOPTED_TG_PROCESSED, |
3343 | &old_tg, relaxed); |
3344 | if (success) { |
3345 | thread_set_preadopt_thread_group(thread, tg); |
3346 | } else if (KQWL_HAS_PERMANENT_PREADOPTED_TG(old_tg)) { |
3347 | /* |
3348 | * Technically the following set_preadopt should be a no-op since this |
3349 | * servicer thread preadopts kqwl's permanent tg at bind time. |
3350 | * See kqueue_threadreq_bind. |
3351 | */ |
3352 | thread_set_preadopt_thread_group(thread, KQWL_GET_PREADOPTED_TG(old_tg)); |
3353 | } else { |
3354 | assert(old_tg == KQWL_PREADOPTED_TG_PROCESSED || |
3355 | old_tg == KQWL_PREADOPTED_TG_NEVER); |
3356 | } |
3357 | } |
3358 | return success; |
3359 | } |
3360 | #endif |
3361 | |
3362 | /*! |
3363 | * @function kqworkloop_dealloc |
3364 | * |
3365 | * @brief |
3366 | * Deallocates a workloop kqueue. |
3367 | * |
3368 | * @discussion |
3369 | * Knotes hold references on the workloop, so we can't really reach this |
3370 | * function unless all of these are already gone. |
3371 | * |
3372 | * Nothing locked on entry or exit. |
3373 | * |
3374 | * @param hash_remove |
3375 | * Whether to remove the workloop from its hash table. |
3376 | */ |
3377 | static void |
3378 | kqworkloop_dealloc(struct kqworkloop *kqwl, bool hash_remove) |
3379 | { |
3380 | thread_t cur_owner; |
3381 | |
3382 | cur_owner = kqwl->kqwl_owner; |
3383 | if (cur_owner) { |
3384 | if (kqworkloop_override(kqwl) != THREAD_QOS_UNSPECIFIED) { |
3385 | thread_drop_kevent_override(cur_owner); |
3386 | } |
3387 | thread_deallocate(cur_owner); |
3388 | kqwl->kqwl_owner = THREAD_NULL; |
3389 | } |
3390 | |
3391 | if (kqwl->kqwl_state & KQ_HAS_TURNSTILE) { |
3392 | struct turnstile *ts; |
3393 | turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, |
3394 | &ts, TURNSTILE_WORKLOOPS); |
3395 | turnstile_cleanup(); |
3396 | turnstile_deallocate(ts); |
3397 | } |
3398 | |
3399 | if (hash_remove) { |
3400 | struct filedesc *fdp = &kqwl->kqwl_p->p_fd; |
3401 | |
3402 | kqhash_lock(fdp); |
3403 | LIST_REMOVE(kqwl, kqwl_hashlink); |
3404 | #if CONFIG_PROC_RESOURCE_LIMITS |
3405 | fdp->num_kqwls--; |
3406 | #endif |
3407 | kqhash_unlock(fdp); |
3408 | } |
3409 | |
3410 | #if CONFIG_PREADOPT_TG |
3411 | thread_group_qos_t tg = os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed); |
3412 | if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) { |
3413 | thread_group_release(KQWL_GET_PREADOPTED_TG(tg)); |
3414 | } |
3415 | #endif |
3416 | |
3417 | assert(TAILQ_EMPTY(&kqwl->kqwl_suppressed)); |
3418 | assert(kqwl->kqwl_owner == THREAD_NULL); |
3419 | assert(kqwl->kqwl_turnstile == TURNSTILE_NULL); |
3420 | |
3421 | lck_spin_destroy(&kqwl->kqwl_statelock, &kq_lck_grp); |
3422 | kqueue_destroy(kqwl, kqworkloop_zone); |
3423 | } |
3424 | |
3425 | /*! |
3426 | * @function kqworkloop_init |
3427 | * |
3428 | * @brief |
3429 | * Initializes an allocated kqworkloop. |
3430 | */ |
3431 | static void |
3432 | kqworkloop_init(struct kqworkloop *kqwl, proc_t p, |
3433 | kqueue_id_t id, workq_threadreq_param_t *trp |
3434 | #if CONFIG_PREADOPT_TG |
3435 | , struct thread_group *trp_permanent_preadopt_tg |
3436 | #endif |
3437 | ) |
3438 | { |
3439 | kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC | KQ_KEV_QOS; |
3440 | os_ref_init_raw(&kqwl->kqwl_retains, NULL); |
3441 | kqwl->kqwl_dynamicid = id; |
3442 | kqwl->kqwl_p = p; |
3443 | if (trp) { |
3444 | kqwl->kqwl_params = trp->trp_value; |
3445 | } |
3446 | |
3447 | workq_tr_flags_t tr_flags = WORKQ_TR_FLAG_WORKLOOP; |
3448 | if (trp) { |
3449 | if (trp->trp_flags & TRP_PRIORITY) { |
3450 | tr_flags |= WORKQ_TR_FLAG_WL_OUTSIDE_QOS; |
3451 | } |
3452 | if (trp->trp_flags) { |
3453 | tr_flags |= WORKQ_TR_FLAG_WL_PARAMS; |
3454 | } |
3455 | } |
3456 | kqwl->kqwl_request.tr_state = WORKQ_TR_STATE_IDLE; |
3457 | kqwl->kqwl_request.tr_flags = tr_flags; |
3458 | os_atomic_store(&kqwl->kqwl_iotier_override, (uint8_t)THROTTLE_LEVEL_END, relaxed); |
3459 | #if CONFIG_PREADOPT_TG |
3460 | if (trp_permanent_preadopt_tg) { |
3461 | /* |
3462 | * This kqwl is permanently configured with a thread group. |
3463 | * By using THREAD_QOS_LAST, we make sure kqueue_set_preadopted_thread_group |
3464 | * has no effect on kqwl_preadopt_tg. At this point, +1 ref on |
3465 | * trp_permanent_preadopt_tg is transferred to the kqwl. |
3466 | */ |
3467 | thread_group_qos_t kqwl_preadopt_tg; |
3468 | kqwl_preadopt_tg = KQWL_ENCODE_PERMANENT_PREADOPTED_TG(trp_permanent_preadopt_tg); |
3469 | os_atomic_store(&kqwl->kqwl_preadopt_tg, kqwl_preadopt_tg, relaxed); |
3470 | } else if (task_is_app(current_task())) { |
3471 | /* |
3472 | * Not a specially preconfigured kqwl so it is open to participate in sync IPC |
3473 | * thread group preadoption; but, apps will never adopt a thread group that |
3474 | * is not their own. This is a gross hack to simulate the post-process that |
3475 | * is done in the voucher subsystem today for thread groups. |
3476 | */ |
3477 | os_atomic_store(&kqwl->kqwl_preadopt_tg, KQWL_PREADOPTED_TG_NEVER, relaxed); |
3478 | } |
3479 | #endif |
3480 | |
3481 | for (int i = 0; i < KQWL_NBUCKETS; i++) { |
3482 | TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_queue[i]); |
3483 | } |
3484 | TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_suppressed); |
3485 | |
3486 | lck_spin_init(&kqwl->kqwl_statelock, &kq_lck_grp, LCK_ATTR_NULL); |
3487 | |
3488 | kqueue_init(kqwl); |
3489 | } |
3490 | |
3491 | #if CONFIG_PROC_RESOURCE_LIMITS |
3492 | void |
3493 | kqworkloop_check_limit_exceeded(struct filedesc *fdp) |
3494 | { |
3495 | int num_kqwls = fdp->num_kqwls; |
3496 | if (!kqwl_above_soft_limit_notified(fdp) && fdp->kqwl_dyn_soft_limit > 0 && |
3497 | num_kqwls > fdp->kqwl_dyn_soft_limit) { |
3498 | kqwl_above_soft_limit_send_notification(fdp); |
3499 | act_set_astproc_resource(current_thread()); |
3500 | } else if (!kqwl_above_hard_limit_notified(fdp) && fdp->kqwl_dyn_hard_limit > 0 |
3501 | && num_kqwls > fdp->kqwl_dyn_hard_limit) { |
3502 | kqwl_above_hard_limit_send_notification(fdp); |
3503 | act_set_astproc_resource(current_thread()); |
3504 | } |
3505 | } |
3506 | #endif |
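/*
 * Illustrative sketch (not part of this file): the two-watermark,
 * notify-at-most-once shape of kqworkloop_check_limit_exceeded() above,
 * with hypothetical userspace names.
 */
#if 0
#include <stdbool.h>

struct limit_state {
	int  count;
	int  soft_limit, hard_limit;        /* <= 0 means "no limit" */
	bool soft_notified, hard_notified;  /* latched after the first notification */
};

static void
limit_check(struct limit_state *st, void (*notify)(const char *which))
{
	if (!st->soft_notified && st->soft_limit > 0 && st->count > st->soft_limit) {
		st->soft_notified = true;
		notify("soft");
	} else if (!st->hard_notified && st->hard_limit > 0 && st->count > st->hard_limit) {
		st->hard_notified = true;
		notify("hard");
	}
}
#endif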
3507 | |
3508 | /*! |
3509 | * @function kqworkloop_get_or_create |
3510 | * |
3511 | * @brief |
3512 | * Wrapper around kqworkloop_init that handles the uniquing of workloops. |
3513 | * |
3514 | * @returns |
3515 | * 0: success |
3516 | * EINVAL: invalid parameters |
3517 | * EEXIST: KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST is set and a collision exists. |
3518 | * ENOENT: KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST is set and the entry wasn't found. |
3519 | * ENOMEM: allocation failed |
3520 | */ |
3521 | static int |
3522 | kqworkloop_get_or_create(struct proc *p, kqueue_id_t id, |
3523 | workq_threadreq_param_t *trp, |
3524 | #if CONFIG_PREADOPT_TG |
3525 | struct thread_group *trp_permanent_preadopt_tg, |
3526 | #endif |
3527 | unsigned int flags, struct kqworkloop **kqwlp) |
3528 | { |
3529 | struct filedesc *fdp = &p->p_fd; |
3530 | struct kqworkloop *alloc_kqwl = NULL; |
3531 | struct kqworkloop *kqwl = NULL; |
3532 | int error = 0; |
3533 | |
3534 | assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)); |
3535 | |
3536 | if (id == 0 || id == (kqueue_id_t)-1) { |
3537 | return EINVAL; |
3538 | } |
3539 | |
3540 | for (;;) { |
3541 | kqhash_lock(fdp); |
3542 | if (__improbable(fdp->fd_kqhash == NULL)) { |
3543 | kqworkloop_hash_init(fdp); |
3544 | } |
3545 | |
3546 | kqwl = kqworkloop_hash_lookup_locked(fdp, id); |
3547 | if (kqwl) { |
3548 | if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) { |
3549 | /* |
3550 | * If MUST_NOT_EXIST was passed, even if we would have failed |
3551 | * the try_retain, it could have gone the other way, and |
3552 | * userspace can't tell. Let'em fix their race. |
3553 | */ |
3554 | error = EEXIST; |
3555 | break; |
3556 | } |
3557 | |
3558 | if (__probable(kqworkloop_try_retain(kqwl))) { |
3559 | /* |
3560 | * This is a valid live workloop ! |
3561 | */ |
3562 | *kqwlp = kqwl; |
3563 | error = 0; |
3564 | break; |
3565 | } |
3566 | } |
3567 | |
3568 | if (__improbable(flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST)) { |
3569 | error = ENOENT; |
3570 | break; |
3571 | } |
3572 | |
3573 | /* |
3574 | * We didn't find what we were looking for. |
3575 | * |
3576 | * If this is the second time we reach this point (alloc_kqwl != NULL), |
3577 | * then we're done. |
3578 | * |
3579 | * If this is the first time we reach this point (alloc_kqwl == NULL), |
3580 | * then try to allocate one without blocking. |
3581 | */ |
3582 | if (__probable(alloc_kqwl == NULL)) { |
3583 | alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_NOWAIT | Z_ZERO); |
3584 | } |
3585 | if (__probable(alloc_kqwl)) { |
3586 | #if CONFIG_PROC_RESOURCE_LIMITS |
3587 | fdp->num_kqwls++; |
3588 | kqworkloop_check_limit_exceeded(fdp); |
3589 | #endif |
3590 | kqworkloop_init(alloc_kqwl, p, id, trp |
3591 | #if CONFIG_PREADOPT_TG |
3592 | , trp_permanent_preadopt_tg |
3593 | #endif |
3594 | ); |
3595 | kqworkloop_hash_insert_locked(fdp, id, alloc_kqwl); |
3596 | kqhash_unlock(fdp); |
3597 | *kqwlp = alloc_kqwl; |
3598 | return 0; |
3599 | } |
3600 | |
3601 | /* |
3602 | * We have to block to allocate a workloop, drop the lock, |
3603 | * allocate one, but then we need to retry lookups as someone |
3604 | * else could race with us. |
3605 | */ |
3606 | kqhash_unlock(fdp); |
3607 | |
3608 | alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_WAITOK | Z_ZERO); |
3609 | } |
3610 | |
3611 | kqhash_unlock(fdp); |
3612 | |
3613 | if (__improbable(alloc_kqwl)) { |
3614 | zfree(kqworkloop_zone, alloc_kqwl); |
3615 | } |
3616 | |
3617 | return error; |
3618 | } |
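/*
 * Illustrative sketch (not part of this file): the lookup-or-allocate retry
 * loop used by kqworkloop_get_or_create() above.  The blocking allocation is
 * done with the lock dropped, and the lookup is then redone because another
 * thread may have inserted the same id in the meantime.  Names are
 * hypothetical userspace stand-ins; error handling is omitted.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

struct entry { unsigned long id; struct entry *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry   *table_head;

static struct entry *
find_locked(unsigned long id)
{
	for (struct entry *e = table_head; e != NULL; e = e->next) {
		if (e->id == id) {
			return e;
		}
	}
	return NULL;
}

static struct entry *
find_or_create(unsigned long id)
{
	struct entry *spare = NULL, *e;

	for (;;) {
		pthread_mutex_lock(&table_lock);
		e = find_locked(id);
		if (e == NULL && spare != NULL) {
			/* install the entry allocated while the lock was dropped */
			spare->id = id;
			spare->next = table_head;
			table_head = spare;
			e = spare;
			spare = NULL;
		}
		pthread_mutex_unlock(&table_lock);

		if (e != NULL) {
			free(spare);          /* unused spare from a lost race, if any */
			return e;
		}
		/* allocate outside the lock (may block), then retry the lookup */
		spare = calloc(1, sizeof(*spare));
	}
}
#endif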
3619 | |
3620 | #pragma mark - knotes |
3621 | |
3622 | static int |
3623 | filt_no_attach(struct knote *kn, __unused struct kevent_qos_s *kev) |
3624 | { |
3625 | knote_set_error(kn, ENOTSUP); |
3626 | return 0; |
3627 | } |
3628 | |
3629 | static void |
3630 | filt_no_detach(__unused struct knote *kn) |
3631 | { |
3632 | } |
3633 | |
3634 | static int __dead2 |
3635 | filt_bad_event(struct knote *kn, long hint) |
3636 | { |
3637 | panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint); |
3638 | } |
3639 | |
3640 | static int __dead2 |
3641 | filt_bad_touch(struct knote *kn, struct kevent_qos_s *kev) |
3642 | { |
3643 | panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev); |
3644 | } |
3645 | |
3646 | static int __dead2 |
3647 | filt_bad_process(struct knote *kn, struct kevent_qos_s *kev) |
3648 | { |
3649 | panic("%s[%d](%p, %p)", __func__, kn->kn_filter, kn, kev); |
3650 | } |
3651 | |
3652 | /* |
3653 | * knotes_dealloc - detach all knotes for the process and drop them |
3654 | * |
3655 | * Process is in such a state that it will not try to allocate |
3656 | * any more knotes during this process (stopped for exit or exec). |
3657 | */ |
3658 | void |
3659 | knotes_dealloc(proc_t p) |
3660 | { |
3661 | struct filedesc *fdp = &p->p_fd; |
3662 | struct kqueue *kq; |
3663 | struct knote *kn; |
3664 | struct klist *kn_hash = NULL; |
3665 | u_long kn_hashmask; |
3666 | int i; |
3667 | |
3668 | proc_fdlock(p); |
3669 | |
3670 | /* Close all the fd-indexed knotes up front */ |
3671 | if (fdp->fd_knlistsize > 0) { |
3672 | for (i = 0; i < fdp->fd_knlistsize; i++) { |
3673 | while ((kn = SLIST_FIRST(&fdp->fd_knlist[i])) != NULL) { |
3674 | kq = knote_get_kq(kn); |
3675 | kqlock(kq); |
3676 | proc_fdunlock(p); |
3677 | knote_drop(kq, kn, NULL); |
3678 | proc_fdlock(p); |
3679 | } |
3680 | } |
3681 | /* free the table */ |
3682 | kfree_type(struct klist, fdp->fd_knlistsize, fdp->fd_knlist); |
3683 | } |
3684 | fdp->fd_knlistsize = 0; |
3685 | |
3686 | proc_fdunlock(p); |
3687 | |
3688 | knhash_lock(fdp); |
3689 | |
3690 | /* Clean out all the hashed knotes as well */ |
3691 | if (fdp->fd_knhashmask != 0) { |
3692 | for (i = 0; i <= (int)fdp->fd_knhashmask; i++) { |
3693 | while ((kn = SLIST_FIRST(&fdp->fd_knhash[i])) != NULL) { |
3694 | kq = knote_get_kq(kn); |
3695 | kqlock(kq); |
3696 | knhash_unlock(fdp); |
3697 | knote_drop(kq, kn, NULL); |
3698 | knhash_lock(fdp); |
3699 | } |
3700 | } |
3701 | kn_hash = fdp->fd_knhash; |
3702 | kn_hashmask = fdp->fd_knhashmask; |
3703 | fdp->fd_knhashmask = 0; |
3704 | fdp->fd_knhash = NULL; |
3705 | } |
3706 | |
3707 | knhash_unlock(fdp); |
3708 | |
3709 | if (kn_hash) { |
3710 | hashdestroy(kn_hash, M_KQUEUE, kn_hashmask); |
3711 | } |
3712 | } |
3713 | |
3714 | /* |
3715 | * kqworkloops_dealloc - rebalance retains on kqworkloops created with |
3716 | * scheduling parameters |
3717 | * |
3718 | * Process is in such a state that it will not try to allocate |
3719 | * any more kqs or knotes during this process (stopped for exit or exec). |
3720 | */ |
3721 | void |
3722 | kqworkloops_dealloc(proc_t p) |
3723 | { |
3724 | struct filedesc *fdp = &p->p_fd; |
3725 | struct kqworkloop *kqwl, *kqwln; |
3726 | struct kqwllist tofree; |
3727 | |
3728 | if (!fdt_flag_test(fdp, FD_WORKLOOP)) { |
3729 | return; |
3730 | } |
3731 | |
3732 | kqhash_lock(fdp); |
3733 | |
3734 | if (fdp->fd_kqhashmask == 0) { |
3735 | kqhash_unlock(fdp); |
3736 | return; |
3737 | } |
3738 | |
3739 | LIST_INIT(&tofree); |
3740 | |
3741 | for (size_t i = 0; i <= fdp->fd_kqhashmask; i++) { |
3742 | LIST_FOREACH_SAFE(kqwl, &fdp->fd_kqhash[i], kqwl_hashlink, kqwln) { |
3743 | #if CONFIG_PREADOPT_TG |
3744 | /* |
3745 | * kqworkloops that have scheduling parameters have an |
3746 | * implicit retain from kqueue_workloop_ctl that needs |
3747 | * to be balanced on process exit. |
3748 | */ |
3749 | __assert_only thread_group_qos_t preadopt_tg; |
3750 | preadopt_tg = os_atomic_load(&kqwl->kqwl_preadopt_tg, relaxed); |
3751 | #endif |
3752 | assert(kqwl->kqwl_params |
3753 | #if CONFIG_PREADOPT_TG |
3754 | || KQWL_HAS_PERMANENT_PREADOPTED_TG(preadopt_tg) |
3755 | #endif |
3756 | ); |
3757 | |
3758 | LIST_REMOVE(kqwl, kqwl_hashlink); |
3759 | LIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink); |
3760 | } |
3761 | } |
3762 | #if CONFIG_PROC_RESOURCE_LIMITS |
3763 | fdp->num_kqwls = 0; |
3764 | #endif |
3765 | kqhash_unlock(fdp); |
3766 | |
3767 | LIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) { |
3768 | uint32_t ref = os_ref_get_count_raw(&kqwl->kqwl_retains); |
3769 | if (ref != 1) { |
3770 | panic("kq(%p) invalid refcount %d", kqwl, ref); |
3771 | } |
3772 | kqworkloop_dealloc(kqwl, false); |
3773 | } |
3774 | } |
3775 | |
3776 | static int |
3777 | kevent_register_validate_priority(struct kqueue *kq, struct knote *kn, |
3778 | struct kevent_qos_s *kev) |
3779 | { |
3780 | /* We don't care about the priority of a disabled or deleted knote */ |
3781 | if (kev->flags & (EV_DISABLE | EV_DELETE)) { |
3782 | return 0; |
3783 | } |
3784 | |
3785 | if (kq->kq_state & KQ_WORKLOOP) { |
3786 | /* |
3787 | * Workloops need valid priorities with a QOS (excluding manager) for |
3788 | * any enabled knote. |
3789 | * |
3790 | * When it is pre-existing, just make sure it has a valid QoS as |
3791 | * kevent_register() will not use the incoming priority (filters who do |
3792 | * have the responsibility to validate it again, see filt_wltouch). |
3793 | * |
3794 | * If the knote is being made, validate the incoming priority. |
3795 | */ |
3796 | if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) { |
3797 | return ERANGE; |
3798 | } |
3799 | } |
3800 | |
3801 | return 0; |
3802 | } |
3803 | |
3804 | /* |
3805 | * Prepare a filter for waiting after register. |
3806 | * |
3807 | * The f_post_register_wait hook will be called later by kevent_register() |
3808 | * and should call kevent_register_wait_block() |
3809 | */ |
3810 | static int |
3811 | kevent_register_wait_prepare(struct knote *kn, struct kevent_qos_s *kev, int rc) |
3812 | { |
3813 | thread_t thread = current_thread(); |
3814 | |
3815 | assert(knote_fops(kn)->f_extended_codes); |
3816 | |
3817 | if (kn->kn_thread == NULL) { |
3818 | thread_reference(thread); |
3819 | kn->kn_thread = thread; |
3820 | } else if (kn->kn_thread != thread) { |
3821 | /* |
3822 | * kn_thread may be set from a previous aborted wait |
3823 | * However, it has to be from the same thread. |
3824 | */ |
3825 | kev->flags |= EV_ERROR; |
3826 | kev->data = EXDEV; |
3827 | return 0; |
3828 | } |
3829 | |
3830 | return FILTER_REGISTER_WAIT | rc; |
3831 | } |
3832 | |
3833 | /* |
3834 | * Cleanup a kevent_register_wait_prepare() effect for threads that have been |
3835 | * aborted instead of properly woken up with thread_wakeup_thread(). |
3836 | */ |
3837 | static void |
3838 | kevent_register_wait_cleanup(struct knote *kn) |
3839 | { |
3840 | thread_t thread = kn->kn_thread; |
3841 | kn->kn_thread = NULL; |
3842 | thread_deallocate(thread); |
3843 | } |
3844 | |
3845 | /* |
3846 | * Must be called at the end of a f_post_register_wait call from a filter. |
3847 | */ |
3848 | static void |
3849 | kevent_register_wait_block(struct turnstile *ts, thread_t thread, |
3850 | thread_continue_t cont, struct _kevent_register *cont_args) |
3851 | { |
3852 | turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); |
3853 | kqunlock(cont_args->kqwl); |
3854 | cont_args->handoff_thread = thread; |
3855 | thread_handoff_parameter(thread, cont, cont_args, THREAD_HANDOFF_NONE); |
3856 | } |
3857 | |
3858 | /* |
3859 | * Called by filters using an f_post_register_wait to return from their wait. |
3860 | */ |
3861 | static void |
3862 | kevent_register_wait_return(struct _kevent_register *cont_args) |
3863 | { |
3864 | struct kqworkloop *kqwl = cont_args->kqwl; |
3865 | struct kevent_qos_s *kev = &cont_args->kev; |
3866 | int error = 0; |
3867 | |
3868 | if (cont_args->handoff_thread) { |
3869 | thread_deallocate(cont_args->handoff_thread); |
3870 | } |
3871 | |
3872 | if (kev->flags & (EV_ERROR | EV_RECEIPT)) { |
3873 | if ((kev->flags & EV_ERROR) == 0) { |
3874 | kev->flags |= EV_ERROR; |
3875 | kev->data = 0; |
3876 | } |
3877 | error = kevent_modern_copyout(kev, &cont_args->ueventlist); |
3878 | if (error == 0) { |
3879 | cont_args->eventout++; |
3880 | } |
3881 | } |
3882 | |
3883 | kqworkloop_release(kqwl); |
3884 | if (error == 0) { |
3885 | *(int32_t *)&current_uthread()->uu_rval = cont_args->eventout; |
3886 | } |
3887 | unix_syscall_return(error); |
3888 | } |
3889 | |
3890 | /* |
3891 | * kevent_register - add a new event to a kqueue |
3892 | * |
3893 | * Creates a mapping between the event source and |
3894 | * the kqueue via a knote data structure. |
3895 | * |
3896 | * Because many/most of the event sources are file |
3897 | * descriptor related, the knote is linked off |
3898 | * the file descriptor table for quick access. |
3899 | * |
3900 | * called with nothing locked |
3901 | * caller holds a reference on the kqueue |
3902 | */ |
3903 | |
3904 | int |
3905 | kevent_register(struct kqueue *kq, struct kevent_qos_s *kev, |
3906 | struct knote **kn_out) |
3907 | { |
3908 | struct proc *p = kq->kq_p; |
3909 | const struct filterops *fops; |
3910 | struct knote *kn = NULL; |
3911 | int result = 0, error = 0; |
3912 | unsigned short kev_flags = kev->flags; |
3913 | KNOTE_LOCK_CTX(knlc); |
3914 | |
3915 | if (__probable(kev->filter < 0 && kev->filter + EVFILT_SYSCOUNT >= 0)) { |
3916 | fops = sysfilt_ops[~kev->filter]; /* to 0-base index */ |
3917 | } else { |
3918 | error = EINVAL; |
3919 | goto out; |
3920 | } |
3921 | |
3922 | /* restrict EV_VANISHED to adding udata-specific dispatch kevents */ |
3923 | if (__improbable((kev->flags & EV_VANISHED) && |
3924 | (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2))) { |
3925 | error = EINVAL; |
3926 | goto out; |
3927 | } |
3928 | |
3929 | /* Simplify the flags - delete and disable overrule */ |
3930 | if (kev->flags & EV_DELETE) { |
3931 | kev->flags &= ~EV_ADD; |
3932 | } |
3933 | if (kev->flags & EV_DISABLE) { |
3934 | kev->flags &= ~EV_ENABLE; |
3935 | } |
3936 | |
3937 | if (kq->kq_state & KQ_WORKLOOP) { |
3938 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER), |
3939 | ((struct kqworkloop *)kq)->kqwl_dynamicid, |
3940 | kev->udata, kev->flags, kev->filter); |
3941 | } else if (kq->kq_state & KQ_WORKQ) { |
3942 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER), |
3943 | 0, kev->udata, kev->flags, kev->filter); |
3944 | } else { |
3945 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_REGISTER), |
3946 | VM_KERNEL_UNSLIDE_OR_PERM(kq), |
3947 | kev->udata, kev->flags, kev->filter); |
3948 | } |
3949 | |
3950 | restart: |
3951 | /* find the matching knote from the fd tables/hashes */ |
3952 | kn = kq_find_knote_and_kq_lock(kq, kev, fops->f_isfd, p); |
3953 | error = kevent_register_validate_priority(kq, kn, kev); |
3954 | result = 0; |
3955 | if (error) { |
3956 | if (kn) { |
3957 | kqunlock(kq); |
3958 | } |
3959 | goto out; |
3960 | } |
3961 | |
3962 | if (kn == NULL && (kev->flags & EV_ADD) == 0) { |
3963 | /* |
3964 | * No knote found, EV_ADD wasn't specified |
3965 | */ |
3966 | |
3967 | if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) && |
3968 | (kq->kq_state & KQ_WORKLOOP)) { |
3969 | /* |
3970 | * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete |
3971 | * that doesn't care about ENOENT, so just pretend the deletion |
3972 | * happened. |
3973 | */ |
3974 | } else { |
3975 | error = ENOENT; |
3976 | } |
3977 | goto out; |
3978 | } else if (kn == NULL) { |
3979 | /* |
3980 | * No knote found, need to attach a new one (attach) |
3981 | */ |
3982 | |
3983 | struct fileproc *knote_fp = NULL; |
3984 | |
3985 | /* grab a file reference for the new knote */ |
3986 | if (fops->f_isfd) { |
3987 | if ((error = fp_lookup(p, (int)kev->ident, &knote_fp, 0)) != 0) { |
3988 | goto out; |
3989 | } |
3990 | } |
3991 | |
3992 | kn = knote_alloc(); |
3993 | kn->kn_fp = knote_fp; |
3994 | kn->kn_is_fd = fops->f_isfd; |
3995 | kn->kn_kq_packed = VM_PACK_POINTER((vm_offset_t)kq, KNOTE_KQ_PACKED); |
3996 | kn->kn_status = 0; |
3997 | |
3998 | /* was vanish support requested */ |
3999 | if (kev->flags & EV_VANISHED) { |
4000 | kev->flags &= ~EV_VANISHED; |
4001 | kn->kn_status |= KN_REQVANISH; |
4002 | } |
4003 | |
4004 | /* snapshot matching/dispatching protocol flags into knote */ |
4005 | if (kev->flags & EV_DISABLE) { |
4006 | kn->kn_status |= KN_DISABLED; |
4007 | } |
4008 | |
4009 | /* |
4010 | * copy the kevent state into knote |
4011 | * protocol is that fflags and data |
4012 | * are saved off, and cleared before |
4013 | * calling the attach routine. |
4014 | * |
4015 | * - kn->kn_sfflags aliases with kev->xflags |
4016 | * - kn->kn_sdata aliases with kev->data |
4017 | * - kn->kn_filter is the top 8 bits of kev->filter |
4018 | */ |
4019 | kn->kn_kevent = *(struct kevent_internal_s *)kev; |
4020 | kn->kn_sfflags = kev->fflags; |
4021 | kn->kn_filtid = (uint8_t)~kev->filter; |
4022 | kn->kn_fflags = 0; |
4023 | knote_reset_priority(kq, kn, kev->qos); |
4024 | |
4025 | /* Add the knote for lookup thru the fd table */ |
4026 | error = kq_add_knote(kq, kn, &knlc, p); |
4027 | if (error) { |
4028 | knote_free(kn); |
4029 | if (knote_fp != NULL) { |
4030 | fp_drop(p, (int)kev->ident, knote_fp, 0); |
4031 | } |
4032 | |
4033 | if (error == ERESTART) { |
4034 | goto restart; |
4035 | } |
4036 | goto out; |
4037 | } |
4038 | |
4039 | /* fp reference count now applies to knote */ |
4040 | |
4041 | /* |
4042 | * we can't use filter_call() because f_attach can change the filter ops |
4043 | * for a filter that supports f_extended_codes, so we need to reload |
4044 | * knote_fops() and not use `fops`. |
4045 | */ |
4046 | result = fops->f_attach(kn, kev); |
4047 | if (result && !knote_fops(kn)->f_extended_codes) { |
4048 | result = FILTER_ACTIVE; |
4049 | } |
4050 | |
4051 | kqlock(kq); |
4052 | |
4053 | if (result & FILTER_THREADREQ_NODEFEER) { |
4054 | enable_preemption(); |
4055 | } |
4056 | |
4057 | if (kn->kn_flags & EV_ERROR) { |
4058 | /* |
4059 | * Failed to attach correctly, so drop. |
4060 | */ |
4061 | kn->kn_filtid = EVFILTID_DETACHED; |
4062 | error = (int)kn->kn_sdata; |
4063 | knote_drop(kq, kn, &knlc); |
4064 | result = 0; |
4065 | goto out; |
4066 | } |
4067 | |
4068 | /* |
4069 | * end "attaching" phase - now just attached |
4070 | * |
4071 | * Mark the thread request overcommit, if appropriate. |
4072 | * |
4073 | * If the attach routine indicated that an |
4074 | * event is already fired, activate the knote. |
4075 | */ |
4076 | if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) && |
4077 | (kq->kq_state & KQ_WORKLOOP)) { |
4078 | kqworkloop_set_overcommit((struct kqworkloop *)kq); |
4079 | } |
4080 | } else if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) { |
4081 | /* |
4082 | * The knote was dropped while we were waiting for the lock, |
4083 | * we need to re-evaluate entirely |
4084 | */ |
4085 | |
4086 | goto restart; |
4087 | } else if (kev->flags & EV_DELETE) { |
4088 | /* |
4089 | * Deletion of a knote (drop) |
4090 | * |
4091 | * If the filter wants to filter drop events, let it do so. |
4092 | * |
4093 | * defer-delete: when trying to delete a disabled EV_DISPATCH2 knote, |
4094 | * we must wait for the knote to be re-enabled (unless it is being |
4095 | * re-enabled atomically here). |
4096 | */ |
4097 | |
4098 | if (knote_fops(kn)->f_allow_drop) { |
4099 | bool drop; |
4100 | |
4101 | kqunlock(kq); |
4102 | drop = knote_fops(kn)->f_allow_drop(kn, kev); |
4103 | kqlock(kq); |
4104 | |
4105 | if (!drop) { |
4106 | goto out_unlock; |
4107 | } |
4108 | } |
4109 | |
4110 | if ((kev->flags & EV_ENABLE) == 0 && |
4111 | (kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 && |
4112 | (kn->kn_status & KN_DISABLED) != 0) { |
4113 | kn->kn_status |= KN_DEFERDELETE; |
4114 | error = EINPROGRESS; |
4115 | goto out_unlock; |
4116 | } |
4117 | |
4118 | knote_drop(kq, kn, &knlc); |
4119 | goto out; |
4120 | } else { |
4121 | /* |
4122 | * Regular update of a knote (touch) |
4123 | * |
4124 | * Call touch routine to notify filter of changes in filter values |
4125 | * (and to re-determine if any events are fired). |
4126 | * |
4127 | * If the knote is in defer-delete, avoid calling the filter touch |
4128 | * routine (it has delivered its last event already). |
4129 | * |
4130 | * If the touch routine had no failure, |
4131 | * apply the requested side effects to the knote. |
4132 | */ |
4133 | |
4134 | if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) { |
4135 | if (kev->flags & EV_ENABLE) { |
4136 | result = FILTER_ACTIVE; |
4137 | } |
4138 | } else { |
4139 | kqunlock(kq); |
4140 | result = filter_call(knote_fops(kn), f_touch(kn, kev)); |
4141 | kqlock(kq); |
4142 | if (result & FILTER_THREADREQ_NODEFEER) { |
4143 | enable_preemption(); |
4144 | } |
4145 | } |
4146 | |
4147 | if (kev->flags & EV_ERROR) { |
4148 | result = 0; |
4149 | goto out_unlock; |
4150 | } |
4151 | |
4152 | if ((kn->kn_flags & EV_UDATA_SPECIFIC) == 0 && |
4153 | kn->kn_udata != kev->udata) { |
4154 | // this allows klist_copy_udata() not to take locks |
4155 | os_atomic_store_wide(&kn->kn_udata, kev->udata, relaxed); |
4156 | } |
4157 | if ((kev->flags & EV_DISABLE) && !(kn->kn_status & KN_DISABLED)) { |
4158 | kn->kn_status |= KN_DISABLED; |
4159 | knote_dequeue(kq, kn); |
4160 | } |
4161 | } |
4162 | |
4163 | /* accept new kevent state */ |
4164 | knote_apply_touch(kq, kn, kev, result); |
4165 | |
4166 | out_unlock: |
4167 | /* |
4168 | * When the filter asked for a post-register wait, |
4169 | * we leave the kqueue locked for kevent_register() |
4170 | * to call the filter's f_post_register_wait hook. |
4171 | */ |
4172 | if (result & FILTER_REGISTER_WAIT) { |
4173 | knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS); |
4174 | *kn_out = kn; |
4175 | } else { |
4176 | knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK); |
4177 | } |
4178 | |
4179 | out: |
4180 | /* output local errors through the kevent */ |
4181 | if (error) { |
4182 | kev->flags |= EV_ERROR; |
4183 | kev->data = error; |
4184 | } |
4185 | return result; |
4186 | } |
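/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * registration path above, using the public kqueue(2)/kevent(2) API.  The
 * EV_ADD change below is what ultimately reaches kevent_register().
 */
#if 0
#include <sys/event.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	int kq = kqueue();
	if (kq < 0) {
		perror("kqueue");
		return 1;
	}

	/* Register interest in reads on stdin (the "attach" path above). */
	struct kevent change;
	EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &change, 1, NULL, 0, NULL) < 0) {
		perror("kevent register");
		return 1;
	}

	/* Wait for one event (delivered through knote_process() below). */
	struct kevent event;
	if (kevent(kq, NULL, 0, &event, 1, NULL) > 0) {
		printf("fd %lu readable, %ld bytes pending\n",
		    (unsigned long)event.ident, (long)event.data);
	}
	close(kq);
	return 0;
}
#endif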
4187 | |
4188 | /* |
4189 | * knote_process - process a triggered event |
4190 | * |
4191 | * Validate that it is really still a triggered event |
4192 | * by calling the filter routines (if necessary). Hold |
4193 | * a use reference on the knote to avoid it being detached. |
4194 | * |
4195 | * If it is still considered triggered, we will have taken |
4196 | * a copy of the state under the filter lock. We use that |
4197 | * snapshot to dispatch the knote for future processing (or |
4198 | * not, if this was a lost event). |
4199 | * |
4200 | * Our caller assures us that nobody else can be processing |
4201 | * events from this knote during the whole operation. But |
4202 | * others can be touching or posting events to the knote |
4203 | * interspersed with our processing it. |
4204 | * |
4205 | * caller holds a reference on the kqueue. |
4206 | * kqueue locked on entry and exit - but may be dropped |
4207 | */ |
4208 | static int |
4209 | knote_process(struct knote *kn, kevent_ctx_t kectx, |
4210 | kevent_callback_t callback) |
4211 | { |
4212 | struct kevent_qos_s kev; |
4213 | struct kqueue *kq = knote_get_kq(kn); |
4214 | KNOTE_LOCK_CTX(knlc); |
4215 | int result = FILTER_ACTIVE; |
4216 | int error = 0; |
4217 | bool drop = false; |
4218 | |
4219 | /* |
4220 | * Must be active |
4221 | * Must be queued and not disabled/suppressed or dropping |
4222 | */ |
4223 | assert(kn->kn_status & KN_QUEUED); |
4224 | assert(kn->kn_status & KN_ACTIVE); |
4225 | assert(!(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING))); |
4226 | |
4227 | if (kq->kq_state & KQ_WORKLOOP) { |
4228 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS), |
4229 | ((struct kqworkloop *)kq)->kqwl_dynamicid, |
4230 | kn->kn_udata, kn->kn_status | (kn->kn_id << 32), |
4231 | kn->kn_filtid); |
4232 | } else if (kq->kq_state & KQ_WORKQ) { |
4233 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS), |
4234 | 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32), |
4235 | kn->kn_filtid); |
4236 | } else { |
4237 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS), |
4238 | VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata, |
4239 | kn->kn_status | (kn->kn_id << 32), kn->kn_filtid); |
4240 | } |
4241 | |
4242 | if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) { |
4243 | /* |
4244 | * When the knote is dropping or has dropped, |
4245 | * then there's nothing we want to process. |
4246 | */ |
4247 | return EJUSTRETURN; |
4248 | } |
4249 | |
4250 | /* |
4251 | * While waiting for the knote lock, we may have dropped the kq lock. |
4252 | * and a touch may have disabled and dequeued the knote. |
4253 | */ |
4254 | if (!(kn->kn_status & KN_QUEUED)) { |
4255 | knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS); |
4256 | return EJUSTRETURN; |
4257 | } |
4258 | |
4259 | /* |
4260 | * For deferred-drop or vanished events, we just create a fake |
4261 | * event to acknowledge end-of-life. Otherwise, we call the |
4262 | * filter's process routine to snapshot the kevent state under |
4263 | * the filter's locking protocol. |
4264 | * |
4265 | * suppress knotes to avoid returning the same event multiple times in |
4266 | * a single call. |
4267 | */ |
4268 | knote_suppress(kq, kn); |
4269 | |
4270 | if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) { |
4271 | uint16_t kev_flags = EV_DISPATCH2 | EV_ONESHOT; |
4272 | if (kn->kn_status & KN_DEFERDELETE) { |
4273 | kev_flags |= EV_DELETE; |
4274 | } else { |
4275 | kev_flags |= EV_VANISHED; |
4276 | } |
4277 | |
4278 | /* create fake event */ |
4279 | kev = (struct kevent_qos_s){ |
4280 | .filter = kn->kn_filter, |
4281 | .ident = kn->kn_id, |
4282 | .flags = kev_flags, |
4283 | .udata = kn->kn_udata, |
4284 | }; |
4285 | } else { |
4286 | kqunlock(kq); |
4287 | kev = (struct kevent_qos_s) { }; |
4288 | result = filter_call(knote_fops(kn), f_process(kn, &kev)); |
4289 | kqlock(kq); |
4290 | } |
4291 | |
4292 | /* |
4293 | * Determine how to dispatch the knote for future event handling. |
4294 | * not-fired: just return (do not callout, leave deactivated). |
4295 | * One-shot: If dispatch2, enter deferred-delete mode (unless this |
4296 | * is the deferred delete event delivery itself). Otherwise, |
4297 | * drop it. |
4298 | * Dispatch: don't clear state, just mark it disabled. |
4299 | * Cleared: just leave it deactivated. |
4300 | * Others: re-activate as there may be more events to handle. |
4301 | * This will not wake up more handlers right now, but |
4302 | * at the completion of handling events it may trigger |
4303 | * more handler threads (TODO: optimize based on more than |
4304 | * just this one event being detected by the filter). |
4305 | */ |
4306 | if ((result & FILTER_ACTIVE) == 0) { |
4307 | if ((kn->kn_status & KN_ACTIVE) == 0) { |
4308 | /* |
4309 | * Some knotes (like EVFILT_WORKLOOP) can be reactivated from |
4310 | * within f_process() but that doesn't necessarily make them |
4311 | * ready to process, so we should leave them be. |
4312 | * |
4313 | * For other knotes, since we will not return an event, |
4314 | * there's no point keeping the knote suppressed. |
4315 | */ |
4316 | knote_unsuppress(kq, kn); |
4317 | } |
4318 | knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS); |
4319 | return EJUSTRETURN; |
4320 | } |
4321 | |
4322 | if (result & FILTER_ADJUST_EVENT_QOS_BIT) { |
4323 | knote_adjust_qos(kq, kn, result); |
4324 | } |
4325 | |
4326 | if (result & FILTER_ADJUST_EVENT_IOTIER_BIT) { |
4327 | kqueue_update_iotier_override(kq); |
4328 | } |
4329 | |
4330 | kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override); |
4331 | |
4332 | if (kev.flags & EV_ONESHOT) { |
4333 | if ((kn->kn_flags & EV_DISPATCH2) == EV_DISPATCH2 && |
4334 | (kn->kn_status & KN_DEFERDELETE) == 0) { |
4335 | /* defer dropping non-delete oneshot dispatch2 events */ |
4336 | kn->kn_status |= KN_DEFERDELETE | KN_DISABLED; |
4337 | } else { |
4338 | drop = true; |
4339 | } |
4340 | } else if (kn->kn_flags & EV_DISPATCH) { |
4341 | /* disable all dispatch knotes */ |
4342 | kn->kn_status |= KN_DISABLED; |
4343 | } else if ((kn->kn_flags & EV_CLEAR) == 0) { |
4344 | /* re-activate in case there are more events */ |
4345 | knote_activate(kq, kn, FILTER_ACTIVE); |
4346 | } |
4347 | |
4348 | /* |
4349 | * callback to handle each event as we find it. |
4350 | * If we have to detach and drop the knote, do |
4351 | * it while we have the kq unlocked. |
4352 | */ |
4353 | if (drop) { |
4354 | knote_drop(kq, kn, &knlc); |
4355 | } else { |
4356 | knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK); |
4357 | } |
4358 | |
4359 | if (kev.flags & EV_VANISHED) { |
4360 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED), |
4361 | kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32), |
4362 | kn->kn_filtid); |
4363 | } |
4364 | |
4365 | error = (callback)(&kev, kectx); |
4366 | kqlock(kq); |
4367 | return error; |
4368 | } |
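/*
 * Illustrative sketch (not part of this file): the userspace consequence of
 * the EV_DISPATCH handling above -- after an event is delivered the knote is
 * left disabled, so the consumer must re-enable it with EV_ENABLE before the
 * next event can fire.  Uses the public kevent(2) API.
 */
#if 0
#include <sys/event.h>

static int
register_dispatch_read(int kq, int fd)
{
	struct kevent change;
	EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
	return kevent(kq, &change, 1, NULL, 0, NULL);
}

/* Call after handling each delivered event to arm the knote again. */
static int
reenable_dispatch_read(int kq, int fd)
{
	struct kevent change;
	EV_SET(&change, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
	return kevent(kq, &change, 1, NULL, 0, NULL);
}
#endif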
4369 | |
4370 | /* |
4371 | * Returns -1 if the kqueue was unbound and processing should not happen |
4372 | */ |
4373 | #define KQWQAE_BEGIN_PROCESSING 1 |
4374 | #define KQWQAE_END_PROCESSING 2 |
4375 | #define KQWQAE_UNBIND 3 |
4376 | static int |
4377 | kqworkq_acknowledge_events(struct kqworkq *kqwq, workq_threadreq_t kqr, |
4378 | int kevent_flags, int kqwqae_op) |
4379 | { |
4380 | struct knote *kn; |
4381 | int rc = 0; |
4382 | bool unbind; |
4383 | struct kqtailq *suppressq = &kqwq->kqwq_suppressed[kqr->tr_kq_qos_index - 1]; |
4384 | struct kqtailq *queue = &kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1]; |
4385 | |
4386 | kqlock_held(&kqwq->kqwq_kqueue); |
4387 | |
4388 | /* |
4389 | * Return suppressed knotes to their original state. |
4390 | * For workq kqueues, suppressed ones that are still |
4391 | * truly active (not just forced into the queue) will |
4392 | * set flags we check below to see if anything got |
4393 | * woken up. |
4394 | */ |
4395 | while ((kn = TAILQ_FIRST(suppressq)) != NULL) { |
4396 | knote_unsuppress(kqwq, kn); |
4397 | } |
4398 | |
4399 | if (kqwqae_op == KQWQAE_UNBIND) { |
4400 | unbind = true; |
4401 | } else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) { |
4402 | unbind = false; |
4403 | } else { |
4404 | unbind = TAILQ_EMPTY(queue); |
4405 | } |
4406 | if (unbind) { |
4407 | thread_t thread = kqr_thread_fast(kqr); |
4408 | thread_qos_t old_override; |
4409 | |
4410 | #if DEBUG || DEVELOPMENT |
4411 | thread_t self = current_thread(); |
4412 | struct uthread *ut = get_bsdthread_info(self); |
4413 | |
4414 | assert(thread == self); |
4415 | assert(ut->uu_kqr_bound == kqr); |
4416 | #endif // DEBUG || DEVELOPMENT |
4417 | |
4418 | old_override = kqworkq_unbind_locked(kqwq, kqr, thread); |
4419 | if (!TAILQ_EMPTY(queue)) { |
4420 | /* |
4421 | * Request a new thread if we didn't process the whole |
4422 | * queue. |
4423 | */ |
4424 | kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, |
4425 | kqr->tr_kq_qos_index, 0); |
4426 | } |
4427 | if (old_override) { |
4428 | thread_drop_kevent_override(thread); |
4429 | } |
4430 | rc = -1; |
4431 | } |
4432 | |
4433 | return rc; |
4434 | } |
4435 | |
4436 | /* |
4437 | * Return 0 to indicate that processing should proceed, |
4438 | * -1 if there is nothing to process. |
4439 | * |
4440 | * Called with kqueue locked and returns the same way, |
4441 | * but may drop lock temporarily. |
4442 | */ |
4443 | static int |
4444 | kqworkq_begin_processing(struct kqworkq *kqwq, workq_threadreq_t kqr, |
4445 | int kevent_flags) |
4446 | { |
4447 | int rc = 0; |
4448 | |
4449 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START, |
4450 | 0, kqr->tr_kq_qos_index); |
4451 | |
4452 | rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags, |
4453 | KQWQAE_BEGIN_PROCESSING); |
4454 | |
4455 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END, |
4456 | thread_tid(kqr_thread(kqr)), |
4457 | !TAILQ_EMPTY(&kqwq->kqwq_queue[kqr->tr_kq_qos_index - 1])); |
4458 | |
4459 | return rc; |
4460 | } |
4461 | |
4462 | static thread_qos_t |
4463 | kqworkloop_acknowledge_events(struct kqworkloop *kqwl) |
4464 | { |
4465 | kq_index_t qos = THREAD_QOS_UNSPECIFIED; |
4466 | struct knote *kn, *tmp; |
4467 | |
4468 | kqlock_held(kqwl); |
4469 | |
4470 | TAILQ_FOREACH_SAFE(kn, &kqwl->kqwl_suppressed, kn_tqe, tmp) { |
4471 | /* |
4472 | * If a knote that can adjust QoS is disabled because of the automatic |
4473 | * behavior of EV_DISPATCH, the knotes should stay suppressed so that |
4474 | * further overrides keep pushing. |
4475 | */ |
4476 | if (knote_fops(kn)->f_adjusts_qos && |
4477 | (kn->kn_status & KN_DISABLED) != 0 && |
4478 | (kn->kn_status & KN_DROPPING) == 0 && |
4479 | (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) { |
4480 | qos = MAX(qos, kn->kn_qos_override); |
4481 | continue; |
4482 | } |
4483 | knote_unsuppress(kqwl, kn); |
4484 | } |
4485 | |
4486 | return qos; |
4487 | } |
4488 | |
4489 | static int |
4490 | kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags) |
4491 | { |
4492 | workq_threadreq_t kqr = &kqwl->kqwl_request; |
4493 | struct kqueue *kq = &kqwl->kqwl_kqueue; |
4494 | int rc = 0, op = KQWL_UTQ_NONE; |
4495 | |
4496 | kqlock_held(kq); |
4497 | |
4498 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START, |
4499 | kqwl->kqwl_dynamicid, 0, 0); |
4500 | |
4501 | /* nobody else should still be processing */ |
4502 | assert((kq->kq_state & KQ_PROCESSING) == 0); |
4503 | |
4504 | kq->kq_state |= KQ_PROCESSING; |
4505 | |
4506 | if (kevent_flags & KEVENT_FLAG_PARKING) { |
4507 | /* |
4508 | * When "parking" we want to process events and if no events are found |
4509 | * unbind. |
4510 | * |
4511 | * However, non-overcommit threads sometimes park even when they have |
4512 | * more work so that the pool can narrow. For these, we need to unbind |
4513 | * early, so that calling kqworkloop_update_threads_qos() can ask the |
4514 | * workqueue subsystem whether the thread should park despite having |
4515 | * pending events. |
4516 | */ |
4517 | if (kqr->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) { |
4518 | op = KQWL_UTQ_PARKING; |
4519 | } else { |
4520 | op = KQWL_UTQ_UNBINDING; |
4521 | } |
4522 | } else if (!TAILQ_EMPTY(&kqwl->kqwl_suppressed)) { |
4523 | op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE; |
4524 | } |
4525 | |
4526 | if (op != KQWL_UTQ_NONE) { |
4527 | thread_qos_t qos_override; |
4528 | thread_t thread = kqr_thread_fast(kqr); |
4529 | |
4530 | qos_override = kqworkloop_acknowledge_events(kqwl); |
4531 | |
4532 | if (op == KQWL_UTQ_UNBINDING) { |
4533 | kqworkloop_unbind_locked(kqwl, thread, |
4534 | KQWL_OVERRIDE_DROP_IMMEDIATELY); |
4535 | kqworkloop_release_live(kqwl); |
4536 | } |
4537 | kqworkloop_update_threads_qos(kqwl, op, qos_override); |
4538 | if (op == KQWL_UTQ_PARKING && |
4539 | (!kqwl->kqwl_count || kqwl->kqwl_owner)) { |
4540 | kqworkloop_unbind_locked(kqwl, thread, |
4541 | KQWL_OVERRIDE_DROP_DELAYED); |
4542 | kqworkloop_release_live(kqwl); |
4543 | rc = -1; |
4544 | } else if (op == KQWL_UTQ_UNBINDING && |
4545 | kqr_thread(kqr) != thread) { |
4546 | rc = -1; |
4547 | } |
4548 | |
4549 | if (rc == -1) { |
4550 | kq->kq_state &= ~KQ_PROCESSING; |
4551 | kqworkloop_unbind_delayed_override_drop(thread); |
4552 | } |
4553 | } |
4554 | |
4555 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END, |
4556 | kqwl->kqwl_dynamicid, 0, 0); |
4557 | |
4558 | return rc; |
4559 | } |
4560 | |
4561 | /* |
4562 | * Return 0 to indicate that processing should proceed, |
4563 | * -1 if there is nothing to process. |
4564 | * EBADF if the kqueue is draining |
4565 | * |
4566 | * Called with kqueue locked and returns the same way, |
4567 | * but may drop lock temporarily. |
4568 | * May block. |
4569 | */ |
4570 | static int |
4571 | kqfile_begin_processing(struct kqfile *kq) |
4572 | { |
4573 | kqlock_held(kq); |
4574 | |
4575 | assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0); |
4576 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START, |
4577 | VM_KERNEL_UNSLIDE_OR_PERM(kq), 0); |
4578 | |
4579 | /* wait to become the exclusive processing thread */ |
4580 | while ((kq->kqf_state & (KQ_PROCESSING | KQ_DRAIN)) == KQ_PROCESSING) { |
4581 | kq->kqf_state |= KQ_PROCWAIT; |
4582 | lck_spin_sleep(&kq->kqf_lock, LCK_SLEEP_DEFAULT, |
4583 | &kq->kqf_suppressed, THREAD_UNINT | THREAD_WAIT_NOREPORT); |
4584 | } |
4585 | |
4586 | if (kq->kqf_state & KQ_DRAIN) { |
4587 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END, |
4588 | VM_KERNEL_UNSLIDE_OR_PERM(kq), 2); |
4589 | return EBADF; |
4590 | } |
4591 | |
4592 | /* Nobody else processing */ |
4593 | |
4594 | /* anything left to process? */ |
4595 | if (kq->kqf_count == 0) { |
4596 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END, |
4597 | VM_KERNEL_UNSLIDE_OR_PERM(kq), 1); |
4598 | return -1; |
4599 | } |
4600 | |
4601 | /* convert to processing mode */ |
4602 | kq->kqf_state |= KQ_PROCESSING; |
4603 | |
4604 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END, |
4605 | VM_KERNEL_UNSLIDE_OR_PERM(kq), 0); |
4606 | return 0; |
4607 | } |
4608 | |
4609 | /* |
4610 | * Try to end the processing, only called when a workq thread is attempting to |
4611 | * park (KEVENT_FLAG_PARKING is set). |
4612 | * |
4613 | * When returning -1, the kqworkq is set up again so that it is ready to be |
4614 | * processed. |
4615 | */ |
4616 | static int |
4617 | kqworkq_end_processing(struct kqworkq *kqwq, workq_threadreq_t kqr, |
4618 | int kevent_flags) |
4619 | { |
4620 | if (kevent_flags & KEVENT_FLAG_PARKING) { |
4621 | /* |
4622 | * if acknowledge events "succeeds" it means there are events, |
4623 | * which is a failure condition for end_processing. |
4624 | */ |
4625 | int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags, |
4626 | KQWQAE_END_PROCESSING); |
4627 | if (rc == 0) { |
4628 | return -1; |
4629 | } |
4630 | } |
4631 | |
4632 | return 0; |
4633 | } |
4634 | |
4635 | /* |
4636 | * Try to end the processing, only called when a workq thread is attempting to |
4637 | * park (KEVENT_FLAG_PARKING is set). |
4638 | * |
4639 | * When returning -1, the kqworkloop is set up again so that it is ready to be |
4640 | * processed (as if kqworkloop_begin_processing had just been called). |
4641 | * |
4642 | * If successful and KEVENT_FLAG_PARKING was set in the kevent_flags, |
4643 | * the kqworkloop is unbound from its servicer as a side effect. |
4644 | */ |
4645 | static int |
4646 | kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags) |
4647 | { |
4648 | struct kqueue *kq = &kqwl->kqwl_kqueue; |
4649 | workq_threadreq_t kqr = &kqwl->kqwl_request; |
4650 | int rc = 0; |
4651 | |
4652 | kqlock_held(kq); |
4653 | |
4654 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START, |
4655 | kqwl->kqwl_dynamicid, 0, 0); |
4656 | |
4657 | if (kevent_flags & KEVENT_FLAG_PARKING) { |
4658 | thread_t thread = kqr_thread_fast(kqr); |
4659 | thread_qos_t qos_override; |
4660 | |
4661 | /* |
4662 | * When KEVENT_FLAG_PARKING is set, we need to attempt |
4663 | * an unbind while still under the lock. |
4664 | * |
4665 | * So we do everything kqworkloop_unbind() would do, but because |
4666 | * we're inside kqueue_process(), if the workloop actually |
4667 | * received events while our locks were dropped, we have |
4668 | * the opportunity to fail the end processing and loop again. |
4669 | * |
4670 | * This avoids going through the process-wide workqueue lock |
4671 | * hence scales better. |
4672 | */ |
4673 | assert(flags & KQ_PROCESSING); |
4674 | qos_override = kqworkloop_acknowledge_events(kqwl); |
4675 | kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override); |
4676 | |
4677 | if (kqwl->kqwl_wakeup_qos && !kqwl->kqwl_owner) { |
4678 | rc = -1; |
4679 | } else { |
4680 | kqworkloop_unbind_locked(kqwl, thread, KQWL_OVERRIDE_DROP_DELAYED); |
4681 | kqworkloop_release_live(kqwl); |
4682 | kq->kq_state &= ~flags; |
4683 | kqworkloop_unbind_delayed_override_drop(thread); |
4684 | } |
4685 | } else { |
4686 | kq->kq_state &= ~flags; |
4687 | kq->kq_state |= KQ_R2K_ARMED; |
4688 | kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0); |
4689 | } |
4690 | |
4691 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END, |
4692 | kqwl->kqwl_dynamicid, 0, 0); |
4693 | |
4694 | return rc; |
4695 | } |
4696 | |
4697 | /* |
4698 | * Called with kqueue lock held. |
4699 | * |
4700 | * 0: no more events |
4701 | * -1: has more events |
4702 | * EBADF: kqueue is in draining mode |
4703 | */ |
4704 | static int |
4705 | kqfile_end_processing(struct kqfile *kq) |
4706 | { |
4707 | struct knote *kn; |
4708 | int procwait; |
4709 | |
4710 | kqlock_held(kq); |
4711 | |
4712 | assert((kq->kqf_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0); |
4713 | |
4714 | KDBG_DEBUG(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END), |
4715 | VM_KERNEL_UNSLIDE_OR_PERM(kq), 0); |
4716 | |
4717 | /* |
4718 | * Return suppressed knotes to their original state. |
4719 | */ |
4720 | while ((kn = TAILQ_FIRST(&kq->kqf_suppressed)) != NULL) { |
4721 | knote_unsuppress(kq, kn); |
4722 | } |
4723 | |
4724 | procwait = (kq->kqf_state & KQ_PROCWAIT); |
4725 | kq->kqf_state &= ~(KQ_PROCESSING | KQ_PROCWAIT); |
4726 | |
4727 | if (procwait) { |
4728 | /* first wake up any thread already waiting to process */ |
4729 | thread_wakeup(&kq->kqf_suppressed); |
4730 | } |
4731 | |
4732 | if (kq->kqf_state & KQ_DRAIN) { |
4733 | return EBADF; |
4734 | } |
4735 | return kq->kqf_count != 0 ? -1 : 0; |
4736 | } |
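/*
 * Illustrative sketch (not part of this file): the "single exclusive
 * processor" handshake implemented by kqfile_begin_processing() and
 * kqfile_end_processing() above (KQ_PROCESSING / KQ_PROCWAIT / KQ_DRAIN),
 * expressed with hypothetical pthread names.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

struct single_processor {
	pthread_mutex_t lock;
	pthread_cond_t  cv;
	bool            processing;   /* KQ_PROCESSING analogue */
	bool            draining;     /* KQ_DRAIN analogue */
};

/* Lock held on entry and exit.  Returns 0 on success, -1 if draining. */
static int
begin_processing(struct single_processor *sp)
{
	while (sp->processing && !sp->draining) {
		pthread_cond_wait(&sp->cv, &sp->lock);   /* the KQ_PROCWAIT sleep */
	}
	if (sp->draining) {
		return -1;
	}
	sp->processing = true;
	return 0;
}

/* Lock held on entry and exit. */
static void
end_processing(struct single_processor *sp)
{
	sp->processing = false;
	pthread_cond_broadcast(&sp->cv);             /* wake any waiting processor */
}
#endif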
4737 | |
4738 | static int |
4739 | kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options, |
4740 | struct kqueue_workloop_params *params, int *retval) |
4741 | { |
4742 | int error = 0; |
4743 | struct kqworkloop *kqwl; |
4744 | struct filedesc *fdp = &p->p_fd; |
4745 | workq_threadreq_param_t trp = { }; |
4746 | #if CONFIG_PREADOPT_TG |
4747 | struct thread_group *trp_permanent_preadopt_tg = NULL; |
4748 | integer_t trp_preadopt_priority = 0; |
4749 | integer_t trp_preadopt_policy = 0; |
4750 | #endif /* CONFIG_PREADOPT_TG */ |
4751 | |
4752 | switch (cmd) { |
4753 | case KQ_WORKLOOP_CREATE: |
4754 | if (!params->kqwlp_flags) { |
4755 | error = EINVAL; |
4756 | break; |
4757 | } |
4758 | |
4759 | if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) && |
4760 | (params->kqwlp_sched_pri < 1 || |
4761 | params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) { |
4762 | error = EINVAL; |
4763 | break; |
4764 | } |
4765 | |
4766 | if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) && |
4767 | invalid_policy(params->kqwlp_sched_pol)) { |
4768 | error = EINVAL; |
4769 | break; |
4770 | } |
4771 | |
4772 | if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) && |
4773 | (params->kqwlp_cpu_percent <= 0 || |
4774 | params->kqwlp_cpu_percent > 100 || |
4775 | params->kqwlp_cpu_refillms <= 0 || |
4776 | params->kqwlp_cpu_refillms > 0x00ffffff)) { |
4777 | error = EINVAL; |
4778 | break; |
4779 | } |
4780 | |
4781 | if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_WORK_INTERVAL) { |
4782 | #if CONFIG_PREADOPT_TG |
4783 | kern_return_t kr; |
4784 | kr = kern_work_interval_get_policy_from_port(params->kqwl_wi_port, |
4785 | &trp_preadopt_policy, |
4786 | &trp_preadopt_priority, |
4787 | &trp_permanent_preadopt_tg); |
4788 | if (kr != KERN_SUCCESS) { |
4789 | error = EINVAL; |
4790 | break; |
4791 | } |
4792 | /* The work interval comes with scheduling policy. */ |
4793 | if (trp_preadopt_policy) { |
4794 | trp.trp_flags |= TRP_POLICY; |
4795 | trp.trp_pol = (uint8_t)trp_preadopt_policy; |
4796 | |
4797 | trp.trp_flags |= TRP_PRIORITY; |
4798 | trp.trp_pri = (uint8_t)trp_preadopt_priority; |
4799 | } |
4800 | /* |
4801 | * We take +1 ref on a thread group backing this work interval |
4802 | * via kern_work_interval_get_policy_from_port and pass it on to kqwl. |
4803 | * If, for whatever reasons, kqworkloop_get_or_create fails, we |
4804 | * get back this ref. |
4805 | */ |
4806 | #else |
4807 | error = ENOTSUP; |
4808 | break; |
4809 | #endif /* CONFIG_PREADOPT_TG */ |
4810 | } |
4811 | |
4812 | if (!(trp.trp_flags & (TRP_POLICY | TRP_PRIORITY))) { |
4813 | /* |
4814 | * We always prefer scheduling policy + priority that comes with |
4815 | * a work interval. If it does not exist, we fall back to what the user |
4816 | * has asked for. |
4817 | */ |
4818 | if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) { |
4819 | trp.trp_flags |= TRP_PRIORITY; |
4820 | trp.trp_pri = (uint8_t)params->kqwlp_sched_pri; |
4821 | } |
4822 | if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) { |
4823 | trp.trp_flags |= TRP_POLICY; |
4824 | trp.trp_pol = (uint8_t)params->kqwlp_sched_pol; |
4825 | } |
4826 | if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) { |
4827 | trp.trp_flags |= TRP_CPUPERCENT; |
4828 | trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent; |
4829 | trp.trp_refillms = params->kqwlp_cpu_refillms; |
4830 | } |
4831 | } |
4832 | |
4833 | error = kqworkloop_get_or_create(p, params->kqwlp_id, &trp, |
4834 | #if CONFIG_PREADOPT_TG |
4835 | trp_permanent_preadopt_tg, |
4836 | #endif /* CONFIG_PREADOPT_TG */ |
4837 | KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP | |
4838 | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST, &kqwl); |
4839 | if (error) { |
4840 | #if CONFIG_PREADOPT_TG |
4841 | /* In case of success, kqwl consumes this +1 ref. */ |
4842 | if (trp_permanent_preadopt_tg) { |
4843 | thread_group_release(trp_permanent_preadopt_tg); |
4844 | } |
4845 | #endif |
4846 | break; |
4847 | } |
4848 | |
4849 | if (!fdt_flag_test(fdp, FD_WORKLOOP)) { |
4850 | /* FD_WORKLOOP indicates we've created a workloop |
4851 | * via this syscall at some point; it is only ever added to a process, |
4852 | * never removed. |
4853 | */ |
4854 | proc_fdlock(p); |
4855 | fdt_flag_set(fdp, FD_WORKLOOP); |
4856 | proc_fdunlock(p); |
4857 | } |
4858 | break; |
4859 | case KQ_WORKLOOP_DESTROY: |
4860 | error = kqworkloop_get_or_create(p, params->kqwlp_id, NULL, |
4861 | #if CONFIG_PREADOPT_TG |
4862 | NULL, |
4863 | #endif /* CONFIG_PREADOPT_TG */ |
4864 | KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP | |
4865 | KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST, &kqwl); |
4866 | if (error) { |
4867 | break; |
4868 | } |
4869 | kqlock(kqwl); |
4870 | trp.trp_value = kqwl->kqwl_params; |
4871 | if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) { |
4872 | trp.trp_flags |= TRP_RELEASED; |
4873 | kqwl->kqwl_params = trp.trp_value; |
4874 | kqworkloop_release_live(kqwl); |
4875 | } else { |
4876 | error = EINVAL; |
4877 | } |
4878 | kqunlock(kqwl); |
4879 | kqworkloop_release(kqwl); |
4880 | break; |
4881 | } |
4882 | *retval = 0; |
4883 | return error; |
4884 | } |
4885 | |
4886 | int |
4887 | kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval) |
4888 | { |
4889 | struct kqueue_workloop_params params = { |
4890 | .kqwlp_id = 0, |
4891 | }; |
4892 | if (uap->sz < sizeof(params.kqwlp_version)) { |
4893 | return EINVAL; |
4894 | } |
4895 | |
4896 | size_t copyin_sz = MIN(sizeof(params), uap->sz); |
4897 | int rv = copyin(uap->addr, &params, copyin_sz); |
4898 | if (rv) { |
4899 | return rv; |
4900 | } |
4901 | |
4902 | if (params.kqwlp_version != (int)uap->sz) { |
4903 | return EINVAL; |
4904 | } |
4905 | |
4906 | return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, &params, |
4907 | retval); |
4908 | } |
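/*
 * Illustrative sketch (not part of this file): the versioned-struct copyin
 * convention used by kqueue_workloop_ctl() above -- copy in at most
 * sizeof(params) bytes, zero-fill the rest, and require the embedded version
 * to match the size the caller passed.  Names are hypothetical; memcpy()
 * stands in for copyin().
 */
#if 0
#include <stddef.h>
#include <string.h>
#include <errno.h>

struct params_v2 {
	int  version;   /* userspace stores the struct size it was built against */
	long id;
	int  flags;     /* fields appended over time; older callers omit them */
};

static int
copyin_versioned(const void *user_buf, size_t user_sz, struct params_v2 *out)
{
	if (user_sz < sizeof(out->version)) {
		return EINVAL;
	}
	memset(out, 0, sizeof(*out));          /* new fields default to zero */
	size_t n = user_sz < sizeof(*out) ? user_sz : sizeof(*out);
	memcpy(out, user_buf, n);              /* stand-in for copyin() */
	if (out->version != (int)user_sz) {
		return EINVAL;                     /* size/version mismatch */
	}
	return 0;
}
#endif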
4909 | |
4910 | static int |
4911 | kqueue_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx) |
4912 | { |
4913 | struct kqfile *kq = (struct kqfile *)fp_get_data(fp); |
4914 | int retnum = 0; |
4915 | |
4916 | assert((kq->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0); |
4917 | |
4918 | if (which == FREAD) { |
4919 | kqlock(kq); |
4920 | if (kqfile_begin_processing(kq) == 0) { |
4921 | retnum = kq->kqf_count; |
4922 | kqfile_end_processing(kq); |
4923 | } else if ((kq->kqf_state & KQ_DRAIN) == 0) { |
4924 | selrecord(kq->kqf_p, &kq->kqf_sel, wql); |
4925 | } |
4926 | kqunlock(kq); |
4927 | } |
4928 | return retnum; |
4929 | } |
4930 | |
4931 | /* |
4932 | * kqueue_close - |
4933 | */ |
4934 | static int |
4935 | kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx) |
4936 | { |
4937 | struct kqfile *kqf = fg_get_data(fg); |
4938 | |
4939 | assert((kqf->kqf_state & (KQ_WORKLOOP | KQ_WORKQ)) == 0); |
4940 | kqlock(kqf); |
4941 | selthreadclear(&kqf->kqf_sel); |
4942 | kqunlock(kqf); |
4943 | kqueue_dealloc(&kqf->kqf_kqueue); |
4944 | fg_set_data(fg, NULL); |
4945 | return 0; |
4946 | } |
4947 | |
4948 | /* |
4949 | * Max depth of the nested kq path that can be created. |
4950 | * Note that this has to be less than the size of kq_level |
4951 | * to avoid wrapping around and mislabeling the level. We also |
4952 | * want to be aggressive about this so that we don't overflow the |
4953 | * kernel stack while posting kevents |
4954 | */ |
4955 | #define MAX_NESTED_KQ 10 |
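/*
 * Illustrative sketch (not part of this file): what "nesting" means here.  A
 * kqueue file descriptor can itself be registered in another kqueue with
 * EVFILT_READ; kqueue_kqfilter() below is the attach path for that case and
 * enforces the MAX_NESTED_KQ depth.  Uses the public kqueue(2)/kevent(2) API;
 * error cleanup omitted.
 */
#if 0
#include <sys/event.h>
#include <unistd.h>

static int
nest_kqueues(int *outer_out, int *inner_out)
{
	int inner = kqueue();
	int outer = kqueue();
	if (inner < 0 || outer < 0) {
		return -1;
	}

	/* The inner kqueue becomes a readable event source of the outer one. */
	struct kevent change;
	EV_SET(&change, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(outer, &change, 1, NULL, 0, NULL) < 0) {
		return -1;    /* a chain deeper than MAX_NESTED_KQ would be rejected */
	}
	*outer_out = outer;
	*inner_out = inner;
	return 0;
}
#endif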
4956 | |
4957 | /* |
4958 | * The caller has taken a use-count reference on this kqueue and will donate it |
4959 | * to the kqueue we are being added to. This keeps the kqueue from closing until |
4960 | * that relationship is torn down. |
4961 | */ |
4962 | static int |
4963 | kqueue_kqfilter(struct fil |