/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)sys_generic.c       8.9 (Berkeley) 2/14/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/socketvar.h>
#include <sys/uio_internal.h>
#include <sys/kernel.h>
#include <sys/guarded.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>

#include <sys/mount_internal.h>
#include <sys/protosw.h>
#include <sys/ev.h>
#include <sys/user.h>
#include <sys/kdebug.h>
#include <sys/poll.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/proc.h>
#include <sys/kauth.h>

#include <machine/smp.h>
#include <mach/mach_types.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/ledger.h>
#include <kern/monotonic.h>
#include <kern/task.h>
#include <kern/telemetry.h>
#include <kern/waitq.h>
#include <kern/sched_hygiene.h>
#include <kern/sched_prim.h>
#include <kern/mpsc_queue.h>
#include <kern/debug.h>

#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/syscall.h>
#include <sys/pipe.h>

#include <security/audit/audit.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>
/* for wait queue based select */
#include <kern/waitq.h>
#include <sys/vnode_internal.h>
/* for remote time api */
#include <kern/remote_time.h>
#include <os/log.h>
#include <sys/log_data.h>

#include <machine/monotonic.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
#include <mach_debug/mach_debug_types.h>
#endif

/* for entitlement check */
#include <IOKit/IOBSD.h>

/* XXX should be in a header file somewhere */
extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp);

int do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval);
__private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp,
    user_addr_t bufp, user_size_t nbyte,
    off_t offset, int flags, user_ssize_t *retval);
__private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
    user_addr_t bufp, user_size_t nbyte,
    off_t offset, int flags, user_ssize_t *retval);
static int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode);

/* needed by guarded_writev, etc. */
int write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
    off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval);
int writev_uio(struct proc *p, int fd, user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
    guardid_t *puguard, user_ssize_t *retval);

#define f_flag fp_glob->fg_flag
#define f_type fp_glob->fg_ops->fo_type
#define f_cred fp_glob->fg_cred
#define f_ops fp_glob->fg_ops
/*
 * Validate that the file can be used for random access (pread, pwrite, etc).
 *
 * Conditions:
 *      proc_fdlock is held
 *
 * Returns:     0       Success
 *              ESPIPE
 *              ENXIO
 */
static int
valid_for_random_access(struct fileproc *fp)
{
    if (__improbable(fp->f_type != DTYPE_VNODE)) {
        return ESPIPE;
    }

    vnode_t vp = (struct vnode *)fp_get_data(fp);
    if (__improbable(vnode_isfifo(vp))) {
        return ESPIPE;
    }

    if (__improbable(vp->v_flag & VISTTY)) {
        return ENXIO;
    }

    return 0;
}
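
/*
 * Illustration (not part of this file): the checks above are what make
 * pread(2)/pwrite(2) fail with ESPIPE on non-seekable files from user
 * space.  A minimal userspace sketch, assuming a standard POSIX
 * environment; demo_pread_on_pipe is a hypothetical name:
 *
 *      #include <unistd.h>
 *      #include <errno.h>
 *      #include <stdio.h>
 *
 *      int
 *      demo_pread_on_pipe(void)
 *      {
 *          int fds[2];
 *          char c;
 *
 *          if (pipe(fds) != 0) {
 *              return -1;
 *          }
 *          // pread requires a seekable vnode; a pipe is not one.
 *          if (pread(fds[0], &c, 1, 0) == -1 && errno == ESPIPE) {
 *              printf("pread on a pipe fails with ESPIPE\n");
 *          }
 *          close(fds[0]);
 *          close(fds[1]);
 *          return 0;
 *      }
 */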

/*
 * Returns:     0       Success
 *              EBADF
 *              ESPIPE
 *              ENXIO
 *              fp_lookup:EBADF
 *              valid_for_random_access:ESPIPE
 *              valid_for_random_access:ENXIO
 */
static int
preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread)
{
    int error;
    struct fileproc *fp;

    AUDIT_ARG(fd, fd);

    proc_fdlock_spin(p);

    error = fp_lookup(p, fd, &fp, 1);

    if (error) {
        proc_fdunlock(p);
        return error;
    }
    if ((fp->f_flag & FREAD) == 0) {
        error = EBADF;
        goto out;
    }
    if (check_for_pread) {
        if ((error = valid_for_random_access(fp))) {
            goto out;
        }
    }

    *fp_ret = fp;

    proc_fdunlock(p);
    return 0;

out:
    fp_drop(p, fd, fp, 1);
    proc_fdunlock(p);
    return error;
}

static int
fp_readv(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
    user_ssize_t *retval)
{
    int error;
    user_ssize_t count;

    if ((error = uio_calculateresid_user(uio))) {
        *retval = 0;
        return error;
    }

    count = uio_resid(uio);
    error = fo_read(fp, uio, flags, ctx);

    switch (error) {
    case ERESTART:
    case EINTR:
    case EWOULDBLOCK:
        if (uio_resid(uio) != count) {
            error = 0;
        }
        break;

    default:
        break;
    }

    *retval = count - uio_resid(uio);
    return error;
}
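
/*
 * Illustration (not part of this file): the error rewrite above is why a
 * read(2) that is interrupted after some bytes have already been
 * transferred reports the partial count instead of failing with EINTR.
 * A hedged userspace sketch of the resulting retry idiom; read_all is a
 * hypothetical name:
 *
 *      #include <unistd.h>
 *      #include <errno.h>
 *
 *      ssize_t
 *      read_all(int fd, char *buf, size_t len)
 *      {
 *          size_t done = 0;
 *
 *          while (done < len) {
 *              ssize_t n = read(fd, buf + done, len - done);
 *              if (n > 0) {
 *                  done += (size_t)n;  // partial transfer: keep going
 *              } else if (n == -1 && errno == EINTR) {
 *                  continue;           // no bytes moved: retry
 *              } else {
 *                  break;              // EOF or real error
 *              }
 *          }
 *          return (ssize_t)done;
 *      }
 */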

/*
 * Returns:     0       Success
 *              EINVAL
 *              fo_read:???
 */
__private_extern__ int
dofileread(vfs_context_t ctx, struct fileproc *fp,
    user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
    user_ssize_t *retval)
{
    UIO_STACKBUF(uio_buf, 1);
    uio_t uio;
    int spacetype;

    if (nbyte > INT_MAX) {
        *retval = 0;
        return EINVAL;
    }

    spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
    uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ, &uio_buf[0],
        sizeof(uio_buf));

    if (uio_addiov(uio, bufp, nbyte) != 0) {
        *retval = 0;
        return EINVAL;
    }

    return fp_readv(ctx, fp, uio, flags, retval);
}

static int
readv_internal(struct proc *p, int fd, uio_t uio, int flags,
    user_ssize_t *retval)
{
    struct fileproc *fp = NULL;
    struct vfs_context context;
    int error;

    if ((error = preparefileread(p, &fp, fd, flags & FOF_OFFSET))) {
        *retval = 0;
        return error;
    }

    context = *(vfs_context_current());
    context.vc_ucred = fp->fp_glob->fg_cred;

    error = fp_readv(&context, fp, uio, flags, retval);

    fp_drop(p, fd, fp, 0);
    return error;
}

static int
read_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
    off_t offset, int flags, user_ssize_t *retval)
{
    UIO_STACKBUF(uio_buf, 1);
    uio_t uio;
    int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;

    if (nbyte > INT_MAX) {
        *retval = 0;
        return EINVAL;
    }

    uio = uio_createwithbuffer(1, offset, spacetype, UIO_READ,
        &uio_buf[0], sizeof(uio_buf));

    if (uio_addiov(uio, buf, nbyte) != 0) {
        *retval = 0;
        return EINVAL;
    }

    return readv_internal(p, fd, uio, flags, retval);
}

int
read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retval)
{
    return read_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
        retval);
}

/*
 * Read system call.
 *
 * Returns:     0       Success
 *              preparefileread:EBADF
 *              preparefileread:ESPIPE
 *              preparefileread:ENXIO
 *              preparefileread:EBADF
 *              dofileread:???
 */
int
read(struct proc *p, struct read_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return read_nocancel(p, (struct read_nocancel_args *)uap, retval);
}

int
pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval)
{
    KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE),
        uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

    return read_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
        FOF_OFFSET, retval);
}

/*
 * Pread system call
 *
 * Returns:     0       Success
 *              preparefileread:EBADF
 *              preparefileread:ESPIPE
 *              preparefileread:ENXIO
 *              preparefileread:EBADF
 *              dofileread:???
 */
int
pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return pread_nocancel(p, (struct pread_nocancel_args *)uap, retval);
}

/*
 * Vector read.
 *
 * Returns:     0       Success
 *              EINVAL
 *              ENOMEM
 *              preparefileread:EBADF
 *              preparefileread:ESPIPE
 *              preparefileread:ENXIO
 *              preparefileread:EBADF
 *              copyin:EFAULT
 *              rd_uio:???
 */
static int
readv_uio(struct proc *p, int fd,
    user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
    user_ssize_t *retval)
{
    uio_t uio = NULL;
    int error;
    struct user_iovec *iovp;

    if (iovcnt <= 0 || iovcnt > UIO_MAXIOV) {
        error = EINVAL;
        goto out;
    }

    uio = uio_create(iovcnt, offset,
        (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
        UIO_READ);

    iovp = uio_iovsaddr_user(uio);
    if (iovp == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = copyin_user_iovec_array(user_iovp,
        IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
        iovcnt, iovp);

    if (error) {
        goto out;
    }

    error = readv_internal(p, fd, uio, flags, retval);

out:
    if (uio != NULL) {
        uio_free(uio);
    }

    return error;
}

int
readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval)
{
    return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, retval);
}

/*
 * Scatter read system call.
 */
int
readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return readv_nocancel(p, (struct readv_nocancel_args *)uap, retval);
}

int
sys_preadv_nocancel(struct proc *p, struct preadv_nocancel_args *uap, user_ssize_t *retval)
{
    return readv_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
        FOF_OFFSET, retval);
}

/*
 * Preadv system call
 */
int
sys_preadv(struct proc *p, struct preadv_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return sys_preadv_nocancel(p, (struct preadv_nocancel_args *)uap, retval);
}
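
/*
 * Illustration (not part of this file): the readv/preadv paths above
 * consume an array of iovecs copied in from user space.  A minimal
 * userspace sketch of the scatter-read interface, assuming a libc that
 * exposes preadv (macOS 11 or later); demo_preadv is a hypothetical name:
 *
 *      #include <sys/uio.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      ssize_t
 *      demo_preadv(const char *path)
 *      {
 *          char hdr[16], body[64];
 *          struct iovec iov[2] = {
 *              { .iov_base = hdr,  .iov_len = sizeof(hdr)  },
 *              { .iov_base = body, .iov_len = sizeof(body) },
 *          };
 *          int fd = open(path, O_RDONLY);
 *          if (fd < 0) {
 *              return -1;
 *          }
 *          // One syscall fills both buffers, reading at offset 0
 *          // without moving the file's offset.
 *          ssize_t n = preadv(fd, iov, 2, 0);
 *          close(fd);
 *          return n;
 *      }
 */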

/*
 * Returns:     0       Success
 *              EBADF
 *              ESPIPE
 *              ENXIO
 *              fp_lookup:EBADF
 *              fp_guard_exception:???
 *              valid_for_random_access:ESPIPE
 *              valid_for_random_access:ENXIO
 */
static int
preparefilewrite(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pwrite,
    guardid_t *puguard)
{
    int error;
    struct fileproc *fp;

    AUDIT_ARG(fd, fd);

    proc_fdlock_spin(p);

    if (puguard) {
        error = fp_lookup_guarded(p, fd, *puguard, &fp, 1);
        if (error) {
            proc_fdunlock(p);
            return error;
        }

        if ((fp->f_flag & FWRITE) == 0) {
            error = EBADF;
            goto out;
        }
    } else {
        error = fp_lookup(p, fd, &fp, 1);
        if (error) {
            proc_fdunlock(p);
            return error;
        }

        /* Allow EBADF first. */
        if ((fp->f_flag & FWRITE) == 0) {
            error = EBADF;
            goto out;
        }

        if (fp_isguarded(fp, GUARD_WRITE)) {
            error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
            goto out;
        }
    }

    if (check_for_pwrite) {
        if ((error = valid_for_random_access(fp))) {
            goto out;
        }
    }

    *fp_ret = fp;

    proc_fdunlock(p);
    return 0;

out:
    fp_drop(p, fd, fp, 1);
    proc_fdunlock(p);
    return error;
}

static int
fp_writev(vfs_context_t ctx, struct fileproc *fp, uio_t uio, int flags,
    user_ssize_t *retval)
{
    int error;
    user_ssize_t count;

    if ((error = uio_calculateresid_user(uio))) {
        *retval = 0;
        return error;
    }

    count = uio_resid(uio);
    error = fo_write(fp, uio, flags, ctx);

    switch (error) {
    case ERESTART:
    case EINTR:
    case EWOULDBLOCK:
        if (uio_resid(uio) != count) {
            error = 0;
        }
        break;

    case EPIPE:
        if (fp->f_type != DTYPE_SOCKET &&
            (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) == 0) {
            /* XXX Raise the signal on the thread? */
            psignal(vfs_context_proc(ctx), SIGPIPE);
        }
        break;

    default:
        break;
    }

    if ((*retval = count - uio_resid(uio))) {
        os_atomic_or(&fp->fp_glob->fg_flag, FWASWRITTEN, relaxed);
    }

    return error;
}
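
/*
 * Illustration (not part of this file): the EPIPE branch above is where a
 * write to a broken pipe raises SIGPIPE, and the FG_NOSIGPIPE check is the
 * kernel side of fcntl(F_SETNOSIGPIPE).  A hedged userspace sketch;
 * demo_nosigpipe is a hypothetical name:
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *      #include <errno.h>
 *
 *      int
 *      demo_nosigpipe(void)
 *      {
 *          int fds[2];
 *
 *          if (pipe(fds) != 0) {
 *              return -1;
 *          }
 *          close(fds[0]);                          // break the pipe
 *          fcntl(fds[1], F_SETNOSIGPIPE, 1);       // sets FG_NOSIGPIPE
 *          // With FG_NOSIGPIPE set, the write fails with EPIPE but no
 *          // SIGPIPE is delivered to the process.
 *          if (write(fds[1], "x", 1) == -1 && errno == EPIPE) {
 *              close(fds[1]);
 *              return 0;
 *          }
 *          close(fds[1]);
 *          return -1;
 *      }
 */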

/*
 * Returns:     0       Success
 *              EINVAL
 *              <fo_write>:EPIPE
 *              <fo_write>:???  [indirect through struct fileops]
 */
__private_extern__ int
dofilewrite(vfs_context_t ctx, struct fileproc *fp,
    user_addr_t bufp, user_size_t nbyte, off_t offset, int flags,
    user_ssize_t *retval)
{
    UIO_STACKBUF(uio_buf, 1);
    uio_t uio;
    int spacetype;

    if (nbyte > INT_MAX) {
        *retval = 0;
        return EINVAL;
    }

    spacetype = vfs_context_is64bit(ctx) ? UIO_USERSPACE64 : UIO_USERSPACE32;
    uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE, &uio_buf[0],
        sizeof(uio_buf));

    if (uio_addiov(uio, bufp, nbyte) != 0) {
        *retval = 0;
        return EINVAL;
    }

    return fp_writev(ctx, fp, uio, flags, retval);
}

static int
writev_internal(struct proc *p, int fd, uio_t uio, int flags,
    guardid_t *puguard, user_ssize_t *retval)
{
    struct fileproc *fp = NULL;
    struct vfs_context context;
    int error;

    if ((error = preparefilewrite(p, &fp, fd, flags & FOF_OFFSET, puguard))) {
        *retval = 0;
        return error;
    }

    context = *(vfs_context_current());
    context.vc_ucred = fp->fp_glob->fg_cred;

    error = fp_writev(&context, fp, uio, flags, retval);

    fp_drop(p, fd, fp, 0);
    return error;
}

int
write_internal(struct proc *p, int fd, user_addr_t buf, user_size_t nbyte,
    off_t offset, int flags, guardid_t *puguard, user_ssize_t *retval)
{
    UIO_STACKBUF(uio_buf, 1);
    uio_t uio;
    int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;

    if (nbyte > INT_MAX) {
        *retval = 0;
        return EINVAL;
    }

    uio = uio_createwithbuffer(1, offset, spacetype, UIO_WRITE,
        &uio_buf[0], sizeof(uio_buf));

    if (uio_addiov(uio, buf, nbyte) != 0) {
        *retval = 0;
        return EINVAL;
    }

    return writev_internal(p, fd, uio, flags, puguard, retval);
}

int
write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval)
{
    return write_internal(p, uap->fd, uap->cbuf, uap->nbyte, (off_t)-1, 0,
        NULL, retval);
}

/*
 * Write system call
 *
 * Returns:     0       Success
 *              EBADF
 *              fp_lookup:EBADF
 *              dofilewrite:???
 */
int
write(struct proc *p, struct write_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return write_nocancel(p, (struct write_nocancel_args *)uap, retval);
}

int
pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval)
{
    KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE),
        uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

    /* XXX: Should be < 0 instead? (See man page + pwritev) */
    if (uap->offset == (off_t)-1) {
        return EINVAL;
    }

    return write_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset,
        FOF_OFFSET, NULL, retval);
}

/*
 * pwrite system call
 *
 * Returns:     0       Success
 *              EBADF
 *              ESPIPE
 *              ENXIO
 *              EINVAL
 *              fp_lookup:EBADF
 *              dofilewrite:???
 */
int
pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval);
}

int
writev_uio(struct proc *p, int fd,
    user_addr_t user_iovp, int iovcnt, off_t offset, int flags,
    guardid_t *puguard, user_ssize_t *retval)
{
    uio_t uio = NULL;
    int error;
    struct user_iovec *iovp;

    if (iovcnt <= 0 || iovcnt > UIO_MAXIOV || offset < 0) {
        error = EINVAL;
        goto out;
    }

    uio = uio_create(iovcnt, offset,
        (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
        UIO_WRITE);

    iovp = uio_iovsaddr_user(uio);
    if (iovp == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = copyin_user_iovec_array(user_iovp,
        IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
        iovcnt, iovp);

    if (error) {
        goto out;
    }

    error = writev_internal(p, fd, uio, flags, puguard, retval);

out:
    if (uio != NULL) {
        uio_free(uio);
    }

    return error;
}

int
writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval)
{
    return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, NULL, retval);
}

/*
 * Gather write system call
 */
int
writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return writev_nocancel(p, (struct writev_nocancel_args *)uap, retval);
}

int
sys_pwritev_nocancel(struct proc *p, struct pwritev_nocancel_args *uap, user_ssize_t *retval)
{
    return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset,
        FOF_OFFSET, NULL, retval);
}

/*
 * Pwritev system call
 */
int
sys_pwritev(struct proc *p, struct pwritev_args *uap, user_ssize_t *retval)
{
    __pthread_testcancel(1);
    return sys_pwritev_nocancel(p, (struct pwritev_nocancel_args *)uap, retval);
}

/*
 * Ioctl system call
 *
 * Returns:     0       Success
 *              EBADF
 *              ENOTTY
 *              ENOMEM
 *              ESRCH
 *              copyin:EFAULT
 *              copyout:EFAULT
 *              fp_lookup:EBADF         Bad file descriptor
 *              fo_ioctl:???
 */
int
ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval)
{
    struct fileproc *fp = NULL;
    int error = 0;
    u_int size = 0;
    caddr_t datap = NULL, memp = NULL;
    boolean_t is64bit = FALSE;
    int tmp = 0;
#define STK_PARAMS 128
    char stkbuf[STK_PARAMS] = {};
    int fd = uap->fd;
    u_long com = uap->com;
    struct vfs_context context = *vfs_context_current();

    AUDIT_ARG(fd, uap->fd);
    AUDIT_ARG(addr, uap->data);

    is64bit = proc_is64bit(p);
#if CONFIG_AUDIT
    if (is64bit) {
        AUDIT_ARG(value64, com);
    } else {
        AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com));
    }
#endif /* CONFIG_AUDIT */

    /*
     * Interpret the high-order word to find the amount of data to be
     * copied to/from the user's address space.
     */
    size = IOCPARM_LEN(com);
    if (size > IOCPARM_MAX) {
        return ENOTTY;
    }
    if (size > sizeof(stkbuf)) {
        memp = (caddr_t)kalloc_data(size, Z_WAITOK);
        if (memp == 0) {
            return ENOMEM;
        }
        datap = memp;
    } else {
        datap = &stkbuf[0];
    }
    if (com & IOC_IN) {
        if (size) {
            error = copyin(uap->data, datap, size);
            if (error) {
                goto out_nofp;
            }
        } else {
            /* XXX - IOC_IN and no size? we should probably return an error here!! */
            if (is64bit) {
                *(user_addr_t *)datap = uap->data;
            } else {
                *(uint32_t *)datap = (uint32_t)uap->data;
            }
        }
    } else if ((com & IOC_OUT) && size) {
        /*
         * Zero the buffer so the user always
         * gets back something deterministic.
         */
        bzero(datap, size);
    } else if (com & IOC_VOID) {
        /* XXX - this is odd since IOC_VOID means no parameters */
        if (is64bit) {
            *(user_addr_t *)datap = uap->data;
        } else {
            *(uint32_t *)datap = (uint32_t)uap->data;
        }
    }

    proc_fdlock(p);
    error = fp_lookup(p, fd, &fp, 1);
    if (error) {
        proc_fdunlock(p);
        goto out_nofp;
    }

    AUDIT_ARG(file, p, fp);

    if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
        error = EBADF;
        goto out;
    }

    context.vc_ucred = fp->fp_glob->fg_cred;

#if CONFIG_MACF
    error = mac_file_check_ioctl(context.vc_ucred, fp->fp_glob, com);
    if (error) {
        goto out;
    }
#endif

    switch (com) {
    case FIONCLEX:
        fp->fp_flags &= ~FP_CLOEXEC;
        break;

    case FIOCLEX:
        fp->fp_flags |= FP_CLOEXEC;
        break;

    case FIONBIO:
        // FIXME (rdar://54898652)
        //
        // this code is broken if fcntl(F_SETFL), ioctl() are
        // called concurrently for the same fileglob.
        if ((tmp = *(int *)datap)) {
            os_atomic_or(&fp->f_flag, FNONBLOCK, relaxed);
        } else {
            os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed);
        }
        error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
        break;

    case FIOASYNC:
        // FIXME (rdar://54898652)
        //
        // this code is broken if fcntl(F_SETFL), ioctl() are
        // called concurrently for the same fileglob.
        if ((tmp = *(int *)datap)) {
            os_atomic_or(&fp->f_flag, FASYNC, relaxed);
        } else {
            os_atomic_andnot(&fp->f_flag, FASYNC, relaxed);
        }
        error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
        break;

    case FIOSETOWN:
        tmp = *(int *)datap;
        if (fp->f_type == DTYPE_SOCKET) {
            ((struct socket *)fp_get_data(fp))->so_pgid = tmp;
            break;
        }
        if (fp->f_type == DTYPE_PIPE) {
            error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
            break;
        }
        if (tmp <= 0) {
            tmp = -tmp;
        } else {
            struct proc *p1 = proc_find(tmp);
            if (p1 == 0) {
                error = ESRCH;
                break;
            }
            tmp = p1->p_pgrpid;
            proc_rele(p1);
        }
        error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
        break;

    case FIOGETOWN:
        if (fp->f_type == DTYPE_SOCKET) {
            *(int *)datap = ((struct socket *)fp_get_data(fp))->so_pgid;
            break;
        }
        error = fo_ioctl(fp, TIOCGPGRP, datap, &context);
        *(int *)datap = -*(int *)datap;
        break;

    default:
        error = fo_ioctl(fp, com, datap, &context);
        /*
         * Copy any data to user, size was
         * already set and checked above.
         */
        if (error == 0 && (com & IOC_OUT) && size) {
            error = copyout(datap, uap->data, (u_int)size);
        }
        break;
    }
out:
    fp_drop(p, fd, fp, 1);
    proc_fdunlock(p);

out_nofp:
    if (memp) {
        kfree_data(memp, size);
    }
    return error;
}
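
/*
 * Illustration (not part of this file): the size/direction decoding above
 * relies on the _IO/_IOR/_IOW/_IOWR command encoding from <sys/ioccom.h>,
 * where the high-order word of the command carries the parameter length
 * and the direction bits.  A hedged sketch of the arithmetic;
 * demo_iocparm is a hypothetical name:
 *
 *      #include <sys/ioccom.h>
 *      #include <sys/filio.h>
 *      #include <assert.h>
 *
 *      void
 *      demo_iocparm(void)
 *      {
 *          // FIONREAD is defined as _IOR('f', 127, int): "out" direction
 *          // with an int-sized parameter copied back to user space.
 *          assert(IOCPARM_LEN(FIONREAD) == sizeof(int));
 *          assert((FIONREAD & IOC_OUT) != 0);
 *          assert((FIONREAD & IOC_IN) == 0);
 *      }
 */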

int selwait;
#define SEL_FIRSTPASS 1
#define SEL_SECONDPASS 2
static int selprocess(struct proc *p, int error, int sel_pass);
static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata,
    int nfd, int32_t *retval, int sel_pass, struct select_set *selset);
static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count);
static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup);
static int seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim);
static int select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval);

/*
 * This is used for the special device nodes that do not implement
 * a proper kevent filter (see filt_specattach).
 *
 * In order to enable kevents on those, the spec_filtops will pretend
 * to call select, and try to sniff the selrecord(); if it observes one,
 * the knote is attached, which pairs with selwakeup() or selthreadclear().
 *
 * The last issue remaining is that we need to serialize filt_specdetach()
 * with this, but it really can't know the "selinfo" or any locking domain.
 * To make up for this, we protect knote list operations with a global lock,
 * which gives us a safe shared locking domain.
 *
 * Note: It is a little distasteful, but we really have very few of those.
 *       The big problem here is that sharing a lock domain without
 *       any kind of shared knowledge is a little complicated.
 *
 *       1. filters can really implement their own kqueue integration
 *          to side step this,
 *
 *       2. There's an opportunity to pick a private lock in selspec_attach()
 *          because both the selinfo and the knote are locked at that time.
 *          The cleanup story is however a little complicated.
 */
static LCK_GRP_DECLARE(selspec_grp, "spec_filtops");
static LCK_SPIN_DECLARE(selspec_lock, &selspec_grp);

/*
 * The "primitive" lock is held.
 * The knote lock is held.
 */
void
selspec_attach(struct knote *kn, struct selinfo *si)
{
    struct selinfo *cur = knote_kn_hook_get_raw(kn);

    if (cur == NULL) {
        si->si_flags |= SI_SELSPEC;
        lck_spin_lock(&selspec_lock);
        knote_kn_hook_set_raw(kn, (void *)si);
        KNOTE_ATTACH(&si->si_note, kn);
        lck_spin_unlock(&selspec_lock);
    } else {
        /*
         * selspec_attach() can be called from e.g. filt_spectouch()
         * which might be called before any event was dequeued.
         *
         * It is hence not impossible for the knote to already be hooked.
         *
         * Note that selwakeup_internal() could possibly
         * already have cleared this pointer.  This is a race
         * that filt_specprocess will debounce.
         */
        assert(si->si_flags & SI_SELSPEC);
        assert(cur == si);
    }
}

/*
 * The "primitive" lock is _not_ held.
 *
 * The knote "lock" is held.
 */
void
selspec_detach(struct knote *kn)
{
    lck_spin_lock(&selspec_lock);

    if (!KNOTE_IS_AUTODETACHED(kn)) {
        struct selinfo *sip = knote_kn_hook_get_raw(kn);
        if (sip) {
            KNOTE_DETACH(&sip->si_note, kn);
        }
    }

    knote_kn_hook_set_raw(kn, NULL);

    lck_spin_unlock(&selspec_lock);
}

/*
 * Select system call.
 *
 * Returns:     0       Success
 *              EINVAL  Invalid argument
 *              EAGAIN  Nonconformant error if allocation fails
 */
int
select(struct proc *p, struct select_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return select_nocancel(p, (struct select_nocancel_args *)uap, retval);
}

int
select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retval)
{
    uint64_t timeout = 0;

    if (uap->tv) {
        int err;
        struct timeval atv;
        if (IS_64BIT_PROCESS(p)) {
            struct user64_timeval atv64;
            err = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64));
            /* Loses resolution - assume timeout < 68 years */
            atv.tv_sec = (__darwin_time_t)atv64.tv_sec;
            atv.tv_usec = atv64.tv_usec;
        } else {
            struct user32_timeval atv32;
            err = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32));
            atv.tv_sec = atv32.tv_sec;
            atv.tv_usec = atv32.tv_usec;
        }
        if (err) {
            return err;
        }

        if (itimerfix(&atv)) {
            err = EINVAL;
            return err;
        }

        clock_absolutetime_interval_to_deadline(tvtoabstime(&atv), &timeout);
    }

    return select_internal(p, uap, timeout, retval);
}
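
/*
 * Illustration (not part of this file): the conversion above turns the
 * user-supplied relative timeval into an absolute deadline.  From user
 * space the same interval is expressed like this (a hedged sketch;
 * demo_select_timeout is a hypothetical name):
 *
 *      #include <sys/select.h>
 *
 *      int
 *      demo_select_timeout(int fd)
 *      {
 *          fd_set rfds;
 *          struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *          FD_ZERO(&rfds);
 *          FD_SET(fd, &rfds);
 *          // Returns 0 on timeout, 1 if fd becomes readable within 2.5s,
 *          // -1 on error (e.g. EBADF, EINTR).
 *          return select(fd + 1, &rfds, NULL, NULL, &tv);
 *      }
 */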

int
pselect(struct proc *p, struct pselect_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return pselect_nocancel(p, (struct pselect_nocancel_args *)uap, retval);
}

int
pselect_nocancel(struct proc *p, struct pselect_nocancel_args *uap, int32_t *retval)
{
    int err;
    struct uthread *ut;
    uint64_t timeout = 0;

    if (uap->ts) {
        struct timespec ts;

        if (IS_64BIT_PROCESS(p)) {
            struct user64_timespec ts64;
            err = copyin(uap->ts, (caddr_t)&ts64, sizeof(ts64));
            ts.tv_sec = (__darwin_time_t)ts64.tv_sec;
            ts.tv_nsec = (long)ts64.tv_nsec;
        } else {
            struct user32_timespec ts32;
            err = copyin(uap->ts, (caddr_t)&ts32, sizeof(ts32));
            ts.tv_sec = ts32.tv_sec;
            ts.tv_nsec = ts32.tv_nsec;
        }
        if (err) {
            return err;
        }

        if (!timespec_is_valid(&ts)) {
            return EINVAL;
        }
        clock_absolutetime_interval_to_deadline(tstoabstime(&ts), &timeout);
    }

    ut = current_uthread();

    if (uap->mask != USER_ADDR_NULL) {
        /* save current mask, then copyin and set new mask */
        sigset_t newset;
        err = copyin(uap->mask, &newset, sizeof(sigset_t));
        if (err) {
            return err;
        }
        ut->uu_oldmask = ut->uu_sigmask;
        ut->uu_flag |= UT_SAS_OLDMASK;
        ut->uu_sigmask = (newset & ~sigcantmask);
    }

    err = select_internal(p, (struct select_nocancel_args *)uap, timeout, retval);

    if (err != EINTR && ut->uu_flag & UT_SAS_OLDMASK) {
        /*
         * Restore old mask (direct return case).  NOTE: EINTR can also be
         * returned if the thread is cancelled.  In that case, we don't reset
         * the signal mask to its original value (which usually happens in the
         * signal delivery path).  This behavior is permitted by POSIX.
         */
        ut->uu_sigmask = ut->uu_oldmask;
        ut->uu_oldmask = 0;
        ut->uu_flag &= ~UT_SAS_OLDMASK;
    }

    return err;
}
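
/*
 * Illustration (not part of this file): the mask save/restore above is
 * what makes pselect(2) atomic with respect to signal delivery.  A hedged
 * userspace sketch of the canonical use, waiting for either data or
 * SIGUSR1; demo_pselect is a hypothetical name:
 *
 *      #include <sys/select.h>
 *      #include <signal.h>
 *
 *      int
 *      demo_pselect(int fd)
 *      {
 *          sigset_t blocked, waitmask;
 *          fd_set rfds;
 *
 *          // Block SIGUSR1 everywhere except inside pselect itself.
 *          sigemptyset(&blocked);
 *          sigaddset(&blocked, SIGUSR1);
 *          sigprocmask(SIG_BLOCK, &blocked, &waitmask);
 *          sigdelset(&waitmask, SIGUSR1);
 *
 *          FD_ZERO(&rfds);
 *          FD_SET(fd, &rfds);
 *          // The kernel installs waitmask, sleeps, and restores the old
 *          // mask on return, with no window where the signal is lost.
 *          return pselect(fd + 1, &rfds, NULL, NULL, NULL, &waitmask);
 *      }
 */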

void
select_cleanup_uthread(struct _select *sel)
{
    kfree_data(sel->ibits, 2 * sel->nbytes);
    sel->ibits = sel->obits = NULL;
    sel->nbytes = 0;
}

static int
select_grow_uthread_cache(struct _select *sel, uint32_t nbytes)
{
    uint32_t *buf;

    buf = kalloc_data(2 * nbytes, Z_WAITOK | Z_ZERO);
    if (buf) {
        select_cleanup_uthread(sel);
        sel->ibits = buf;
        sel->obits = buf + nbytes / sizeof(uint32_t);
        sel->nbytes = nbytes;
        return true;
    }
    return false;
}

static void
select_bzero_uthread_cache(struct _select *sel)
{
    bzero(sel->ibits, sel->nbytes * 2);
}

/*
 * Generic implementation of {,p}select.  Care: we type-pun uap across the two
 * syscalls, which differ slightly.  The first 4 arguments (nfds and the fd sets)
 * are identical.  The 5th (timeout) argument points to different types, so we
 * unpack in the syscall-specific code, but the generic code still does a null
 * check on this argument to determine if a timeout was specified.
 */
static int
select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval)
{
    struct uthread *uth = current_uthread();
    struct _select *sel = &uth->uu_select;
    struct _select_data *seldata = &uth->uu_save.uus_select_data;
    int error = 0;
    u_int ni, nw;

    *retval = 0;

    seldata->abstime = timeout;
    seldata->args = uap;
    seldata->retval = retval;
    seldata->count = 0;

    if (uap->nd < 0) {
        return EINVAL;
    }

    if (uap->nd > p->p_fd.fd_nfiles) {
        uap->nd = p->p_fd.fd_nfiles; /* forgiving; slightly wrong */
    }
    nw = howmany(uap->nd, NFDBITS);
    ni = nw * sizeof(fd_mask);

    /*
     * if the previously allocated space for the bits is smaller than
     * what is requested or no space has yet been allocated for this
     * thread, allocate enough space now.
     *
     * Note: If this process fails, select() will return EAGAIN; this
     * is the same thing poll() returns in a no-memory situation, but
     * it is not a POSIX compliant error code for select().
     */
    if (sel->nbytes >= (3 * ni)) {
        select_bzero_uthread_cache(sel);
    } else if (!select_grow_uthread_cache(sel, 3 * ni)) {
        return EAGAIN;
    }

    /*
     * get the bits from the user address space
     */
#define getbits(name, x) \
    (uap->name ? copyin(uap->name, &sel->ibits[(x) * nw], ni) : 0)

    if ((error = getbits(in, 0))) {
        return error;
    }
    if ((error = getbits(ou, 1))) {
        return error;
    }
    if ((error = getbits(ex, 2))) {
        return error;
    }
#undef getbits

    if ((error = selcount(p, sel->ibits, uap->nd, &seldata->count))) {
        return error;
    }

    if (uth->uu_selset == NULL) {
        uth->uu_selset = select_set_alloc();
    }
    return selprocess(p, 0, SEL_FIRSTPASS);
}

static int
selcontinue(int error)
{
    return selprocess(current_proc(), error, SEL_SECONDPASS);
}


/*
 * selprocess
 *
 * Parameters:  error           The error code from our caller
 *              sel_pass        The pass we are on
 */
int
selprocess(struct proc *p, int error, int sel_pass)
{
    struct uthread *uth = current_uthread();
    struct _select *sel = &uth->uu_select;
    struct _select_data *seldata = &uth->uu_save.uus_select_data;
    struct select_nocancel_args *uap = seldata->args;
    int *retval = seldata->retval;

    int unwind = 1;
    int prepost = 0;
    int somewakeup = 0;
    int doretry = 0;
    wait_result_t wait_result;

    if ((error != 0) && (sel_pass == SEL_FIRSTPASS)) {
        unwind = 0;
    }
    if (seldata->count == 0) {
        unwind = 0;
    }
retry:
    if (error != 0) {
        goto done;
    }

    OSBitOrAtomic(P_SELECT, &p->p_flag);

    /* skip scans if the select is just for timeouts */
    if (seldata->count) {
        error = selscan(p, sel, seldata, uap->nd, retval, sel_pass,
            uth->uu_selset);
        if (error || *retval) {
            goto done;
        }
        if (prepost || somewakeup) {
            /*
             * If the select was woken up prematurely, someone
             * else may already have consumed the data; go around
             * and select again if time permits.
             */
            prepost = 0;
            somewakeup = 0;
            doretry = 1;
        }
    }

    if (uap->tv) {
        uint64_t now;

        clock_get_uptime(&now);
        if (now >= seldata->abstime) {
            goto done;
        }
    }

    if (doretry) {
        /* cleanup obits and try again */
        doretry = 0;
        sel_pass = SEL_FIRSTPASS;
        goto retry;
    }

    /*
     * To effect a poll, the timeout argument should be
     * non-nil, pointing to a zero-valued timeval structure.
     */
    if (uap->tv && seldata->abstime == 0) {
        goto done;
    }

    /* No spurious wakeups due to collisions, no need to check for them */
    if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) {
        sel_pass = SEL_FIRSTPASS;
        goto retry;
    }

    OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);

    /* if the select is just for timeout, skip the check */
    if (seldata->count && (sel_pass == SEL_SECONDPASS)) {
        panic("selprocess: 2nd pass assertwaiting");
    }

    wait_result = waitq_assert_wait64_leeway(uth->uu_selset,
        NO_EVENT64, THREAD_ABORTSAFE,
        TIMEOUT_URGENCY_USER_NORMAL,
        seldata->abstime,
        TIMEOUT_NO_LEEWAY);
    if (wait_result != THREAD_AWAKENED) {
        /* there are no preposted events */
        error = tsleep1(NULL, PSOCK | PCATCH,
            "select", 0, selcontinue);
    } else {
        prepost = 1;
        error = 0;
    }

    if (error == 0) {
        sel_pass = SEL_SECONDPASS;
        if (!prepost) {
            somewakeup = 1;
        }
        goto retry;
    }
done:
    if (unwind) {
        seldrop(p, sel->ibits, uap->nd, seldata->count);
        select_set_reset(uth->uu_selset);
    }
    OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
    /* select is not restarted after signals... */
    if (error == ERESTART) {
        error = EINTR;
    }
    if (error == EWOULDBLOCK) {
        error = 0;
    }

    if (error == 0) {
        uint32_t nw = howmany(uap->nd, NFDBITS);
        uint32_t ni = nw * sizeof(fd_mask);

#define putbits(name, x) \
    (uap->name ? copyout(&sel->obits[(x) * nw], uap->name, ni) : 0)
        int e0 = putbits(in, 0);
        int e1 = putbits(ou, 1);
        int e2 = putbits(ex, 2);

        error = e0 ?: e1 ?: e2;
#undef putbits
    }

    if (error != EINTR && sel_pass == SEL_SECONDPASS && uth->uu_flag & UT_SAS_OLDMASK) {
        /* restore signal mask - continuation case */
        uth->uu_sigmask = uth->uu_oldmask;
        uth->uu_oldmask = 0;
        uth->uu_flag &= ~UT_SAS_OLDMASK;
    }

    return error;
}


/**
 * remove the fileproc's underlying waitq from the supplied waitq set;
 * clear FP_INSELECT when appropriate
 *
 * Parameters:
 *      fp      File proc that is potentially currently in select
 *      selset  Waitq set to which the fileproc may belong
 *              (usually this is the thread's private waitq set)
 * Conditions:
 *      proc_fdlock is held
 */
static void
selunlinkfp(struct fileproc *fp, struct select_set *selset)
{
    if (fp->fp_flags & FP_INSELECT) {
        if (fp->fp_guard_attrs) {
            if (fp->fp_guard->fpg_wset == selset) {
                fp->fp_guard->fpg_wset = NULL;
                fp->fp_flags &= ~FP_INSELECT;
            }
        } else {
            if (fp->fp_wset == selset) {
                fp->fp_wset = NULL;
                fp->fp_flags &= ~FP_INSELECT;
            }
        }
    }
}

/**
 * connect a fileproc to the given selset, potentially bridging to a waitq
 * pointed to indirectly by wq_data
 *
 * Parameters:
 *      fp      File proc potentially currently in select
 *      selset  Waitq set to which the fileproc should now belong
 *              (usually this is the thread's private waitq set)
 *
 * Conditions:
 *      proc_fdlock is held
 */
static void
sellinkfp(struct fileproc *fp, struct select_set *selset, waitq_link_t *linkp)
{
    if ((fp->fp_flags & FP_INSELECT) == 0) {
        if (fp->fp_guard_attrs) {
            fp->fp_guard->fpg_wset = selset;
        } else {
            fp->fp_wset = selset;
        }
        fp->fp_flags |= FP_INSELECT;
    } else {
        fp->fp_flags |= FP_SELCONFLICT;
        if (linkp->wqlh == NULL) {
            *linkp = waitq_link_alloc(WQT_SELECT_SET);
        }
        select_set_link(&select_conflict_queue, selset, linkp);
    }
}


/*
 * selscan
 *
 * Parameters:  p               Process performing the select
 *              sel             The per-thread select context structure
 *              nfd             The number of file descriptors to scan
 *              retval          The per thread system call return area
 *              sel_pass        Which pass this is; allowed values are
 *                              SEL_FIRSTPASS and SEL_SECONDPASS
 *              selset          The per thread wait queue set
 *
 * Returns:     0               Success
 *              EIO             Invalid p->p_fd field XXX Obsolete?
 *              EBADF           One of the files in the bit vector is
 *                              invalid.
 */
static int
selscan(struct proc *p, struct _select *sel, struct _select_data * seldata,
    int nfd, int32_t *retval, int sel_pass, struct select_set *selset)
{
    int msk, i, j, fd;
    u_int32_t bits;
    struct fileproc *fp;
    int n = 0;              /* count of bits */
    int nc = 0;             /* bit vector offset (nc'th bit) */
    static int flag[3] = { FREAD, FWRITE, 0 };
    u_int32_t *iptr, *optr;
    u_int nw;
    u_int32_t *ibits, *obits;
    int count;
    struct vfs_context context = {
        .vc_thread = current_thread(),
    };
    waitq_link_t link = WQL_NULL;
    void *s_data;

    ibits = sel->ibits;
    obits = sel->obits;

    nw = howmany(nfd, NFDBITS);

    count = seldata->count;

    nc = 0;
    if (!count) {
        *retval = 0;
        return 0;
    }

    if (sel_pass == SEL_FIRSTPASS) {
        /*
         * Make sure the waitq-set is all clean:
         *
         * select loops until it finds at least one event, however it
         * doesn't mean that the event that woke up select is still
         * fired by the time the second pass runs, and then
         * select_internal will loop back to a first pass.
         */
        select_set_reset(selset);
        s_data = &link;
    } else {
        s_data = NULL;
    }

    proc_fdlock(p);
    for (msk = 0; msk < 3; msk++) {
        iptr = (u_int32_t *)&ibits[msk * nw];
        optr = (u_int32_t *)&obits[msk * nw];

        for (i = 0; i < nfd; i += NFDBITS) {
            bits = iptr[i / NFDBITS];

            while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
                bits &= ~(1U << j);

                fp = fp_get_noref_locked(p, fd);
                if (fp == NULL) {
                    /*
                     * If we abort because of a bad
                     * fd, let the caller unwind...
                     */
                    proc_fdunlock(p);
                    return EBADF;
                }
                if (sel_pass == SEL_SECONDPASS) {
                    selunlinkfp(fp, selset);
                } else if (link.wqlh == NULL) {
                    link = waitq_link_alloc(WQT_SELECT_SET);
                }

                context.vc_ucred = fp->f_cred;

                /* The select; set the bit, if true */
                if (fo_select(fp, flag[msk], s_data, &context)) {
                    optr[fd / NFDBITS] |= (1U << (fd % NFDBITS));
                    n++;
                }
                if (sel_pass == SEL_FIRSTPASS) {
                    /*
                     * Hook up the thread's waitq set either to
                     * the fileproc structure, or to the global
                     * conflict queue: but only on the first
                     * select pass.
                     */
                    sellinkfp(fp, selset, &link);
                }
                nc++;
            }
        }
    }
    proc_fdunlock(p);

    if (link.wqlh) {
        waitq_link_free(WQT_SELECT_SET, link);
    }

    *retval = n;
    return 0;
}

static int poll_callback(struct kevent_qos_s *, kevent_ctx_t);

int
poll(struct proc *p, struct poll_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return poll_nocancel(p, (struct poll_nocancel_args *)uap, retval);
}


int
poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval)
{
    struct pollfd *fds = NULL;
    struct kqueue *kq = NULL;
    int error = 0;
    u_int nfds = uap->nfds;
    u_int rfds = 0;
    rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE);
    size_t ni = nfds * sizeof(struct pollfd);

    /*
     * This is kinda bogus.  We have fd limits, but that is not
     * really related to the size of the pollfd array.  Make sure
     * we let the process use at least FD_SETSIZE entries and at
     * least enough for the current limits.  We want to be reasonably
     * safe, but not overly restrictive.
     */
    if (nfds > OPEN_MAX ||
        (nfds > nofile && (proc_suser(p) || nfds > FD_SETSIZE))) {
        return EINVAL;
    }

    kq = kqueue_alloc(p);
    if (kq == NULL) {
        return EAGAIN;
    }

    if (nfds) {
        fds = (struct pollfd *)kalloc_data(ni, Z_WAITOK);
        if (NULL == fds) {
            error = EAGAIN;
            goto out;
        }

        error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd));
        if (error) {
            goto out;
        }
    }

    /* JMM - all this P_SELECT stuff is bogus */
    OSBitOrAtomic(P_SELECT, &p->p_flag);
    for (u_int i = 0; i < nfds; i++) {
        short events = fds[i].events;
        __assert_only int rc;

        /* per spec, ignore fd values below zero */
        if (fds[i].fd < 0) {
            fds[i].revents = 0;
            continue;
        }

        /* convert the poll event into a kqueue kevent */
        struct kevent_qos_s kev = {
            .ident = fds[i].fd,
            .flags = EV_ADD | EV_ONESHOT | EV_POLL,
            .udata = i,     /* Index into pollfd array */
        };

        /* Handle input events */
        if (events & (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP)) {
            kev.filter = EVFILT_READ;
            if (events & (POLLPRI | POLLRDBAND)) {
                kev.flags |= EV_OOBAND;
            }
            rc = kevent_register(kq, &kev, NULL);
            assert((rc & FILTER_REGISTER_WAIT) == 0);
        }

        /* Handle output events */
        if ((kev.flags & EV_ERROR) == 0 &&
            (events & (POLLOUT | POLLWRNORM | POLLWRBAND))) {
            kev.filter = EVFILT_WRITE;
            rc = kevent_register(kq, &kev, NULL);
            assert((rc & FILTER_REGISTER_WAIT) == 0);
        }

        /* Handle BSD extension vnode events */
        if ((kev.flags & EV_ERROR) == 0 &&
            (events & (POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE))) {
            kev.filter = EVFILT_VNODE;
            kev.fflags = 0;
            if (events & POLLEXTEND) {
                kev.fflags |= NOTE_EXTEND;
            }
            if (events & POLLATTRIB) {
                kev.fflags |= NOTE_ATTRIB;
            }
            if (events & POLLNLINK) {
                kev.fflags |= NOTE_LINK;
            }
            if (events & POLLWRITE) {
                kev.fflags |= NOTE_WRITE;
            }
            rc = kevent_register(kq, &kev, NULL);
            assert((rc & FILTER_REGISTER_WAIT) == 0);
        }

        if (kev.flags & EV_ERROR) {
            fds[i].revents = POLLNVAL;
            rfds++;
        } else {
            fds[i].revents = 0;
        }
    }

    /*
     * Did we have any trouble registering?
     * If user space passed 0 FDs, then respect any timeout value passed.
     * This is an extremely inefficient sleep.  If user space passed one or
     * more FDs, and we had trouble registering _all_ of them, then bail
     * out.  If a subset of the provided FDs failed to register, then we
     * will still call the kqueue_scan function.
     */
    if (nfds && (rfds == nfds)) {
        goto done;
    }

    /* scan for, and possibly wait for, the kevents to trigger */
    kevent_ctx_t kectx = kevent_get_context(current_thread());
    *kectx = (struct kevent_ctx_s){
        .kec_process_noutputs = rfds,
        .kec_process_flags    = KEVENT_FLAG_POLL,
        .kec_deadline         = 0, /* wait forever */
        .kec_poll_fds         = fds,
    };

    /*
     * If any events have trouble registering, an event has fired and we
     * shouldn't wait for events in kqueue_scan.
     */
    if (rfds) {
        kectx->kec_process_flags |= KEVENT_FLAG_IMMEDIATE;
    } else if (uap->timeout != -1) {
        clock_interval_to_deadline(uap->timeout, NSEC_PER_MSEC,
            &kectx->kec_deadline);
    }

    error = kqueue_scan(kq, kectx->kec_process_flags, kectx, poll_callback);
    rfds = kectx->kec_process_noutputs;

done:
    OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag);
    /* poll is not restarted after signals... */
    if (error == ERESTART) {
        error = EINTR;
    }
    if (error == 0) {
        error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd));
        *retval = rfds;
    }

out:
    kfree_data(fds, ni);

    kqueue_dealloc(kq);
    return error;
}
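
/*
 * Illustration (not part of this file): poll(2) is implemented above by
 * translating each pollfd into kevents on a private kqueue and scanning
 * that kqueue.  The userspace view stays the classic one (a hedged sketch;
 * demo_poll is a hypothetical name):
 *
 *      #include <poll.h>
 *
 *      int
 *      demo_poll(int fd)
 *      {
 *          struct pollfd pfd = {
 *              .fd = fd,
 *              .events = POLLIN | POLLPRI,
 *          };
 *          // 1000ms timeout; the kernel registers EVFILT_READ (with
 *          // EV_OOBAND for POLLPRI) and converts triggered kevents
 *          // back into revents in poll_callback().
 *          int n = poll(&pfd, 1, 1000);
 *          if (n > 0 && (pfd.revents & POLLIN)) {
 *              return 1;       // readable
 *          }
 *          return n;           // 0 on timeout, -1 on error
 *      }
 */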
1849
1850static int
1851poll_callback(struct kevent_qos_s *kevp, kevent_ctx_t kectx)
1852{
1853 assert(kectx->kec_process_flags & KEVENT_FLAG_POLL);
1854 struct pollfd *fds = &kectx->kec_poll_fds[kevp->udata];
1855
1856 short prev_revents = fds->revents;
1857 short mask = 0;
1858
1859 /* convert the results back into revents */
1860 if (kevp->flags & EV_EOF) {
1861 fds->revents |= POLLHUP;
1862 }
1863 if (kevp->flags & EV_ERROR) {
1864 fds->revents |= POLLERR;
1865 }
1866
1867 switch (kevp->filter) {
1868 case EVFILT_READ:
1869 if (fds->revents & POLLHUP) {
1870 mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND);
1871 } else {
1872 mask = (POLLIN | POLLRDNORM);
1873 if (kevp->flags & EV_OOBAND) {
1874 mask |= (POLLPRI | POLLRDBAND);
1875 }
1876 }
1877 fds->revents |= (fds->events & mask);
1878 break;
1879
1880 case EVFILT_WRITE:
1881 if (!(fds->revents & POLLHUP)) {
1882 fds->revents |= (fds->events & (POLLOUT | POLLWRNORM | POLLWRBAND));
1883 }
1884 break;
1885
1886 case EVFILT_VNODE:
1887 if (kevp->fflags & NOTE_EXTEND) {
1888 fds->revents |= (fds->events & POLLEXTEND);
1889 }
1890 if (kevp->fflags & NOTE_ATTRIB) {
1891 fds->revents |= (fds->events & POLLATTRIB);
1892 }
1893 if (kevp->fflags & NOTE_LINK) {
1894 fds->revents |= (fds->events & POLLNLINK);
1895 }
1896 if (kevp->fflags & NOTE_WRITE) {
1897 fds->revents |= (fds->events & POLLWRITE);
1898 }
1899 break;
1900 }
1901
1902 if (fds->revents != 0 && prev_revents == 0) {
1903 kectx->kec_process_noutputs++;
1904 }
1905
1906 return 0;
1907}
1908
1909int
1910seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p)
1911{
1912 return 1;
1913}
1914
1915/*
1916 * selcount
1917 *
1918 * Count the number of bits set in the input bit vector, and establish an
1919 * outstanding fp->fp_iocount for each of the descriptors which will be in
1920 * use in the select operation.
1921 *
1922 * Parameters: p The process doing the select
1923 * ibits The input bit vector
1924 * nfd The number of fd's in the vector
1925 * countp Pointer to where to store the bit count
1926 *
1927 * Returns: 0 Success
1928 * EIO Bad per process open file table
1929 * EBADF One of the bits in the input bit vector
1930 * references an invalid fd
1931 *
1932 * Implicit: *countp (modified) Count of fd's
1933 *
1934 * Notes: This function is the first pass under the proc_fdlock() that
1935 * permits us to recognize invalid descriptors in the bit vector;
1936 * the may, however, not remain valid through the drop and
1937 * later reacquisition of the proc_fdlock().
1938 */
1939static int
1940selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp)
1941{
1942 int msk, i, j, fd;
1943 u_int32_t bits;
1944 struct fileproc *fp;
1945 int n = 0;
1946 u_int32_t *iptr;
1947 u_int nw;
1948 int error = 0;
1949 int need_wakeup = 0;
1950
1951 nw = howmany(nfd, NFDBITS);
1952
1953 proc_fdlock(p);
1954 for (msk = 0; msk < 3; msk++) {
1955 iptr = (u_int32_t *)&ibits[msk * nw];
1956 for (i = 0; i < nfd; i += NFDBITS) {
1957 bits = iptr[i / NFDBITS];
1958 while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
1959 bits &= ~(1U << j);
1960
1961 fp = fp_get_noref_locked(p, fd);
1962 if (fp == NULL) {
1963 *countp = 0;
1964 error = EBADF;
1965 goto bad;
1966 }
1967 os_ref_retain_locked(rc: &fp->fp_iocount);
1968 n++;
1969 }
1970 }
1971 }
1972 proc_fdunlock(p);
1973
1974 *countp = n;
1975 return 0;
1976
1977bad:
1978 if (n == 0) {
1979 goto out;
1980 }
1981 /* Ignore error return; it's already EBADF */
1982 (void)seldrop_locked(p, ibits, nfd, lim: n, need_wakeup: &need_wakeup);
1983
1984out:
1985 proc_fdunlock(p);
1986 if (need_wakeup) {
1987 wakeup(chan: &p->p_fd.fd_fpdrainwait);
1988 }
1989 return error;
1990}
1991
1992
1993/*
1994 * seldrop_locked
1995 *
1996 * Drop outstanding wait queue references set up during selscan(); drop the
1997 * outstanding per fileproc fp_iocount picked up during the selcount().
1998 *
1999 * Parameters: p Process performing the select
2000 * ibits Input bit bector of fd's
2001 * nfd Number of fd's
2002 * lim Limit to number of vector entries to
2003 * consider, or -1 for "all"
2004 * inselect True if
2005 * need_wakeup Pointer to flag to set to do a wakeup
2006 * if f_iocont on any descriptor goes to 0
2007 *
2008 * Returns: 0 Success
2009 * EBADF One or more fds in the bit vector
2010 * were invalid, but the rest
2011 * were successfully dropped
2012 *
2013 * Notes: An fd make become bad while the proc_fdlock() is not held,
2014 * if a multithreaded application closes the fd out from under
2015 * the in progress select. In this case, we still have to
2016 * clean up after the set up on the remaining fds.
2017 */
static int
seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup)
{
	int msk, i, j, nc, fd;
	u_int32_t bits;
	struct fileproc *fp;
	u_int32_t *iptr;
	u_int nw;
	int error = 0;
	uthread_t uth = current_uthread();
	struct _select_data *seldata;

	*need_wakeup = 0;

	nw = howmany(nfd, NFDBITS);
	seldata = &uth->uu_save.uus_select_data;

	nc = 0;
	for (msk = 0; msk < 3; msk++) {
		iptr = (u_int32_t *)&ibits[msk * nw];
		for (i = 0; i < nfd; i += NFDBITS) {
			bits = iptr[i / NFDBITS];
			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
				bits &= ~(1U << j);
				/*
				 * If we've already dropped as many as were
				 * counted/scanned, then we are done.
				 */
				if (nc >= lim) {
					goto done;
				}

				/*
				 * We took an I/O reference in selcount,
				 * so the fp can't possibly be NULL.
				 */
				fp = fp_get_noref_locked_with_iocount(p, fd);
				selunlinkfp(fp, uth->uu_selset);

				nc++;

				const os_ref_count_t refc = os_ref_release_locked(&fp->fp_iocount);
				if (0 == refc) {
					panic("fp_iocount overdecrement!");
				}

				if (1 == refc) {
					/*
					 * The last iocount is responsible for
					 * clearing the FP_SELCONFLICT flag -
					 * even if we didn't set it - and is
					 * also responsible for waking up
					 * anyone waiting on iocounts to drain.
					 */
					if (fp->fp_flags & FP_SELCONFLICT) {
						fp->fp_flags &= ~FP_SELCONFLICT;
					}
					if (p->p_fd.fd_fpdrainwait) {
						p->p_fd.fd_fpdrainwait = 0;
						*need_wakeup = 1;
					}
				}
			}
		}
	}
done:
	return error;
}


static int
seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim)
{
	int error;
	int need_wakeup = 0;

	proc_fdlock(p);
	error = seldrop_locked(p, ibits, nfd, lim, &need_wakeup);
	proc_fdunlock(p);
	if (need_wakeup) {
		wakeup(&p->p_fd.fd_fpdrainwait);
	}
	return error;
}
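
/*
 * Illustrative only: the canonical pairing of the helpers above, as used by
 * the select() implementation earlier in this file. The flow below is a
 * sketch, not a verbatim excerpt:
 *
 *	int count = 0;
 *	if ((error = selcount(p, ibits, nfd, &count)) != 0)
 *		return error;
 *	... scan the descriptors via selscan(), possibly block ...
 *	seldrop(p, ibits, nfd, count);
 *
 * Every fp_iocount reference taken by selcount() must be returned through
 * exactly one seldrop()/seldrop_locked() call, passing the count that
 * selcount() reported as "lim" so a partially counted vector is unwound
 * correctly.
 */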

/*
 * Record a select request.
 */
void
selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data)
{
	struct select_set *selset = current_uthread()->uu_selset;

	/* do not record if this is second pass of select */
	if (!s_data) {
		return;
	}

	if (selset == SELSPEC_RECORD_MARKER) {
		/*
		 * The kevent subsystem is trying to sniff
		 * the selinfo::si_note to attach to.
		 */
		((selspec_record_hook_t)s_data)(sip);
	} else {
		waitq_link_t *linkp = s_data;

		if (!waitq_is_valid(&sip->si_waitq)) {
			waitq_init(&sip->si_waitq, WQT_SELECT, SYNC_POLICY_FIFO);
		}

		/* note: this checks for pre-existing linkage */
		select_set_link(&sip->si_waitq, selset, linkp);
	}
}

static void
selwakeup_internal(struct selinfo *sip, long hint, wait_result_t wr)
{
	if (sip->si_flags & SI_SELSPEC) {
		/*
		 * The "primitive" lock is held.
		 * The knote lock is not held.
		 *
		 * All knotes will transition their kn_hook to NULL and we will
		 * reinitialize the primitive's klist.
		 */
		lck_spin_lock(&selspec_lock);
		knote(&sip->si_note, hint, /*autodetach=*/ true);
		lck_spin_unlock(&selspec_lock);
		sip->si_flags &= ~SI_SELSPEC;
	}

	/*
	 * After selrecord() has been called, selinfo owners must call
	 * at least one of selwakeup() or selthreadclear().
	 *
	 * Use this opportunity to deinit the waitq
	 * so that all linkages are garbage collected
	 * in a combined wakeup-all + unlink + deinit call.
	 */
	select_waitq_wakeup_and_deinit(&sip->si_waitq, NO_EVENT64, wr);
}


void
selwakeup(struct selinfo *sip)
{
	selwakeup_internal(sip, 0, THREAD_AWAKENED);
}

void
selthreadclear(struct selinfo *sip)
{
	selwakeup_internal(sip, NOTE_REVOKE, THREAD_RESTART);
}
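
/*
 * Illustrative only: the classic BSD pattern for a driver that supports
 * select(2) on a soft-state structure. "mydev_softc", "mydev_select" and
 * "mydev_rxintr" are hypothetical names; only selrecord(), selwakeup() and
 * selthreadclear() come from this file:
 *
 *	struct mydev_softc {
 *		struct selinfo	sc_rsel;	// embedded selinfo
 *		int		sc_havedata;
 *	};
 *
 *	int
 *	mydev_select(struct mydev_softc *sc, void *wql, struct proc *p)
 *	{
 *		if (sc->sc_havedata)
 *			return 1;			// ready now
 *		selrecord(p, &sc->sc_rsel, wql);	// park the selector
 *		return 0;
 *	}
 *
 *	void
 *	mydev_rxintr(struct mydev_softc *sc)
 *	{
 *		sc->sc_havedata = 1;
 *		selwakeup(&sc->sc_rsel);	// wake parked selectors
 *	}
 *
 * On teardown the owner must call selthreadclear(&sc->sc_rsel) so any
 * blocked threads are restarted before the selinfo is freed.
 */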


/*
 * gethostuuid
 *
 * Description: Get the host UUID from IOKit and return it to user space.
 *
 * Parameters:	uuid_buf		Pointer to buffer to receive UUID
 *		timeout			Timespec for timeout
 *
 * Returns:	0			Success
 *		EWOULDBLOCK		Timeout is too short
 *		copyout:EFAULT		Bad user buffer
 *		mac_system_check_info:EPERM	Client not allowed to perform this operation
 *
 * Notes:	A timeout seems redundant, since if it's tolerable to not
 *		have a system UUID in hand, then why ask for one?
 */
int
gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
{
	kern_return_t kret;
	int error;
	mach_timespec_t mach_ts;        /* for IOKit call */
	__darwin_uuid_t uuid_kern = {}; /* for IOKit call */

	/* Check entitlement */
	if (!IOCurrentTaskHasEntitlement("com.apple.private.getprivatesysid")) {
#if !defined(XNU_TARGET_OS_OSX)
#if CONFIG_MACF
		if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) {
			/* EPERM invokes userspace upcall if present */
			return error;
		}
#endif
#endif
	}

	/* Convert the 32/64 bit timespec into a mach_timespec_t */
	if (proc_is64bit(p)) {
		struct user64_timespec ts;
		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error) {
			return error;
		}
		mach_ts.tv_sec = (unsigned int)ts.tv_sec;
		mach_ts.tv_nsec = (clock_res_t)ts.tv_nsec;
	} else {
		struct user32_timespec ts;
		error = copyin(uap->timeoutp, &ts, sizeof(ts));
		if (error) {
			return error;
		}
		mach_ts.tv_sec = ts.tv_sec;
		mach_ts.tv_nsec = ts.tv_nsec;
	}

	/* Call IOKit with the stack buffer to get the UUID */
	kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);

	/*
	 * If we get it, copy out the data to the user buffer; note that a
	 * uuid_t is an array of characters, so this is size invariant for
	 * 32 vs. 64 bit.
	 */
	if (kret == KERN_SUCCESS) {
		error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
	} else {
		error = EWOULDBLOCK;
	}

	return error;
}
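
/*
 * Illustrative only: how this syscall is reached from user space through the
 * libc wrapper gethostuuid(2). A sketch of a standalone user program, not
 * kernel code (newer OS releases may additionally gate the call on
 * entitlements, as the handler above shows):
 *
 *	#include <unistd.h>
 *	#include <uuid/uuid.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uuid_t uuid;
 *		uuid_string_t str;
 *		struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		if (gethostuuid(uuid, &timeout) != 0) {
 *			perror("gethostuuid");	// e.g. EWOULDBLOCK on timeout
 *			return 1;
 *		}
 *		uuid_unparse(uuid, str);
 *		printf("%s\n", str);
 *		return 0;
 *	}
 */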

/*
 * ledger
 *
 * Description:	Omnibus system call for ledger operations
 */
int
ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
{
#if !CONFIG_MACF
#pragma unused(p)
#endif
	int rval, pid, len, error;
#ifdef LEDGER_DEBUG
	struct ledger_limit_args lla;
#endif
	task_t task;
	proc_t proc;

	/* Finish copying in the necessary args before taking the proc lock */
	error = 0;
	len = 0;
	if (args->cmd == LEDGER_ENTRY_INFO) {
		error = copyin(args->arg3, (char *)&len, sizeof(len));
	} else if (args->cmd == LEDGER_TEMPLATE_INFO) {
		error = copyin(args->arg2, (char *)&len, sizeof(len));
	} else if (args->cmd == LEDGER_LIMIT)
#ifdef LEDGER_DEBUG
	{ error = copyin(args->arg2, (char *)&lla, sizeof(lla)); }
#else
	{ return EINVAL; }
#endif
	else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD)) {
		return EINVAL;
	}

	if (error) {
		return error;
	}
	if (len < 0) {
		return EINVAL;
	}

	rval = 0;
	if (args->cmd != LEDGER_TEMPLATE_INFO) {
		pid = (int)args->arg1;
		proc = proc_find(pid);
		if (proc == NULL) {
			return ESRCH;
		}

#if CONFIG_MACF
		error = mac_proc_check_ledger(p, proc, args->cmd);
		if (error) {
			proc_rele(proc);
			return error;
		}
#endif

		task = proc_task(proc);
	}

	switch (args->cmd) {
#ifdef LEDGER_DEBUG
	case LEDGER_LIMIT: {
		if (!kauth_cred_issuser(kauth_cred_get())) {
			rval = EPERM;
			proc_rele(proc);
			break;
		}
		rval = ledger_limit(task, &lla);
		proc_rele(proc);
		break;
	}
#endif
	case LEDGER_INFO: {
		struct ledger_info info = {};

		rval = ledger_info(task, &info);
		proc_rele(proc);
		if (rval == 0) {
			rval = copyout(&info, args->arg2,
			    sizeof(info));
		}
		break;
	}

	case LEDGER_ENTRY_INFO: {
		void *buf;
		int sz;

#if CONFIG_MEMORYSTATUS
		task_ledger_settle_dirty_time(task);
#endif /* CONFIG_MEMORYSTATUS */

		rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
		proc_rele(proc);
		if ((rval == 0) && (len >= 0)) {
			sz = len * sizeof(struct ledger_entry_info);
			rval = copyout(buf, args->arg2, sz);
			kfree_data(buf, sz);
		}
		if (rval == 0) {
			rval = copyout(&len, args->arg3, sizeof(len));
		}
		break;
	}

	case LEDGER_TEMPLATE_INFO: {
		void *buf;
		int sz;

		rval = ledger_template_info(&buf, &len);
		if ((rval == 0) && (len >= 0)) {
			sz = len * sizeof(struct ledger_template_info);
			rval = copyout(buf, args->arg1, sz);
			kfree_data(buf, sz);
		}
		if (rval == 0) {
			rval = copyout(&len, args->arg2, sizeof(len));
		}
		break;
	}

	default:
		panic("ledger syscall logic error -- command type %d", args->cmd);
		proc_rele(proc);
		rval = EINVAL;
	}

	return rval;
}

int
telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
{
	int error = 0;

	switch (args->cmd) {
#if CONFIG_TELEMETRY
	case TELEMETRY_CMD_TIMER_EVENT:
		error = telemetry_timer_event(args->deadline, args->interval, args->leeway);
		break;
	case TELEMETRY_CMD_PMI_SETUP:
		error = telemetry_pmi_setup((enum telemetry_pmi)args->deadline, args->interval);
		break;
#endif /* CONFIG_TELEMETRY */
	case TELEMETRY_CMD_VOUCHER_NAME:
		if (thread_set_voucher_name((mach_port_name_t)args->deadline)) {
			error = EINVAL;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}

/*
 * Logging
 *
 * Description: syscall to access kernel logging from userspace
 *
 * Args:
 *	tag - used for syncing with userspace on the version.
 *	flags - flags used by the syscall.
 *	buffer - userspace address of string to copy.
 *	size - size of buffer.
 */
int
log_data(__unused struct proc *p, struct log_data_args *args, int *retval)
{
	unsigned int tag = args->tag;
	unsigned int flags = args->flags;
	user_addr_t buffer = args->buffer;
	unsigned int size = args->size;
	int ret = 0;
	*retval = 0;

	/* Only DEXTs are supposed to use this syscall. */
	if (!task_is_driver(current_task())) {
		return EPERM;
	}

	/*
	 * The tag synchronizes the syscall version with userspace.
	 * Tag == 0 => flags == OS_LOG_TYPE
	 */
	if (tag != 0) {
		return EINVAL;
	}

	/*
	 * OS_LOG_TYPE values are defined in libkern/os/log.h
	 * In userspace they are defined in libtrace/os/log.h
	 */
	if (flags != OS_LOG_TYPE_DEFAULT &&
	    flags != OS_LOG_TYPE_INFO &&
	    flags != OS_LOG_TYPE_DEBUG &&
	    flags != OS_LOG_TYPE_ERROR &&
	    flags != OS_LOG_TYPE_FAULT) {
		return EINVAL;
	}

	if (size == 0) {
		return EINVAL;
	}

	/* truncate to OS_LOG_DATA_MAX_SIZE */
	if (size > OS_LOG_DATA_MAX_SIZE) {
		size = OS_LOG_DATA_MAX_SIZE;
	}

	char *log_msg = (char *)kalloc_data(size, Z_WAITOK);
	if (!log_msg) {
		return ENOMEM;
	}

	if (copyin(buffer, log_msg, size) != 0) {
		ret = EFAULT;
		goto out;
	}
	log_msg[size - 1] = '\0';

	/*
	 * This will log to dmesg and logd.
	 * The call will fail if the current
	 * process is not a DriverKit process.
	 */
	os_log_driverKit(&ret, OS_LOG_DEFAULT, (os_log_type_t)flags, "%s", log_msg);

out:
	if (log_msg != NULL) {
		kfree_data(log_msg, size);
	}

	return ret;
}

#if DEVELOPMENT || DEBUG

static int
sysctl_mpsc_test_pingpong SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t value = 0;
	int error;

	error = SYSCTL_IN(req, &value, sizeof(value));
	if (error) {
		return error;
	}

	if (req->newptr) {
		error = mpsc_test_pingpong(value, &value);
		if (error == 0) {
			error = SYSCTL_OUT(req, &value, sizeof(value));
		}
	}

	return error;
}
SYSCTL_PROC(_kern, OID_AUTO, mpsc_test_pingpong, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mpsc_test_pingpong, "Q", "MPSC tests: pingpong");
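
/*
 * Illustrative only: exercising the test knob above from user space on a
 * kernel built with DEVELOPMENT || DEBUG. Writing a round count runs the
 * ping-pong test and reads back a result value (a sketch, not shipping code):
 *
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t rounds = 100 * 1000;
 *		uint64_t out = 0;
 *		size_t outlen = sizeof(out);
 *
 *		if (sysctlbyname("kern.mpsc_test_pingpong", &out, &outlen,
 *		    &rounds, sizeof(rounds)) != 0) {
 *			perror("sysctlbyname");
 *			return 1;
 *		}
 *		printf("result: %llu\n", (unsigned long long)out);
 *		return 0;
 *	}
 */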

#endif /* DEVELOPMENT || DEBUG */

/* Telemetry, microstackshots */

SYSCTL_NODE(_kern, OID_AUTO, microstackshot, CTLFLAG_RD | CTLFLAG_LOCKED, 0,
    "microstackshot info");

extern uint32_t telemetry_sample_rate;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, interrupt_sample_rate,
    CTLFLAG_RD | CTLFLAG_LOCKED, &telemetry_sample_rate, 0,
    "interrupt-based sampling rate in Hz");

#if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES)

extern uint64_t mt_microstackshot_period;
SYSCTL_QUAD(_kern_microstackshot, OID_AUTO, pmi_sample_period,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_period,
    "PMI sampling period");
extern unsigned int mt_microstackshot_ctr;
SYSCTL_UINT(_kern_microstackshot, OID_AUTO, pmi_sample_counter,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mt_microstackshot_ctr, 0,
    "PMI counter");

#endif /* defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) */

/* Remote Time API */
SYSCTL_NODE(_machdep, OID_AUTO, remotetime, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "Remote time api");

#if DEVELOPMENT || DEBUG
#if CONFIG_MACH_BRIDGE_SEND_TIME
extern _Atomic uint32_t bt_init_flag;
extern uint32_t mach_bridge_timer_enable(uint32_t, int);

SYSCTL_INT(_machdep_remotetime, OID_AUTO, bridge_timer_init_flag,
    CTLFLAG_RD | CTLFLAG_LOCKED, &bt_init_flag, 0, "");

static int sysctl_mach_bridge_timer_enable SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint32_t value = 0;
	int error = 0;
	/* User is querying buffer size */
	if (req->oldptr == USER_ADDR_NULL && req->newptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(value);
		return 0;
	}
	if (os_atomic_load(&bt_init_flag, acquire)) {
		if (req->newptr) {
			int new_value = 0;
			error = SYSCTL_IN(req, &new_value, sizeof(new_value));
			if (error) {
				return error;
			}
			if (new_value == 0 || new_value == 1) {
				value = mach_bridge_timer_enable(new_value, 1);
			} else {
				return EPERM;
			}
		} else {
			value = mach_bridge_timer_enable(0, 0);
		}
	}
	error = SYSCTL_OUT(req, &value, sizeof(value));
	return error;
}

SYSCTL_PROC(_machdep_remotetime, OID_AUTO, bridge_timer_enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mach_bridge_timer_enable, "I", "");

#endif /* CONFIG_MACH_BRIDGE_SEND_TIME */

static int sysctl_mach_bridge_remote_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t ltime = 0, rtime = 0;
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(rtime);
		return 0;
	}
	if (req->newptr) {
		int error = SYSCTL_IN(req, &ltime, sizeof(ltime));
		if (error) {
			return error;
		}
	}
	rtime = mach_bridge_remote_time(ltime);
	return SYSCTL_OUT(req, &rtime, sizeof(rtime));
}
SYSCTL_PROC(_machdep_remotetime, OID_AUTO, mach_bridge_remote_time,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mach_bridge_remote_time, "Q", "");
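
/*
 * Illustrative only: converting a local timestamp to remote time from user
 * space on a DEVELOPMENT || DEBUG kernel. The handler above passes the
 * written value straight to mach_bridge_remote_time(); writing 0 (no
 * specific local timestamp) is assumed here to mean "now" (a sketch, not
 * shipping code):
 *
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t local = 0;
 *		uint64_t remote = 0;
 *		size_t len = sizeof(remote);
 *
 *		if (sysctlbyname("machdep.remotetime.mach_bridge_remote_time",
 *		    &remote, &len, &local, sizeof(local)) != 0) {
 *			perror("sysctlbyname");
 *			return 1;
 *		}
 *		printf("remote time: %llu\n", (unsigned long long)remote);
 *		return 0;
 *	}
 */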

#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_MACH_BRIDGE_RECV_TIME
extern struct bt_params bt_params_get_latest(void);

static int sysctl_mach_bridge_conversion_params SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	struct bt_params params = {};
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(struct bt_params);
		return 0;
	}
	if (req->newptr) {
		return EPERM;
	}
	params = bt_params_get_latest();
	return SYSCTL_OUT(req, &params, MIN(sizeof(params), req->oldlen));
}

SYSCTL_PROC(_machdep_remotetime, OID_AUTO, conversion_params,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0,
    0, sysctl_mach_bridge_conversion_params, "S,bt_params", "");
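
/*
 * Illustrative only: the two-phase read pattern the handler above expects.
 * A NULL oldptr probes for the buffer size (via req->oldidx), then the
 * caller reads the struct for real. "struct bt_params" is kernel-private,
 * so a user-space caller would need a matching local definition; this
 * sketch elides that and just sizes a raw buffer:
 *
 *	size_t len = 0;
 *	sysctlbyname("machdep.remotetime.conversion_params", NULL, &len, NULL, 0);
 *	char buf[len];	// then fetch for real:
 *	sysctlbyname("machdep.remotetime.conversion_params", buf, &len, NULL, 0);
 */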

#endif /* CONFIG_MACH_BRIDGE_RECV_TIME */

#if DEVELOPMENT || DEBUG

#include <pexpert/pexpert.h>
extern int32_t sysctl_get_bound_cpuid(void);
extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
static int
sysctl_kern_sched_thread_bind_cpu SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	/*
	 * DO NOT remove this bootarg guard or make this non-development.
	 * This kind of binding should only be used for tests and
	 * experiments in a custom configuration, never shipping code.
	 */

	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
		return ENOENT;
	}

	int32_t cpuid = sysctl_get_bound_cpuid();

	int32_t new_value;
	int changed;
	int error = sysctl_io_number(req, cpuid, sizeof(cpuid), &new_value, &changed);
	if (error) {
		return error;
	}

	if (changed) {
		kern_return_t kr = sysctl_thread_bind_cpuid(new_value);

		if (kr == KERN_NOT_SUPPORTED) {
			return ENOTSUP;
		}

		if (kr == KERN_INVALID_VALUE) {
			return ERANGE;
		}
	}

	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cpu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_thread_bind_cpu, "I", "");
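
/*
 * Illustrative only: binding the calling thread to a CPU from user space.
 * This requires a DEVELOPMENT || DEBUG kernel booted with the enable_skstb
 * boot-arg, and is for tests and experiments, never shipping code (a sketch):
 *
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *
 *	static int
 *	bind_self_to_cpu(int32_t cpuid)
 *	{
 *		// a write binds; a read-back returns the current binding
 *		return sysctlbyname("kern.sched_thread_bind_cpu",
 *		    NULL, NULL, &cpuid, sizeof(cpuid));
 *	}
 */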

#if __AMP__
extern char sysctl_get_bound_cluster_type(void);
extern void sysctl_thread_bind_cluster_type(char cluster_type);
static int
sysctl_kern_sched_thread_bind_cluster_type SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char buff[4];

	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
		return ENOENT;
	}

	int error = SYSCTL_IN(req, buff, 1);
	if (error) {
		return error;
	}
	char cluster_type = buff[0];

	if (!req->newptr) {
		goto out;
	}

	if (cluster_type != 'E' &&
	    cluster_type != 'e' &&
	    cluster_type != 'P' &&
	    cluster_type != 'p') {
		return EINVAL;
	}

	sysctl_thread_bind_cluster_type(cluster_type);

out:
	cluster_type = sysctl_get_bound_cluster_type();
	buff[0] = cluster_type;

	return SYSCTL_OUT(req, buff, 1);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_thread_bind_cluster_type, "A", "");

extern char sysctl_get_task_cluster_type(void);
extern void sysctl_task_set_cluster_type(char cluster_type);
static int
sysctl_kern_sched_task_set_cluster_type SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char buff[4];

	if (!PE_parse_boot_argn("enable_skstsct", NULL, 0)) {
		return ENOENT;
	}

	int error = SYSCTL_IN(req, buff, 1);
	if (error) {
		return error;
	}
	char cluster_type = buff[0];

	if (!req->newptr) {
		goto out;
	}

	if (cluster_type != 'E' &&
	    cluster_type != 'e' &&
	    cluster_type != 'P' &&
	    cluster_type != 'p') {
		return EINVAL;
	}

	sysctl_task_set_cluster_type(cluster_type);
out:
	cluster_type = sysctl_get_task_cluster_type();
	buff[0] = cluster_type;

	return SYSCTL_OUT(req, buff, 1);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_task_set_cluster_type, "A", "");

extern kern_return_t thread_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options);
extern uint32_t thread_bound_cluster_id(thread_t);
static int
sysctl_kern_sched_thread_bind_cluster_id SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (!PE_parse_boot_argn("enable_skstb", NULL, 0)) {
		return ENOENT;
	}

	thread_t self = current_thread();
	int32_t cluster_id = thread_bound_cluster_id(self);
	int32_t new_value;
	int changed;
	int error = sysctl_io_number(req, cluster_id, sizeof(cluster_id), &new_value, &changed);
	if (error) {
		return error;
	}

	if (changed) {
		/*
		 * This sysctl binds the thread to the cluster without any flags, which
		 * means it will be hard bound and not check eligibility.
		 */
		kern_return_t kr = thread_bind_cluster_id(self, new_value, 0);
		if (kr == KERN_INVALID_VALUE) {
			return ERANGE;
		}

		if (kr != KERN_SUCCESS) {
			return EINVAL;
		}
	}

	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_bind_cluster_id, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_sched_thread_bind_cluster_id, "I", "");

#if CONFIG_SCHED_EDGE

extern int sched_edge_restrict_ut;
SYSCTL_INT(_kern, OID_AUTO, sched_edge_restrict_ut, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_restrict_ut, 0, "Edge Scheduler Restrict UT Threads");
extern int sched_edge_restrict_bg;
SYSCTL_INT(_kern, OID_AUTO, sched_edge_restrict_bg, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_restrict_bg, 0, "Edge Scheduler Restrict BG Threads");
extern int sched_edge_migrate_ipi_immediate;
SYSCTL_INT(_kern, OID_AUTO, sched_edge_migrate_ipi_immediate, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_migrate_ipi_immediate, 0, "Edge Scheduler uses immediate IPIs for migration event based on execution latency");

#endif /* CONFIG_SCHED_EDGE */

#endif /* __AMP__ */

#if SCHED_HYGIENE_DEBUG

SYSCTL_QUAD(_kern, OID_AUTO, interrupt_masked_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_timeout,
    "Interrupt masked duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, interrupt_masked_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &interrupt_masked_debug_mode, 0,
    "Enable interrupt masked tracing or panic (0: off, 1: trace, 2: panic)");

SYSCTL_QUAD(_kern, OID_AUTO, sched_preemption_disable_threshold_mt, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_threshold_mt,
    "Preemption disablement duration after which a tracepoint is emitted or the device panics (in mach timebase units)");

SYSCTL_INT(_kern, OID_AUTO, sched_preemption_disable_debug_mode, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sched_preemption_disable_debug_mode, 0,
    "Enable preemption disablement tracing or panic (0: off, 1: trace, 2: panic)");

static int
sysctl_sched_preemption_disable_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	extern unsigned int preemption_disable_get_max_durations(uint64_t *durations, size_t count);
	extern void preemption_disable_reset_max_durations(void);

	uint64_t stats[MAX_CPUS]; /* maximum per CPU */

	unsigned int ncpus = preemption_disable_get_max_durations(stats, MAX_CPUS);
	if (req->newlen > 0) {
		/* Reset when attempting to write to the sysctl. */
		preemption_disable_reset_max_durations();
	}

	return sysctl_io_opaque(req, stats, ncpus * sizeof(uint64_t), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_preemption_disable_stats,
    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_sched_preemption_disable_stats, "I", "Preemption disablement statistics");

#endif /* SCHED_HYGIENE_DEBUG */

/* used for testing by exception_tests */
extern uint32_t ipc_control_port_options;
SYSCTL_INT(_kern, OID_AUTO, ipc_control_port_options,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ipc_control_port_options, 0, "");

#endif /* DEVELOPMENT || DEBUG */

extern uint32_t task_exc_guard_default;

SYSCTL_INT(_kern, OID_AUTO, task_exc_guard_default,
    CTLFLAG_RD | CTLFLAG_LOCKED, &task_exc_guard_default, 0, "");


static int
sysctl_kern_tcsm_available SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint32_t value = machine_csv(CPUVN_CI) ? 1 : 0;

	if (req->newptr) {
		return EINVAL;
	}

	return SYSCTL_OUT(req, &value, sizeof(value));
}
SYSCTL_PROC(_kern, OID_AUTO, tcsm_available,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_tcsm_available, "I", "");


static int
sysctl_kern_tcsm_enable SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint32_t soflags = 0;
	uint32_t old_value = thread_get_no_smt() ? 1 : 0;

	int error = SYSCTL_IN(req, &soflags, sizeof(soflags));
	if (error) {
		return error;
	}

	if (soflags && machine_csv(CPUVN_CI)) {
		thread_set_no_smt(true);
		machine_tecs(current_thread());
	}

	return SYSCTL_OUT(req, &old_value, sizeof(old_value));
}
SYSCTL_PROC(_kern, OID_AUTO, tcsm_enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_tcsm_enable, "I", "");
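
/*
 * Illustrative only: the intended user-space dance for these two OIDs -
 * probe kern.tcsm_available, then opt the current thread in via
 * kern.tcsm_enable (a sketch, not shipping code):
 *
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *
 *	static int
 *	enable_tcsm_for_current_thread(void)
 *	{
 *		uint32_t avail = 0;
 *		size_t len = sizeof(avail);
 *
 *		if (sysctlbyname("kern.tcsm_available", &avail, &len, NULL, 0) != 0 ||
 *		    avail == 0) {
 *			return -1;	// mitigation not applicable on this CPU
 *		}
 *		uint32_t one = 1;
 *		return sysctlbyname("kern.tcsm_enable", NULL, NULL, &one, sizeof(one));
 *	}
 */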

static int
sysctl_kern_debug_get_preoslog SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	static bool oneshot_executed = false;
	size_t preoslog_size = 0;
	const char *preoslog = NULL;
	int ret = 0;

	/* DumpPanic passes a non-zero write value when it needs oneshot behaviour */
	if (req->newptr != USER_ADDR_NULL) {
		uint8_t oneshot = 0;
		int error = SYSCTL_IN(req, &oneshot, sizeof(oneshot));
		if (error) {
			return error;
		}

		if (oneshot) {
			if (!os_atomic_cmpxchg(&oneshot_executed, false, true, acq_rel)) {
				return EPERM;
			}
		}
	}

	preoslog = sysctl_debug_get_preoslog(&preoslog_size);
	if (preoslog != NULL && preoslog_size == 0) {
		sysctl_debug_free_preoslog();
		return 0;
	}

	if (preoslog == NULL || preoslog_size == 0) {
		return 0;
	}

	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = preoslog_size;
		return 0;
	}

	ret = SYSCTL_OUT(req, preoslog, preoslog_size);
	sysctl_debug_free_preoslog();
	return ret;
}

SYSCTL_PROC(_kern, OID_AUTO, preoslog, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_debug_get_preoslog, "-", "");
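
/*
 * Illustrative only: reading the preoslog buffer with the standard
 * size-probe pattern (a NULL oldptr sets oldidx above). Note the handler
 * frees the buffer after a successful read, so this works once per boot
 * (a sketch, not shipping code):
 *
 *	#include <sys/sysctl.h>
 *	#include <stdlib.h>
 *
 *	static void *
 *	read_preoslog(size_t *lenp)
 *	{
 *		size_t len = 0;
 *		if (sysctlbyname("kern.preoslog", NULL, &len, NULL, 0) != 0 ||
 *		    len == 0) {
 *			return NULL;
 *		}
 *		void *buf = malloc(len);
 *		if (buf == NULL) {
 *			return NULL;
 *		}
 *		if (sysctlbyname("kern.preoslog", buf, &len, NULL, 0) != 0) {
 *			free(buf);
 *			return NULL;
 *		}
 *		*lenp = len;
 *		return buf;
 *	}
 */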

#if DEVELOPMENT || DEBUG
extern void sysctl_task_set_no_smt(char no_smt);
extern char sysctl_task_get_no_smt(void);

static int
sysctl_kern_sched_task_set_no_smt SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char buff[4];

	int error = SYSCTL_IN(req, buff, 1);
	if (error) {
		return error;
	}
	char no_smt = buff[0];

	if (!req->newptr) {
		goto out;
	}

	sysctl_task_set_no_smt(no_smt);
out:
	no_smt = sysctl_task_get_no_smt();
	buff[0] = no_smt;

	return SYSCTL_OUT(req, buff, 1);
}

SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_no_smt, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_sched_task_set_no_smt, "A", "");

static int
sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int old_value = thread_get_no_smt() ? 1 : 0;
	int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (changed) {
		thread_set_no_smt(!!new_value);
	}

	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sched_thread_set_no_smt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
    0, 0, sysctl_kern_sched_thread_set_no_smt, "I", "");

#if CONFIG_SCHED_RT_ALLOW

#if DEVELOPMENT || DEBUG
#define RT_ALLOW_CTLFLAGS CTLFLAG_RW
#else
#define RT_ALLOW_CTLFLAGS CTLFLAG_RD
#endif /* DEVELOPMENT || DEBUG */

static int
sysctl_kern_rt_allow_limit_percent(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	extern uint8_t rt_allow_limit_percent;

	int new_value = 0;
	int old_value = rt_allow_limit_percent;
	int changed = 0;

	int error = sysctl_io_number(req, old_value, sizeof(old_value),
	    &new_value, &changed);
	if (error != 0) {
		return error;
	}

	/* Only accept a percentage between 1 and 99 inclusive. */
	if (changed) {
		if (new_value >= 100 || new_value <= 0) {
			return EINVAL;
		}

		rt_allow_limit_percent = (uint8_t)new_value;
	}

	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_percent,
    RT_ALLOW_CTLFLAGS | CTLTYPE_INT | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_rt_allow_limit_percent, "I", "");

static int
sysctl_kern_rt_allow_limit_interval_ms(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	extern uint16_t rt_allow_limit_interval_ms;

	uint64_t new_value = 0;
	uint64_t old_value = rt_allow_limit_interval_ms;
	int changed = 0;

	int error = sysctl_io_number(req, old_value, sizeof(old_value),
	    &new_value, &changed);
	if (error != 0) {
		return error;
	}

	/* Value is in ms. Must be at least 1ms and fit in a uint16_t. */
	if (changed) {
		if (new_value < 1 || new_value > UINT16_MAX) {
			return EINVAL;
		}

		rt_allow_limit_interval_ms = (uint16_t)new_value;
	}

	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, rt_allow_limit_interval_ms,
    RT_ALLOW_CTLFLAGS | CTLTYPE_QUAD | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_rt_allow_limit_interval_ms, "Q", "");
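
/*
 * Illustrative only: tuning the RT-allow throttle from user space on a
 * DEVELOPMENT || DEBUG kernel (the OIDs are read-only otherwise). For
 * example, allow realtime threads 70% of each 10ms window (a sketch):
 *
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *
 *	static int
 *	set_rt_allow(int percent, uint64_t interval_ms)
 *	{
 *		if (sysctlbyname("kern.rt_allow_limit_percent",
 *		    NULL, NULL, &percent, sizeof(percent)) != 0) {
 *			return -1;
 *		}
 *		return sysctlbyname("kern.rt_allow_limit_interval_ms",
 *		    NULL, NULL, &interval_ms, sizeof(interval_ms));
 *	}
 */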

#endif /* CONFIG_SCHED_RT_ALLOW */


static int
sysctl_kern_task_set_filter_msg_flag SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int new_value, changed;
	int old_value = task_get_filter_msg_flag(current_task()) ? 1 : 0;
	int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (changed) {
		task_set_filter_msg_flag(current_task(), !!new_value);
	}

	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, task_set_filter_msg_flag, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_task_set_filter_msg_flag, "I", "");

#if CONFIG_PROC_RESOURCE_LIMITS

extern mach_port_name_t current_task_get_fatal_port_name(void);

static int
sysctl_kern_task_get_fatal_port SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int port = 0;
	int flag = 0;

	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(mach_port_t);
		return 0;
	}

	int error = SYSCTL_IN(req, &flag, sizeof(flag));
	if (error) {
		return error;
	}

	if (flag == 1) {
		port = (int)current_task_get_fatal_port_name();
	}
	return SYSCTL_OUT(req, &port, sizeof(port));
}

SYSCTL_PROC(_machdep, OID_AUTO, task_get_fatal_port, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_task_get_fatal_port, "I", "");

#endif /* CONFIG_PROC_RESOURCE_LIMITS */

extern unsigned int ipc_entry_table_count_max(void);

static int
sysctl_mach_max_port_table_size SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int old_value = ipc_entry_table_count_max();
	int error = sysctl_io_number(req, old_value, sizeof(int), NULL, NULL);

	return error;
}

SYSCTL_PROC(_machdep, OID_AUTO, max_port_table_size, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mach_max_port_table_size, "I", "");

#endif /* DEVELOPMENT || DEBUG */

#if defined(CONFIG_KDP_INTERACTIVE_DEBUGGING) && defined(CONFIG_KDP_COREDUMP_ENCRYPTION)

#define COREDUMP_ENCRYPTION_KEY_ENTITLEMENT "com.apple.private.coredump-encryption-key"

static int
sysctl_coredump_encryption_key_update SYSCTL_HANDLER_ARGS
{
	kern_return_t ret = KERN_SUCCESS;
	int error = 0;
	struct kdp_core_encryption_key_descriptor key_descriptor = {
		.kcekd_format = MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256,
	};

	/* Need to be root or have the entitlement */
	if (!kauth_cred_issuser(kauth_cred_get()) && !IOCurrentTaskHasEntitlement(COREDUMP_ENCRYPTION_KEY_ENTITLEMENT)) {
		return EPERM;
	}

	/* Sanity-check the given key length */
	if (req->newlen > UINT16_MAX) {
		return EINVAL;
	}

	/*
	 * It is allowed for the caller to pass in a NULL buffer.
	 * This indicates that they want us to forget about any public key
	 * we might have.
	 */
	if (req->newptr) {
		key_descriptor.kcekd_size = (uint16_t) req->newlen;
		key_descriptor.kcekd_key = kalloc_data(key_descriptor.kcekd_size, Z_WAITOK);

		if (key_descriptor.kcekd_key == NULL) {
			return ENOMEM;
		}

		error = SYSCTL_IN(req, key_descriptor.kcekd_key, key_descriptor.kcekd_size);
		if (error) {
			goto out;
		}
	}

	ret = IOProvideCoreFileAccess(kdp_core_handle_new_encryption_key, (void *)&key_descriptor);
	if (KERN_SUCCESS != ret) {
		printf("Failed to handle the new encryption key. Error 0x%x", ret);
		error = EFAULT;
	}

out:
	kfree_data(key_descriptor.kcekd_key, key_descriptor.kcekd_size);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, coredump_encryption_key, CTLTYPE_OPAQUE | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    0, 0, &sysctl_coredump_encryption_key_update, "-", "Set a new encryption key for coredumps");
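
/*
 * Illustrative only: installing (or clearing) a coredump encryption key from
 * a root or suitably entitled process. The OID is write-only (CTLFLAG_WR);
 * a zero-length write asks the kernel to forget the current key (a sketch,
 * not shipping code):
 *
 *	#include <sys/sysctl.h>
 *	#include <stddef.h>
 *
 *	static int
 *	set_coredump_key(void *pubkey, size_t keylen)
 *	{
 *		// pubkey == NULL with keylen == 0 clears any installed key
 *		return sysctlbyname("kern.coredump_encryption_key",
 *		    NULL, NULL, pubkey, keylen);
 *	}
 */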

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING && CONFIG_KDP_COREDUMP_ENCRYPTION */