1/*
2 * Copyright (c) 2000-2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66/*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73/*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86#include <sys/param.h>
87#include <sys/systm.h>
88#include <sys/kernel.h>
89#include <sys/malloc.h>
90#include <sys/proc_internal.h>
91#include <sys/kauth.h>
92#include <sys/file_internal.h>
93#include <sys/vnode_internal.h>
94#include <sys/unistd.h>
95#include <sys/buf.h>
96#include <sys/ioctl.h>
97#include <sys/namei.h>
98#include <sys/tty.h>
99#include <sys/disklabel.h>
100#include <sys/vm.h>
101#include <sys/sysctl.h>
102#include <sys/user.h>
103#include <sys/aio_kern.h>
104#include <sys/reboot.h>
105#include <sys/memory_maintenance.h>
106#include <sys/priv.h>
107#include <stdatomic.h>
108#include <uuid/uuid.h>
109
110#include <security/audit/audit.h>
111#include <kern/kalloc.h>
112
113#include <machine/smp.h>
114#include <machine/atomic.h>
115#include <machine/config.h>
116#include <mach/machine.h>
117#include <mach/mach_host.h>
118#include <mach/mach_types.h>
119#include <mach/processor_info.h>
120#include <mach/vm_param.h>
121#include <kern/debug.h>
122#include <kern/mach_param.h>
123#include <kern/task.h>
124#include <kern/thread.h>
125#include <kern/thread_group.h>
126#include <kern/processor.h>
127#include <kern/cpu_number.h>
128#include <kern/sched_prim.h>
129#include <kern/workload_config.h>
130#include <kern/iotrace.h>
131#include <vm/vm_kern.h>
132#include <vm/vm_map.h>
133#include <mach/host_info.h>
134#include <mach/exclaves.h>
135#include <kern/hvg_hypercall.h>
136#include <kdp/sk_core.h>
137
138#if DEVELOPMENT || DEBUG
139#include <kern/ext_paniclog.h>
140#endif
141
142#include <sys/mount_internal.h>
143#include <sys/kdebug.h>
144#include <sys/kern_debug.h>
145#include <sys/kern_sysctl.h>
146#include <sys/variant_internal.h>
147
148#include <IOKit/IOPlatformExpert.h>
149#include <pexpert/pexpert.h>
150
151#include <machine/machine_routines.h>
152#include <machine/exec.h>
153
154#include <nfs/nfs_conf.h>
155
156#include <vm/vm_protos.h>
157#include <vm/vm_pageout.h>
158#include <vm/vm_compressor_algorithms.h>
159#include <sys/imgsrc.h>
160#include <kern/timer_call.h>
161#include <sys/codesign.h>
162#include <IOKit/IOBSD.h>
163#if CONFIG_CSR
164#include <sys/csr.h>
165#endif
166
167#if defined(__i386__) || defined(__x86_64__)
168#include <i386/cpuid.h>
169#endif
170
171#if CONFIG_FREEZE
172#include <sys/kern_memorystatus.h>
173#endif
174
175#if KPERF
176#include <kperf/kperf.h>
177#endif
178
179#if HYPERVISOR
180#include <kern/hv_support.h>
181#endif
182
183
184#include <corecrypto/ccsha2.h>
185
186/*
187 * deliberately setting max requests to really high number
188 * so that runaway settings do not cause MALLOC overflows
189 */
190#define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
191
192extern int aio_max_requests;
193extern int aio_max_requests_per_process;
194extern int aio_worker_threads;
195extern int lowpri_IO_window_msecs;
196extern int lowpri_IO_delay_msecs;
197#if DEVELOPMENT || DEBUG
198extern int nx_enabled;
199#endif
200extern int speculative_reads_disabled;
201extern unsigned int speculative_prefetch_max;
202extern unsigned int speculative_prefetch_max_iosize;
203extern unsigned int preheat_max_bytes;
204extern unsigned int preheat_min_bytes;
205extern long numvnodes;
206extern long freevnodes;
207extern long num_recycledvnodes;
208
209extern uuid_string_t bootsessionuuid_string;
210
211extern unsigned int vm_max_delayed_work_limit;
212extern unsigned int vm_max_batch;
213
214extern unsigned int vm_page_free_min;
215extern unsigned int vm_page_free_target;
216extern unsigned int vm_page_free_reserved;
217
218#if (DEVELOPMENT || DEBUG)
219extern uint32_t vm_page_creation_throttled_hard;
220extern uint32_t vm_page_creation_throttled_soft;
221#endif /* DEVELOPMENT || DEBUG */
222
223#if DEVELOPMENT || DEBUG
224extern bool bootarg_hide_process_traced;
225#endif
226
227/*
228 * Conditionally allow dtrace to see these functions for debugging purposes.
229 */
230#ifdef STATIC
231#undef STATIC
232#endif
233#if 0
234#define STATIC
235#else
236#define STATIC static
237#endif
238
239extern boolean_t mach_timer_coalescing_enabled;
240
241extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
242
243STATIC void
244fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
245STATIC void
246fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
247STATIC void
248fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
249STATIC void
250fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
251STATIC void
252fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
253STATIC void
254fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
255
256#if CONFIG_NETBOOT
257extern int
258netboot_root(void);
259#endif
260int
261sysctl_procargs(int *name, u_int namelen, user_addr_t where,
262 size_t *sizep, proc_t cur_proc);
263STATIC int
264sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
265 proc_t cur_proc, int argc_yes);
266int
267sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
268 size_t newlen, void *sp, int len);
269
270STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
271STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
272STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
273STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
274STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
275int sysdoproc_callback(proc_t p, void *arg);
276
277#if CONFIG_THREAD_GROUPS && (DEVELOPMENT || DEBUG)
278STATIC int sysctl_get_thread_group_id SYSCTL_HANDLER_ARGS;
279#endif
280
281/* forward declarations for non-static STATIC */
282STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
283STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
284STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
285STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
287#if COUNT_SYSCALLS
288STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
289#endif /* COUNT_SYSCALLS */
290#if defined(XNU_TARGET_OS_OSX)
291STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
292#endif /* defined(XNU_TARGET_OS_OSX) */
293STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
294STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
295STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
296STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
297STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
298STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
299STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
300STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
301STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
302STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
303STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
304STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
305STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
306STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
307STATIC int sysctl_bootuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
308STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
309#if CONFIG_NETBOOT
310STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
311#endif
312#ifdef CONFIG_IMGSRC_ACCESS
313STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
314#endif
315STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
316STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
317#if CONFIG_COREDUMP
318STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
319STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
320#endif
321STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
322STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
323STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
324#if DEVELOPMENT || DEBUG
325STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
326#endif
327STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
328STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
329STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
330STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
331STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
332STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
333STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
334STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
335STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
336STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
337
338#ifdef CONFIG_XNUPOST
339#include <tests/xnupost.h>
340
341STATIC int sysctl_debug_test_oslog_ctl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
342STATIC int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
343STATIC int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
344#endif
345
346extern void IORegistrySetOSBuildVersion(char * build_version);
347extern int IOParseWorkloadConfig(workload_config_ctx_t *ctx, const char * buffer, size_t size);
348extern int IOUnparseWorkloadConfig(char *buffer, size_t *size);
349
350STATIC void
351fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
352{
353 la64->ldavg[0] = la->ldavg[0];
354 la64->ldavg[1] = la->ldavg[1];
355 la64->ldavg[2] = la->ldavg[2];
356 la64->fscale = (user64_long_t)la->fscale;
357}
358
359STATIC void
360fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
361{
362 la32->ldavg[0] = la->ldavg[0];
363 la32->ldavg[1] = la->ldavg[1];
364 la32->ldavg[2] = la->ldavg[2];
365 la32->fscale = (user32_long_t)la->fscale;
366}
367
368#if COUNT_SYSCALLS
369extern int do_count_syscalls;
370#endif
371
372#ifdef INSECURE
373int securelevel = -1;
374#else
375int securelevel;
376#endif
377
378STATIC int
379sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1,
380 __unused int arg2, struct sysctl_req *req)
381{
382 int error;
383 struct uthread *ut = current_uthread();
384 user_addr_t oldp = 0, newp = 0;
385 size_t *oldlenp = NULL;
386 size_t newlen = 0;
387
388 oldp = req->oldptr;
389 oldlenp = &(req->oldlen);
390 newp = req->newptr;
391 newlen = req->newlen;
392
393 /* We want the current length, and maybe the string itself */
394 if (oldlenp) {
395 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
396 size_t currlen = MAXTHREADNAMESIZE - 1;
397
398 if (ut->pth_name) {
399 /* use length of current thread name */
400 currlen = strlen(s: ut->pth_name);
401 }
402 if (oldp) {
403 if (*oldlenp < currlen) {
404 return ENOMEM;
405 }
406 /* NOTE - we do not copy the NULL terminator */
407 if (ut->pth_name) {
408 error = copyout(ut->pth_name, oldp, currlen);
409 if (error) {
410 return error;
411 }
412 }
413 }
414 /* return length of thread name minus NULL terminator (just like strlen) */
415 req->oldidx = currlen;
416 }
417
418 /* We want to set the name to something */
419 if (newp) {
420 if (newlen > (MAXTHREADNAMESIZE - 1)) {
421 return ENAMETOOLONG;
422 }
423 if (!ut->pth_name) {
424 char *tmp_pth_name = (char *)kalloc_data(MAXTHREADNAMESIZE,
425 Z_WAITOK | Z_ZERO);
426 if (!tmp_pth_name) {
427 return ENOMEM;
428 }
429 if (!OSCompareAndSwapPtr(NULL, tmp_pth_name, &ut->pth_name)) {
430 kfree_data(tmp_pth_name, MAXTHREADNAMESIZE);
431 return EBUSY;
432 }
433 } else {
434 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, str: ut->pth_name);
435 bzero(s: ut->pth_name, MAXTHREADNAMESIZE);
436 }
437 error = copyin(newp, ut->pth_name, newlen);
438 if (error) {
439 return error;
440 }
441
442 kernel_debug_string_simple(TRACE_STRING_THREADNAME, str: ut->pth_name);
443 }
444
445 return 0;
446}
447
448SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname, "A", "");
449
450#define WORKLOAD_CONFIG_MAX_SIZE (128 * 1024 * 1024)
451
452/* Called locked - sysctl defined without CTLFLAG_LOCKED. */
453static int
454sysctl_workload_config SYSCTL_HANDLER_ARGS
455{
456#pragma unused(arg1, arg2)
457
458 char *plist_blob = NULL;
459 kern_return_t ret = KERN_FAILURE;
460 int error = -1;
461
462 /* Only allow reading of workload config on non-RELEASE kernels. */
463#if DEVELOPMENT || DEBUG
464
465 const size_t buf_size = req->oldlen;
466
467 if (!req->oldptr) {
468 /* Just looking for the size to allocate. */
469 size_t size = 0;
470 ret = IOUnparseWorkloadConfig(NULL, &size);
471 if (ret != KERN_SUCCESS) {
472 return ENOMEM;
473 }
474
475 error = SYSCTL_OUT(req, NULL, size);
476 if (error) {
477 return error;
478 }
479 } else {
480 if (buf_size > (WORKLOAD_CONFIG_MAX_SIZE - 1) ||
481 buf_size == 0) {
482 return EINVAL;
483 }
484
485 plist_blob = kalloc_data(buf_size, Z_WAITOK | Z_ZERO);
486 if (!plist_blob) {
487 return ENOMEM;
488 }
489
490 size_t size = buf_size;
491 ret = IOUnparseWorkloadConfig(plist_blob, &size);
492 if (ret != KERN_SUCCESS) {
493 kfree_data(plist_blob, buf_size);
494 return ENOMEM;
495 }
496
497 error = SYSCTL_OUT(req, plist_blob, MIN(buf_size, size));
498
499 /* If the buffer was too small to fit the entire config. */
500 if (buf_size < size) {
501 error = ENOMEM;
502 }
503
504 kfree_data(plist_blob, buf_size);
505 if (error) {
506 return error;
507 }
508 }
509#endif /* DEVELOPMENT || DEBUG */
510
511 if (req->newptr) {
512 size_t newlen = req->newlen;
513 if (newlen > (WORKLOAD_CONFIG_MAX_SIZE - 1)) {
514 return EINVAL;
515 }
516
517
518 workload_config_ctx_t *ctx = NULL;
519 /*
520 * Only allow workload_config_boot to be loaded once at boot by launchd.
521 */
522 if (current_proc() == initproc &&
523 !workload_config_initialized(ctx: &workload_config_boot)) {
524 ctx = &workload_config_boot;
525 } else {
526#if DEVELOPMENT || DEBUG
527 /*
528 * Use the devel config context otherwise. If a devel config has been
529 * initialized it will be used for lookups in place of the boot config.
530 */
531 ctx = &workload_config_devel;
532 if (workload_config_initialized(ctx)) {
533 workload_config_free(ctx);
534 }
535
536 /* The devel context can be explicitly cleared by an empty string. */
537 if (newlen == 1) {
538 return 0;
539 }
540#else
541 return EINVAL;
542#endif
543 }
544
545 plist_blob = kalloc_data(newlen + 1, Z_WAITOK | Z_ZERO);
546 if (!plist_blob) {
547 return ENOMEM;
548 }
549 error = copyin(req->newptr, plist_blob, newlen);
550 if (error) {
551 kfree_data(plist_blob, newlen + 1);
552 return error;
553 }
554 plist_blob[newlen] = '\0';
555 ret = IOParseWorkloadConfig(ctx, buffer: plist_blob, size: newlen + 1);
556
557 kfree_data(plist_blob, newlen + 1);
558 return ret == KERN_SUCCESS ? 0 : EINVAL;
559 }
560
561 return 0;
562}
563
564SYSCTL_PROC(_kern, OID_AUTO, workload_config, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MASKED,
565 0, 0, sysctl_workload_config, "A", "global workgroup configuration plist load/unload");
566
567#define BSD_HOST 1
568STATIC int
569sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
570{
571 host_basic_info_data_t hinfo;
572 kern_return_t kret;
573 uint32_t size;
574 uint32_t buf_size = 0;
575 int changed;
576 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
577 struct _processor_statistics_np *buf;
578 int error;
579
580 kret = host_info(host: (host_t)BSD_HOST, HOST_BASIC_INFO, host_info_out: (host_info_t)&hinfo, host_info_outCnt: &count);
581 if (kret != KERN_SUCCESS) {
582 return EINVAL;
583 }
584
585 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
586
587 if (req->oldlen < size) {
588 return EINVAL;
589 }
590
591 buf_size = size;
592 buf = (struct _processor_statistics_np *)kalloc_data(buf_size, Z_ZERO | Z_WAITOK);
593
594 kret = get_sched_statistics(out: buf, count: &size);
595 if (kret != KERN_SUCCESS) {
596 error = EINVAL;
597 goto out;
598 }
599
600 error = sysctl_io_opaque(req, pValue: buf, valueSize: size, changed: &changed);
601 if (error) {
602 goto out;
603 }
604
605 if (changed) {
606 panic("Sched info changed?!");
607 }
608out:
609 kfree_data(buf, buf_size);
610 return error;
611}
612
613SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
614
615STATIC int
616sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
617{
618 boolean_t active;
619 int res;
620
621 if (req->newlen != sizeof(active)) {
622 return EINVAL;
623 }
624
625 res = copyin(req->newptr, &active, sizeof(active));
626 if (res != 0) {
627 return res;
628 }
629
630 return set_sched_stats_active(active);
631}
632
633SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
634
635extern uint32_t sched_debug_flags;
636SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
637
638#if (DEBUG || DEVELOPMENT)
639extern boolean_t doprnt_hide_pointers;
640SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
641#endif
642
643
644extern int get_kernel_symfile(proc_t, char **);
645
646#if COUNT_SYSCALLS
647#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
648
649extern const unsigned int nsysent;
650extern int syscalls_log[];
651extern const char *syscallnames[];
652
653STATIC int
654sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
655{
656 __unused int cmd = oidp->oid_arg2; /* subcommand*/
657 __unused int *name = arg1; /* oid element argument vector */
658 __unused int namelen = arg2; /* number of oid element arguments */
659 int error, changed;
660
661 int tmp;
662
663 /* valid values passed in:
664 * = 0 means don't keep called counts for each bsd syscall
665 * > 0 means keep called counts for each bsd syscall
666 * = 2 means dump current counts to the system log
667 * = 3 means reset all counts
668 * for example, to dump current counts:
669 * sysctl -w kern.count_calls=2
670 */
671 error = sysctl_io_number(req, do_count_syscalls,
672 sizeof(do_count_syscalls), &tmp, &changed);
673
674 if (error != 0 || !changed) {
675 return error;
676 }
677
678 if (tmp == 1) {
679 do_count_syscalls = 1;
680 } else if (tmp == 0 || tmp == 2 || tmp == 3) {
681 for (int i = 0; i < nsysent; i++) {
682 if (syscalls_log[i] != 0) {
683 if (tmp == 2) {
684 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
685 } else {
686 syscalls_log[i] = 0;
687 }
688 }
689 }
690 do_count_syscalls = (tmp != 0);
691 }
692
693 return error;
694}
695SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
696 0, /* Pointer argument (arg1) */
697 0, /* Integer argument (arg2) */
698 sysctl_docountsyscalls, /* Handler function */
699 NULL, /* Data pointer */
700 "");
701#endif /* COUNT_SYSCALLS */
702
703/*
704 * The following sysctl_* functions should not be used
705 * any more, as they can only cope with callers in
706 * user mode: Use new-style
707 * sysctl_io_number()
708 * sysctl_io_string()
709 * sysctl_io_opaque()
710 * instead.
711 */
712
713STATIC int
714sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
715{
716 if (proc_getpid(p) != (pid_t)*(int*)arg) {
717 return 0;
718 } else {
719 return 1;
720 }
721}
722
723STATIC int
724sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
725{
726 if (p->p_pgrpid != (pid_t)*(int*)arg) {
727 return 0;
728 } else {
729 return 1;
730 }
731}
732
733STATIC int
734sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
735{
736 struct pgrp *pg;
737 dev_t dev = NODEV;
738
739 if ((p->p_flag & P_CONTROLT) && (pg = proc_pgrp(p, NULL)) != PGRP_NULL) {
740 dev = os_atomic_load(&pg->pg_session->s_ttydev, relaxed);
741 pgrp_rele(pgrp: pg);
742 }
743
744 return dev != NODEV && dev == (dev_t)*(int *)arg;
745}
746
747STATIC int
748sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
749{
750 uid_t uid;
751
752 smr_proc_task_enter();
753 uid = kauth_cred_getuid(cred: proc_ucred_smr(p));
754 smr_proc_task_leave();
755
756 if (uid != (uid_t)*(int*)arg) {
757 return 0;
758 } else {
759 return 1;
760 }
761}
762
763
764STATIC int
765sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
766{
767 uid_t ruid;
768
769 smr_proc_task_enter();
770 ruid = kauth_cred_getruid(cred: proc_ucred_smr(p));
771 smr_proc_task_leave();
772
773 if (ruid != (uid_t)*(int*)arg) {
774 return 0;
775 } else {
776 return 1;
777 }
778}
779
780/*
781 * try over estimating by 5 procs
782 */
783#define KERN_PROCSLOP (5 * sizeof(struct kinfo_proc))
784struct sysdoproc_args {
785 size_t buflen;
786 void *kprocp;
787 boolean_t is_64_bit;
788 user_addr_t dp;
789 size_t needed;
790 unsigned int sizeof_kproc;
791 int *errorp;
792 int uidcheck;
793 int ruidcheck;
794 int ttycheck;
795 int uidval;
796};
797
798int
799sysdoproc_callback(proc_t p, void *arg)
800{
801 struct sysdoproc_args *args = arg;
802
803 if (args->buflen >= args->sizeof_kproc) {
804 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, arg: &args->uidval) == 0)) {
805 return PROC_RETURNED;
806 }
807 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, arg: &args->uidval) == 0)) {
808 return PROC_RETURNED;
809 }
810 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, arg: &args->uidval) == 0)) {
811 return PROC_RETURNED;
812 }
813
814 bzero(s: args->kprocp, n: args->sizeof_kproc);
815 if (args->is_64_bit) {
816 fill_user64_proc(p, args->kprocp);
817 } else {
818 fill_user32_proc(p, args->kprocp);
819 }
820 int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
821 if (error) {
822 *args->errorp = error;
823 return PROC_RETURNED_DONE;
824 }
825 args->dp += args->sizeof_kproc;
826 args->buflen -= args->sizeof_kproc;
827 }
828 args->needed += args->sizeof_kproc;
829 return PROC_RETURNED;
830}
831
832SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
833STATIC int
834sysctl_prochandle SYSCTL_HANDLER_ARGS
835{
836 int cmd = oidp->oid_arg2; /* subcommand for multiple nodes */
837 int *name = arg1; /* oid element argument vector */
838 int namelen = arg2; /* number of oid element arguments */
839 user_addr_t where = req->oldptr;/* user buffer copy out address */
840
841 user_addr_t dp = where;
842 size_t needed = 0;
843 size_t buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
844 int error = 0;
845 boolean_t is_64_bit = proc_is64bit(current_proc());
846 struct user32_kinfo_proc user32_kproc;
847 struct user64_kinfo_proc user_kproc;
848 int sizeof_kproc;
849 void *kprocp;
850 int (*filterfn)(proc_t, void *) = 0;
851 struct sysdoproc_args args;
852 int uidcheck = 0;
853 int ruidcheck = 0;
854 int ttycheck = 0;
855
856 if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL)) {
857 return EINVAL;
858 }
859
860 if (is_64_bit) {
861 sizeof_kproc = sizeof(user_kproc);
862 kprocp = &user_kproc;
863 } else {
864 sizeof_kproc = sizeof(user32_kproc);
865 kprocp = &user32_kproc;
866 }
867
868 switch (cmd) {
869 case KERN_PROC_PID:
870 filterfn = sysdoproc_filt_KERN_PROC_PID;
871 break;
872
873 case KERN_PROC_PGRP:
874 filterfn = sysdoproc_filt_KERN_PROC_PGRP;
875 break;
876
877 case KERN_PROC_TTY:
878 ttycheck = 1;
879 break;
880
881 case KERN_PROC_UID:
882 uidcheck = 1;
883 break;
884
885 case KERN_PROC_RUID:
886 ruidcheck = 1;
887 break;
888
889 case KERN_PROC_ALL:
890 break;
891
892 default:
893 /* must be kern.proc.<unknown> */
894 return ENOTSUP;
895 }
896
897 error = 0;
898 args.buflen = buflen;
899 args.kprocp = kprocp;
900 args.is_64_bit = is_64_bit;
901 args.dp = dp;
902 args.needed = needed;
903 args.errorp = &error;
904 args.uidcheck = uidcheck;
905 args.ruidcheck = ruidcheck;
906 args.ttycheck = ttycheck;
907 args.sizeof_kproc = sizeof_kproc;
908 if (namelen) {
909 args.uidval = name[0];
910 }
911
912 proc_iterate(flags: (PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
913 callout: sysdoproc_callback, arg: &args, filterfn, filterarg: name);
914
915 if (error) {
916 return error;
917 }
918
919 dp = args.dp;
920 needed = args.needed;
921
922 if (where != USER_ADDR_NULL) {
923 req->oldlen = dp - where;
924 if (needed > req->oldlen) {
925 return ENOMEM;
926 }
927 } else {
928 needed += KERN_PROCSLOP;
929 req->oldlen = needed;
930 }
931 /* adjust index so we return the right required/consumed amount */
932 req->oldidx += req->oldlen;
933 return 0;
934}
935
936
937/*
938 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
939 * in the sysctl declaration itself, which comes into the handler function
940 * as 'oidp->oid_arg2'.
941 *
942 * For these particular sysctls, since they have well known OIDs, we could
943 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
944 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
945 * of a well known value with a common handler function. This is desirable,
946 * because we want well known values to "go away" at some future date.
947 *
948 * It should be noted that the value of '((int *)arg1)[1]' is used for many
949 * an integer parameter to the subcommand for many of these sysctls; we'd
950 * rather have used '((int *)arg1)[0]' for that, or even better, an element
951 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
952 * and then use leaf-node permissions enforcement, but that would have
953 * necessitated modifying user space code to correspond to the interface
954 * change, and we are striving for binary backward compatibility here; even
955 * though these are SPI, and not intended for use by user space applications
956 * which are not themselves system tools or libraries, some applications
957 * have erroneously used them.
958 */
959SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
960 0, /* Pointer argument (arg1) */
961 KERN_PROC_ALL, /* Integer argument (arg2) */
962 sysctl_prochandle, /* Handler function */
963 NULL, /* Data is size variant on ILP32/LP64 */
964 "");
965SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
966 0, /* Pointer argument (arg1) */
967 KERN_PROC_PID, /* Integer argument (arg2) */
968 sysctl_prochandle, /* Handler function */
969 NULL, /* Data is size variant on ILP32/LP64 */
970 "");
971SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
972 0, /* Pointer argument (arg1) */
973 KERN_PROC_TTY, /* Integer argument (arg2) */
974 sysctl_prochandle, /* Handler function */
975 NULL, /* Data is size variant on ILP32/LP64 */
976 "");
977SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
978 0, /* Pointer argument (arg1) */
979 KERN_PROC_PGRP, /* Integer argument (arg2) */
980 sysctl_prochandle, /* Handler function */
981 NULL, /* Data is size variant on ILP32/LP64 */
982 "");
983SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
984 0, /* Pointer argument (arg1) */
985 KERN_PROC_UID, /* Integer argument (arg2) */
986 sysctl_prochandle, /* Handler function */
987 NULL, /* Data is size variant on ILP32/LP64 */
988 "");
989SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
990 0, /* Pointer argument (arg1) */
991 KERN_PROC_RUID, /* Integer argument (arg2) */
992 sysctl_prochandle, /* Handler function */
993 NULL, /* Data is size variant on ILP32/LP64 */
994 "");
995SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
996 0, /* Pointer argument (arg1) */
997 KERN_PROC_LCID, /* Integer argument (arg2) */
998 sysctl_prochandle, /* Handler function */
999 NULL, /* Data is size variant on ILP32/LP64 */
1000 "");
1001
1002
1003/*
1004 * Fill in non-zero fields of an eproc structure for the specified process.
1005 */
1006STATIC void
1007fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
1008{
1009 struct pgrp *pg;
1010 struct session *sessp;
1011 kauth_cred_t my_cred;
1012
1013 pg = proc_pgrp(p, &sessp);
1014
1015 if (pg != PGRP_NULL) {
1016 ep->e_pgid = p->p_pgrpid;
1017 ep->e_jobc = pg->pg_jobc;
1018 if (sessp->s_ttyvp) {
1019 ep->e_flag = EPROC_CTTY;
1020 }
1021 }
1022
1023 ep->e_ppid = p->p_ppid;
1024
1025 smr_proc_task_enter();
1026 my_cred = proc_ucred_smr(p);
1027
1028 /* A fake historical pcred */
1029 ep->e_pcred.p_ruid = kauth_cred_getruid(cred: my_cred);
1030 ep->e_pcred.p_svuid = kauth_cred_getsvuid(cred: my_cred);
1031 ep->e_pcred.p_rgid = kauth_cred_getrgid(cred: my_cred);
1032 ep->e_pcred.p_svgid = kauth_cred_getsvgid(cred: my_cred);
1033
1034 /* A fake historical *kauth_cred_t */
1035 unsigned long refcnt = os_atomic_load(&my_cred->cr_ref, relaxed);
1036 ep->e_ucred.cr_ref = (uint32_t)MIN(refcnt, UINT32_MAX);
1037 ep->e_ucred.cr_uid = kauth_cred_getuid(cred: my_cred);
1038 ep->e_ucred.cr_ngroups = (short)posix_cred_get(cred: my_cred)->cr_ngroups;
1039 bcopy(src: posix_cred_get(cred: my_cred)->cr_groups,
1040 dst: ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t));
1041
1042 my_cred = NOCRED;
1043 smr_proc_task_leave();
1044
1045 ep->e_tdev = NODEV;
1046 if (pg != PGRP_NULL) {
1047 if (p->p_flag & P_CONTROLT) {
1048 session_lock(sess: sessp);
1049 ep->e_tdev = os_atomic_load(&sessp->s_ttydev, relaxed);
1050 ep->e_tpgid = sessp->s_ttypgrpid;
1051 session_unlock(sess: sessp);
1052 }
1053 if (SESS_LEADER(p, sessp)) {
1054 ep->e_flag |= EPROC_SLEADER;
1055 }
1056 pgrp_rele(pgrp: pg);
1057 }
1058}
1059
1060/*
1061 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
1062 */
1063STATIC void
1064fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
1065{
1066 struct pgrp *pg;
1067 struct session *sessp;
1068 kauth_cred_t my_cred;
1069
1070 pg = proc_pgrp(p, &sessp);
1071
1072 if (pg != PGRP_NULL) {
1073 ep->e_pgid = p->p_pgrpid;
1074 ep->e_jobc = pg->pg_jobc;
1075 if (sessp->s_ttyvp) {
1076 ep->e_flag = EPROC_CTTY;
1077 }
1078 }
1079
1080 ep->e_ppid = p->p_ppid;
1081
1082 smr_proc_task_enter();
1083 my_cred = proc_ucred_smr(p);
1084
1085 /* A fake historical pcred */
1086 ep->e_pcred.p_ruid = kauth_cred_getruid(cred: my_cred);
1087 ep->e_pcred.p_svuid = kauth_cred_getsvuid(cred: my_cred);
1088 ep->e_pcred.p_rgid = kauth_cred_getrgid(cred: my_cred);
1089 ep->e_pcred.p_svgid = kauth_cred_getsvgid(cred: my_cred);
1090
1091 /* A fake historical *kauth_cred_t */
1092 unsigned long refcnt = os_atomic_load(&my_cred->cr_ref, relaxed);
1093 ep->e_ucred.cr_ref = (uint32_t)MIN(refcnt, UINT32_MAX);
1094 ep->e_ucred.cr_uid = kauth_cred_getuid(cred: my_cred);
1095 ep->e_ucred.cr_ngroups = (short)posix_cred_get(cred: my_cred)->cr_ngroups;
1096 bcopy(src: posix_cred_get(cred: my_cred)->cr_groups,
1097 dst: ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t));
1098
1099 my_cred = NOCRED;
1100 smr_proc_task_leave();
1101
1102 ep->e_tdev = NODEV;
1103 if (pg != PGRP_NULL) {
1104 if (p->p_flag & P_CONTROLT) {
1105 session_lock(sess: sessp);
1106 ep->e_tdev = os_atomic_load(&sessp->s_ttydev, relaxed);
1107 ep->e_tpgid = sessp->s_ttypgrpid;
1108 session_unlock(sess: sessp);
1109 }
1110 if (SESS_LEADER(p, sessp)) {
1111 ep->e_flag |= EPROC_SLEADER;
1112 }
1113 pgrp_rele(pgrp: pg);
1114 }
1115}
1116
1117/*
1118 * Fill in an eproc structure for the specified process.
1119 * bzeroed by our caller, so only set non-zero fields.
1120 */
1121STATIC void
1122fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
1123{
1124 exp->p_starttime.tv_sec = (user32_time_t)p->p_start.tv_sec;
1125 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1126 exp->p_flag = p->p_flag;
1127#if DEVELOPMENT || DEBUG
1128 if (p->p_lflag & P_LTRACED && !bootarg_hide_process_traced) {
1129#else
1130 if (p->p_lflag & P_LTRACED) {
1131#endif
1132 exp->p_flag |= P_TRACED;
1133 }
1134 if (p->p_lflag & P_LPPWAIT) {
1135 exp->p_flag |= P_PPWAIT;
1136 }
1137 if (p->p_lflag & P_LEXIT) {
1138 exp->p_flag |= P_WEXIT;
1139 }
1140 exp->p_stat = p->p_stat;
1141 exp->p_pid = proc_getpid(p);
1142#if DEVELOPMENT || DEBUG
1143 if (bootarg_hide_process_traced) {
1144 exp->p_oppid = 0;
1145 } else
1146#endif
1147 {
1148 exp->p_oppid = p->p_oppid;
1149 }
1150 /* Mach related */
1151 exp->p_debugger = p->p_debugger;
1152 exp->sigwait = p->sigwait;
1153 /* scheduling */
1154#ifdef _PROC_HAS_SCHEDINFO_
1155 exp->p_estcpu = p->p_estcpu;
1156 exp->p_pctcpu = p->p_pctcpu;
1157 exp->p_slptime = p->p_slptime;
1158#endif
1159 exp->p_realtimer.it_interval.tv_sec =
1160 (user32_time_t)p->p_realtimer.it_interval.tv_sec;
1161 exp->p_realtimer.it_interval.tv_usec =
1162 (__int32_t)p->p_realtimer.it_interval.tv_usec;
1163
1164 exp->p_realtimer.it_value.tv_sec =
1165 (user32_time_t)p->p_realtimer.it_value.tv_sec;
1166 exp->p_realtimer.it_value.tv_usec =
1167 (__int32_t)p->p_realtimer.it_value.tv_usec;
1168
1169 exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
1170 exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;
1171
1172 exp->p_sigignore = p->p_sigignore;
1173 exp->p_sigcatch = p->p_sigcatch;
1174 exp->p_priority = p->p_priority;
1175 exp->p_nice = p->p_nice;
1176 bcopy(src: &p->p_comm, dst: &exp->p_comm, MAXCOMLEN);
1177 exp->p_xstat = (u_short)MIN(p->p_xstat, USHRT_MAX);
1178 exp->p_acflag = p->p_acflag;
1179}
1180
1181/*
1182 * Fill in an LP64 version of extern_proc structure for the specified process.
1183 */
1184STATIC void
1185fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
1186{
1187 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1188 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1189 exp->p_flag = p->p_flag;
1190#if DEVELOPMENT || DEBUG
1191 if (p->p_lflag & P_LTRACED && !bootarg_hide_process_traced) {
1192#else
1193 if (p->p_lflag & P_LTRACED) {
1194#endif
1195 exp->p_flag |= P_TRACED;
1196 }
1197 if (p->p_lflag & P_LPPWAIT) {
1198 exp->p_flag |= P_PPWAIT;
1199 }
1200 if (p->p_lflag & P_LEXIT) {
1201 exp->p_flag |= P_WEXIT;
1202 }
1203 exp->p_stat = p->p_stat;
1204 exp->p_pid = proc_getpid(p);
1205#if DEVELOPMENT || DEBUG
1206 if (bootarg_hide_process_traced) {
1207 exp->p_oppid = 0;
1208 } else
1209#endif
1210 {
1211 exp->p_oppid = p->p_oppid;
1212 }
1213 /* Mach related */
1214 exp->p_debugger = p->p_debugger;
1215 exp->sigwait = p->sigwait;
1216 /* scheduling */
1217#ifdef _PROC_HAS_SCHEDINFO_
1218 exp->p_estcpu = p->p_estcpu;
1219 exp->p_pctcpu = p->p_pctcpu;
1220 exp->p_slptime = p->p_slptime;
1221#endif
1222 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1223 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
1224
1225 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1226 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
1227
1228 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1229 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
1230
1231 exp->p_sigignore = p->p_sigignore;
1232 exp->p_sigcatch = p->p_sigcatch;
1233 exp->p_priority = p->p_priority;
1234 exp->p_nice = p->p_nice;
1235 bcopy(src: &p->p_comm, dst: &exp->p_comm, MAXCOMLEN);
1236 exp->p_xstat = (u_short)MIN(p->p_xstat, USHRT_MAX);
1237 exp->p_acflag = p->p_acflag;
1238}
1239
1240STATIC void
1241fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
1242{
1243 /* on a 64 bit kernel, 32 bit users get some truncated information */
1244 fill_user32_externproc(p, exp: &kp->kp_proc);
1245 fill_user32_eproc(p, ep: &kp->kp_eproc);
1246}
1247
1248STATIC void
1249fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
1250{
1251 fill_user64_externproc(p, exp: &kp->kp_proc);
1252 fill_user64_eproc(p, ep: &kp->kp_eproc);
1253}
1254
1255#if defined(XNU_TARGET_OS_OSX)
1256/*
1257 * Return the top *sizep bytes of the user stack, or the entire area of the
1258 * user stack down through the saved exec_path, whichever is smaller.
1259 */
1260STATIC int
1261sysctl_doprocargs SYSCTL_HANDLER_ARGS
1262{
1263 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1264 int *name = arg1; /* oid element argument vector */
1265 int namelen = arg2; /* number of oid element arguments */
1266 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1267 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1268// user_addr_t newp = req->newptr; /* user buffer copy in address */
1269// size_t newlen = req->newlen; /* user buffer copy in size */
1270 int error;
1271
1272 error = sysctl_procargsx( name, namelen, where: oldp, sizep: oldlenp, cur_proc: current_proc(), argc_yes: 0);
1273
1274 /* adjust index so we return the right required/consumed amount */
1275 if (!error) {
1276 req->oldidx += req->oldlen;
1277 }
1278
1279 return error;
1280}
1281SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
1282 0, /* Pointer argument (arg1) */
1283 0, /* Integer argument (arg2) */
1284 sysctl_doprocargs, /* Handler function */
1285 NULL, /* Data pointer */
1286 "");
1287#endif /* defined(XNU_TARGET_OS_OSX) */
1288
1289STATIC int
1290sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1291{
1292 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1293 int *name = arg1; /* oid element argument vector */
1294 int namelen = arg2; /* number of oid element arguments */
1295 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1296 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1297// user_addr_t newp = req->newptr; /* user buffer copy in address */
1298// size_t newlen = req->newlen; /* user buffer copy in size */
1299 int error;
1300
1301 error = sysctl_procargsx( name, namelen, where: oldp, sizep: oldlenp, cur_proc: current_proc(), argc_yes: 1);
1302
1303 /* adjust index so we return the right required/consumed amount */
1304 if (!error) {
1305 req->oldidx += req->oldlen;
1306 }
1307
1308 return error;
1309}
1310SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
1311 0, /* Pointer argument (arg1) */
1312 0, /* Integer argument (arg2) */
1313 sysctl_doprocargs2, /* Handler function */
1314 NULL, /* Data pointer */
1315 "");
1316
1317#define SYSCTL_PROCARGS_READ_ENVVARS_ENTITLEMENT "com.apple.private.read-environment-variables"
1318STATIC int
1319sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1320 size_t *sizep, proc_t cur_proc, int argc_yes)
1321{
1322 assert(sizep != NULL);
1323 proc_t p = NULL;
1324 size_t buflen = where != USER_ADDR_NULL ? *sizep : 0;
1325 int error = 0;
1326 struct _vm_map *proc_map = NULL;
1327 struct task * task;
1328 vm_map_copy_t tmp = NULL;
1329 user_addr_t arg_addr;
1330 size_t arg_size;
1331 caddr_t data;
1332 size_t argslen = 0;
1333 size_t size = 0;
1334 vm_offset_t copy_start = 0, copy_end;
1335 vm_offset_t smallbuffer_start;
1336 kern_return_t ret;
1337 int pid;
1338 uid_t uid;
1339 int argc = -1;
1340 size_t argvsize;
1341 size_t remaining;
1342 size_t current_arg_index;
1343 size_t current_arg_len;
1344 const char * current_arg;
1345 bool omit_env_vars = true;
1346 user_addr_t user_stack;
1347 vm_map_offset_t effective_page_mask;
1348
1349 if (namelen < 1) {
1350 error = EINVAL;
1351 goto finish;
1352 }
1353
1354 if (argc_yes) {
1355 buflen -= sizeof(int); /* reserve first word to return argc */
1356 }
1357 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1358 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1359 /* is not NULL then the caller wants us to return the length needed to */
1360 /* hold the data we would return */
1361 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1362 error = EINVAL;
1363 goto finish;
1364 }
1365
1366 /*
1367 * Lookup process by pid
1368 */
1369 pid = name[0];
1370 p = proc_find(pid);
1371 if (p == NULL) {
1372 error = EINVAL;
1373 goto finish;
1374 }
1375
1376 /* Allow reading environment variables if any of the following are true:
1377 * - kernel is DEVELOPMENT || DEBUG
1378 * - target process is same as current_proc()
1379 * - target process is not cs_restricted
1380 * - SIP is off
1381 * - caller has an entitlement
1382 */
1383
1384#if DEVELOPMENT || DEBUG
1385 omit_env_vars = false;
1386#endif
1387 if (p == current_proc() ||
1388 !cs_restricted(p) ||
1389#if CONFIG_CSR
1390 csr_check(CSR_ALLOW_UNRESTRICTED_DTRACE) == 0 ||
1391#endif
1392 IOCurrentTaskHasEntitlement(SYSCTL_PROCARGS_READ_ENVVARS_ENTITLEMENT)
1393 ) {
1394 omit_env_vars = false;
1395 }
1396
1397 /*
1398 * Copy the top N bytes of the stack.
1399 * On all machines we have so far, the stack grows
1400 * downwards.
1401 *
1402 * If the user expects no more than N bytes of
1403 * argument list, use that as a guess for the
1404 * size.
1405 */
1406
1407 if (!p->user_stack) {
1408 error = EINVAL;
1409 goto finish;
1410 }
1411
1412 /* save off argc, argslen, user_stack before releasing the proc */
1413 argc = p->p_argc;
1414 argslen = p->p_argslen;
1415 user_stack = p->user_stack;
1416
1417 /*
1418 * When these sysctls were introduced, the first string in the strings
1419 * section was just the bare path of the executable. However, for security
1420 * reasons we now prefix this string with executable_path= so it can be
1421 * parsed getenv style. To avoid binary compatability issues with exising
1422 * callers of this sysctl, we strip it off here.
1423 * (rdar://problem/13746466)
1424 */
1425#define EXECUTABLE_KEY "executable_path="
1426 argslen -= strlen(EXECUTABLE_KEY);
1427
1428 if (where == USER_ADDR_NULL && !omit_env_vars) {
1429 /* caller only wants to know length of proc args data.
1430 * If we don't need to omit environment variables, we can skip
1431 * copying the target process stack */
1432 goto calculate_size;
1433 }
1434
1435 smr_proc_task_enter();
1436 uid = kauth_cred_getuid(cred: proc_ucred_smr(p));
1437 smr_proc_task_leave();
1438
1439 if ((uid != kauth_cred_getuid(cred: kauth_cred_get()))
1440 && suser(cred: kauth_cred_get(), acflag: &cur_proc->p_acflag)) {
1441 error = EINVAL;
1442 goto finish;
1443 }
1444
1445 /*
1446 * Before we can block (any VM code), make another
1447 * reference to the map to keep it alive. We do
1448 * that by getting a reference on the task itself.
1449 */
1450 task = proc_task(p);
1451 if (task == NULL) {
1452 error = EINVAL;
1453 goto finish;
1454 }
1455
1456 /*
1457 * Once we have a task reference we can convert that into a
1458 * map reference, which we will use in the calls below. The
1459 * task/process may change its map after we take this reference
1460 * (see execve), but the worst that will happen then is a return
1461 * of stale info (which is always a possibility).
1462 */
1463 task_reference(task);
1464 proc_rele(p);
1465 p = NULL;
1466 proc_map = get_task_map_reference(task);
1467 task_deallocate(task);
1468
1469 if (proc_map == NULL) {
1470 error = EINVAL;
1471 goto finish;
1472 }
1473
1474 effective_page_mask = vm_map_page_mask(map: proc_map);
1475
1476 arg_size = vm_map_round_page(argslen, effective_page_mask);
1477
1478 arg_addr = user_stack - arg_size;
1479
1480 ret = kmem_alloc(map: kernel_map, addrp: &copy_start, size: arg_size,
1481 flags: KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_BSD);
1482 if (ret != KERN_SUCCESS) {
1483 error = ENOMEM;
1484 goto finish;
1485 }
1486
1487 copy_end = copy_start + arg_size;
1488
1489 if (vm_map_copyin(src_map: proc_map, src_addr: (vm_map_address_t)arg_addr,
1490 len: (vm_map_size_t)arg_size, FALSE, copy_result: &tmp) != KERN_SUCCESS) {
1491 error = EIO;
1492 goto finish;
1493 }
1494
1495 /*
1496 * Now that we've done the copyin from the process'
1497 * map, we can release the reference to it.
1498 */
1499 vm_map_deallocate(map: proc_map);
1500 proc_map = NULL;
1501
1502 if (vm_map_copy_overwrite(dst_map: kernel_map,
1503 dst_addr: (vm_map_address_t)copy_start,
1504 copy: tmp, copy_size: (vm_map_size_t) arg_size, FALSE) != KERN_SUCCESS) {
1505 error = EIO;
1506 goto finish;
1507 }
1508 /* tmp was consumed */
1509 tmp = NULL;
1510
1511 if (omit_env_vars) {
1512 argvsize = 0;
1513
1514 /* Iterate over everything in argv, plus one for the bare executable path */
1515 for (current_arg_index = 0; current_arg_index < argc + 1 && argvsize < argslen; ++current_arg_index) {
1516 current_arg = (const char *)(copy_end - argslen) + argvsize;
1517 remaining = argslen - argvsize;
1518 current_arg_len = strnlen(s: current_arg, n: remaining);
1519 if (current_arg_len < remaining) {
1520 /* We have space for the null terminator */
1521 current_arg_len += 1;
1522
1523 if (current_arg_index == 0) {
1524 /* The bare executable path may have multiple null bytes after it for alignment */
1525 while (current_arg_len < remaining && current_arg[current_arg_len] == 0) {
1526 current_arg_len += 1;
1527 }
1528 }
1529 }
1530 argvsize += current_arg_len;
1531 }
1532 assert(argvsize <= argslen);
1533
1534 /* Adjust argslen and copy_end to make the copyout range extend to the end of argv */
1535 copy_end = copy_end - argslen + argvsize;
1536 argslen = argvsize;
1537 }
1538
1539 if (where == USER_ADDR_NULL) {
1540 /* Skip copyout */
1541 goto calculate_size;
1542 }
1543
1544 if (buflen >= argslen) {
1545 data = (caddr_t) (copy_end - argslen);
1546 size = argslen;
1547 } else {
1548 /*
1549 * Before rdar://25397314, this function contained incorrect logic when buflen is less
1550 * than argslen. The problem was that it copied in `buflen` bytes from the end of the target
1551 * process user stack into the beginning of a buffer of size round_page(buflen), and then
1552 * copied out `buflen` bytes from the end of this buffer. The effect of this was that
1553 * the caller of this sysctl would get zeros at the end of their buffer.
1554 *
1555 * To preserve this behavior, bzero everything from copy_end-round_page(buflen)+buflen to the
1556 * end of the buffer. This emulates copying in only `buflen` bytes.
1557 *
1558 *
1559 * In the old code:
1560 *
1561 * copy_start .... size: round_page(buflen) .... copy_end
1562 * [---copied in data (size: buflen)---|--- zeros ----------]
1563 * ^
1564 * data = copy_end - buflen
1565 *
1566 *
1567 * In the new code:
1568 * copy_start .... size: round_page(p->argslen) .... full copy_end
1569 * ^ ....................... p->argslen ...............................^
1570 * ^ ^ truncated copy_end ^
1571 * ^ ^ ^ ^
1572 * ^ ................ argslen ........................ ^
1573 * ^ ^ ^ ^
1574 * [-------copied in data (size: round_page(p->argslen))-------:----env vars---]
1575 * ^ ^
1576 * ^ data = copy_end - buflen
1577 * smallbuffer_start = max(copy_end - round_page(buflen), copy_start)
1578 *
1579 *
1580 * Full copy_end: copy_end calculated from copy_start + round_page(p->argslen)
1581 * Truncated copy_end: copy_end after truncation to remove environment variables.
1582 *
1583 * If environment variables were omitted, then we use the truncated copy_end, otherwise
1584 * we use full copy_end.
1585 *
1586 * smallbuffer_start: represents where copy_start would be in the old code.
1587 * data: The beginning of the region we copyout
1588 */
1589 smallbuffer_start = copy_end - vm_map_round_page(buflen, effective_page_mask);
1590 if (smallbuffer_start < copy_start) {
1591 smallbuffer_start = copy_start;
1592 }
1593 bzero(s: (void *)(smallbuffer_start + buflen), n: copy_end - (smallbuffer_start + buflen));
1594 data = (caddr_t) (copy_end - buflen);
1595 size = buflen;
1596 }
1597
1598 if (argc_yes) {
1599 /* Put processes argc as the first word in the copyout buffer */
1600 suword(addr: where, word: argc);
1601 error = copyout(data, (where + sizeof(int)), size);
1602 size += sizeof(int);
1603 } else {
1604 error = copyout(data, where, size);
1605
1606 /*
1607 * Make the old PROCARGS work to return the executable's path
1608 * But, only if there is enough space in the provided buffer
1609 *
1610 * on entry: data [possibily] points to the beginning of the path
1611 *
1612 * Note: we keep all pointers&sizes aligned to word boundries
1613 */
1614 if ((!error) && (buflen > 0 && (u_int)buflen > size)) {
1615 int binPath_sz, alignedBinPath_sz = 0;
1616 int extraSpaceNeeded, addThis;
1617 user_addr_t placeHere;
1618 char * str = (char *) data;
1619 size_t max_len = size;
1620
1621 /* Some apps are really bad about messing up their stacks
1622 * So, we have to be extra careful about getting the length
1623 * of the executing binary. If we encounter an error, we bail.
1624 */
1625
1626 /* Limit ourselves to PATH_MAX paths */
1627 if (max_len > PATH_MAX) {
1628 max_len = PATH_MAX;
1629 }
1630
1631 binPath_sz = 0;
1632
1633 while ((binPath_sz < max_len - 1) && (*str++ != 0)) {
1634 binPath_sz++;
1635 }
1636
1637 /* If we have a NUL terminator, copy it, too */
1638 if (binPath_sz < max_len - 1) {
1639 binPath_sz += 1;
1640 }
1641
1642 /* Pre-Flight the space requiremnts */
1643
1644 /* Account for the padding that fills out binPath to the next word */
1645 alignedBinPath_sz += (binPath_sz & (sizeof(int) - 1)) ? (sizeof(int) - (binPath_sz & (sizeof(int) - 1))) : 0;
1646
1647 placeHere = where + size;
1648
1649 /* Account for the bytes needed to keep placeHere word aligned */
1650 addThis = (placeHere & (sizeof(int) - 1)) ? (sizeof(int) - (placeHere & (sizeof(int) - 1))) : 0;
1651
1652 /* Add up all the space that is needed */
1653 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1654
1655 /* is there is room to tack on argv[0]? */
1656 if ((buflen & ~(sizeof(int) - 1)) >= (size + extraSpaceNeeded)) {
1657 placeHere += addThis;
1658 suword(addr: placeHere, word: 0);
1659 placeHere += sizeof(int);
1660 suword(addr: placeHere, word: 0xBFFF0000);
1661 placeHere += sizeof(int);
1662 suword(addr: placeHere, word: 0);
1663 placeHere += sizeof(int);
1664 error = copyout(data, placeHere, binPath_sz);
1665 if (!error) {
1666 placeHere += binPath_sz;
1667 suword(addr: placeHere, word: 0);
1668 size += extraSpaceNeeded;
1669 }
1670 }
1671 }
1672 }
1673
1674calculate_size:
1675 /* Size has already been calculated for the where != NULL case */
1676 if (where == USER_ADDR_NULL) {
1677 size = argslen;
1678 if (argc_yes) {
1679 size += sizeof(int);
1680 } else {
1681 /*
1682 * old PROCARGS will return the executable's path and plus some
1683 * extra space for work alignment and data tags
1684 */
1685 size += PATH_MAX + (6 * sizeof(int));
1686 }
1687 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1688 }
1689
1690 *sizep = size;
1691
1692finish:
1693 if (p != NULL) {
1694 proc_rele(p);
1695 }
1696 if (tmp != NULL) {
1697 vm_map_copy_discard(copy: tmp);
1698 }
1699 if (proc_map != NULL) {
1700 vm_map_deallocate(map: proc_map);
1701 }
1702 if (copy_start != (vm_offset_t) 0) {
1703 kmem_free(map: kernel_map, addr: copy_start, size: arg_size);
1704 }
1705 return error;
1706}
1707
1708
1709/*
1710 * Max number of concurrent aio requests
1711 */
1712STATIC int
1713sysctl_aiomax
1714(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1715{
1716 int new_value, changed;
1717 int error = sysctl_io_number(req, bigValue: aio_max_requests, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
1718 if (changed) {
1719 /* make sure the system-wide limit is greater than the per process limit */
1720 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS) {
1721 aio_max_requests = new_value;
1722 } else {
1723 error = EINVAL;
1724 }
1725 }
1726 return error;
1727}
1728
1729
1730/*
1731 * Max number of concurrent aio requests per process
1732 */
1733STATIC int
1734sysctl_aioprocmax
1735(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1736{
1737 int new_value, changed;
1738 int error = sysctl_io_number(req, bigValue: aio_max_requests_per_process, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
1739 if (changed) {
1740 /* make sure per process limit is less than the system-wide limit */
1741 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX) {
1742 aio_max_requests_per_process = new_value;
1743 } else {
1744 error = EINVAL;
1745 }
1746 }
1747 return error;
1748}
1749
1750
1751/*
1752 * Max number of async IO worker threads
1753 */
1754STATIC int
1755sysctl_aiothreads
1756(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1757{
1758 int new_value, changed;
1759 int error = sysctl_io_number(req, bigValue: aio_worker_threads, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
1760 if (changed) {
1761 /* we only allow an increase in the number of worker threads */
1762 if (new_value > aio_worker_threads) {
1763 _aio_create_worker_threads(num: (new_value - aio_worker_threads));
1764 aio_worker_threads = new_value;
1765 } else {
1766 error = EINVAL;
1767 }
1768 }
1769 return error;
1770}
1771
1772
1773/*
1774 * System-wide limit on the max number of processes
1775 */
1776STATIC int
1777sysctl_maxproc
1778(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1779{
1780 int new_value, changed;
1781 int error = sysctl_io_number(req, bigValue: maxproc, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
1782 if (changed) {
1783 AUDIT_ARG(value32, new_value);
1784 /* make sure the system-wide limit is less than the configured hard
1785 * limit set at kernel compilation */
1786 if (new_value <= hard_maxproc && new_value > 0) {
1787 maxproc = new_value;
1788 } else {
1789 error = EINVAL;
1790 }
1791 }
1792 return error;
1793}
1794
1795extern int sched_enable_smt;
1796STATIC int
1797sysctl_sched_enable_smt
1798(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1799{
1800 int new_value, changed;
1801 int error = sysctl_io_number(req, bigValue: sched_enable_smt, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
1802 if (error) {
1803 return error;
1804 }
1805 kern_return_t kret = KERN_SUCCESS;
1806 if (changed) {
1807 AUDIT_ARG(value32, new_value);
1808 if (new_value == 0) {
1809 sched_enable_smt = 0;
1810 kret = enable_smt_processors(false);
1811 } else {
1812 sched_enable_smt = 1;
1813 kret = enable_smt_processors(true);
1814 }
1815 }
1816 switch (kret) {
1817 case KERN_SUCCESS:
1818 error = 0;
1819 break;
1820 case KERN_INVALID_ARGUMENT:
1821 error = EINVAL;
1822 break;
1823 case KERN_FAILURE:
1824 error = EBUSY;
1825 break;
1826 default:
1827 error = ENOENT;
1828 break;
1829 }
1830
1831 return error;
1832}
1833
1834SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
1835 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1836 ostype, 0, "");
1837SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
1838 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1839 osrelease, 0, "");
1840SYSCTL_INT(_kern, KERN_OSREV, osrevision,
1841 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1842 (int *)NULL, BSD, "");
1843SYSCTL_STRING(_kern, KERN_VERSION, version,
1844 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1845 version, 0, "");
1846SYSCTL_STRING(_kern, OID_AUTO, uuid,
1847 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1848 &kernel_uuid_string[0], 0, "");
1849
1850SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig,
1851 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED,
1852 &osbuild_config[0], 0, "");
1853
1854#if DEBUG
1855#ifndef DKPR
1856#define DKPR 1
1857#endif
1858#endif
1859
1860#if DKPR
1861int debug_kprint_syscall = 0;
1862char debug_kprint_syscall_process[MAXCOMLEN + 1];
1863
1864/* Thread safe: bits and string value are not used to reclaim state */
1865SYSCTL_INT(_debug, OID_AUTO, kprint_syscall,
1866 CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
1867SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
1868 CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
1869 "name of process for kprintf syscall tracing");
1870
1871int
1872debug_kprint_current_process(const char **namep)
1873{
1874 struct proc *p = current_proc();
1875
1876 if (p == NULL) {
1877 return 0;
1878 }
1879
1880 if (debug_kprint_syscall_process[0]) {
1881 /* user asked to scope tracing to a particular process name */
1882 if (0 == strncmp(debug_kprint_syscall_process,
1883 p->p_comm, sizeof(debug_kprint_syscall_process))) {
1884 /* no value in telling the user that we traced what they asked */
1885 if (namep) {
1886 *namep = NULL;
1887 }
1888
1889 return 1;
1890 } else {
1891 return 0;
1892 }
1893 }
1894
1895 /* trace all processes. Tell user what we traced */
1896 if (namep) {
1897 *namep = p->p_comm;
1898 }
1899
1900 return 1;
1901}
1902#endif
1903
1904/* PR-5293665: need to use a callback function for kern.osversion to set
1905 * osversion in IORegistry */
1906
1907STATIC int
1908sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1909{
1910 int rval = 0;
1911
1912 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1913
1914 if (req->newptr) {
1915 IORegistrySetOSBuildVersion(build_version: (char *)arg1);
1916 }
1917
1918 return rval;
1919}
1920
1921SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
1922 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
1923 osversion, 256 /* OSVERSIZE*/,
1924 sysctl_osversion, "A", "");
1925
1926static bool
1927_already_set_or_not_launchd(struct sysctl_req *req, char *val)
1928{
1929 if (req->newptr != 0) {
1930 /*
1931 * Can only ever be set by launchd, and only once at boot.
1932 */
1933 if (proc_getpid(req->p) != 1 || val[0] != '\0') {
1934 return true;
1935 }
1936 }
1937 return false;
1938}
1939
1940#define kRootsInstalledReadWriteEntitlement "com.apple.private.roots-installed-read-write"
1941#define kRootsInstalledReadOnlyEntitlement "com.apple.private.roots-installed-read-only"
1942uint64_t roots_installed = 0;
1943
1944static int
1945sysctl_roots_installed
1946(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1947{
1948 int error = 0;
1949
1950 if (req->newptr != 0) {
1951 /* a ReadWrite entitlement is required for updating this syscl
1952 * meanwhile, only allow write once
1953 */
1954 if (!IOCurrentTaskHasEntitlement(kRootsInstalledReadWriteEntitlement) || (roots_installed != 0)) {
1955 return EPERM;
1956 }
1957 } else {
1958 /* for reader of this sysctl, need either ReadWrite or ReadOnly entitlement */
1959 if (!IOCurrentTaskHasEntitlement(kRootsInstalledReadWriteEntitlement) &&
1960 !IOCurrentTaskHasEntitlement(kRootsInstalledReadOnlyEntitlement)) {
1961 return EPERM;
1962 }
1963 }
1964
1965 error = sysctl_handle_quad(oidp, arg1, arg2, req);
1966
1967 return error;
1968}
1969
1970SYSCTL_PROC(_kern, OID_AUTO, roots_installed,
1971 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
1972 &roots_installed, sizeof(roots_installed),
1973 sysctl_roots_installed, "Q", "");
1974
1975#if XNU_TARGET_OS_OSX
1976static int
1977sysctl_system_version_compat
1978(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1979{
1980 int oldval = (task_has_system_version_compat_enabled(task: current_task()));
1981 int new_value = 0, changed = 0;
1982
1983 int error = sysctl_io_number(req, bigValue: oldval, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
1984 if (changed) {
1985 task_set_system_version_compat_enabled(task: current_task(), enable_system_version_compat: (new_value));
1986 }
1987 return error;
1988}
1989
1990SYSCTL_PROC(_kern, OID_AUTO, system_version_compat,
1991 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
1992 0, 0, sysctl_system_version_compat, "A", "");
1993
1994char osproductversioncompat[48] = { '\0' };
1995
1996static int
1997sysctl_osproductversioncompat(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1998{
1999 if (_already_set_or_not_launchd(req, val: osproductversioncompat)) {
2000 return EPERM;
2001 }
2002 return sysctl_handle_string(oidp, arg1, arg2, req);
2003}
2004
2005
2006SYSCTL_PROC(_kern, OID_AUTO, osproductversioncompat,
2007 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2008 osproductversioncompat, sizeof(osproductversioncompat),
2009 sysctl_osproductversioncompat, "A", "The ProductVersion from SystemVersionCompat.plist");
2010#endif
2011
2012char osproductversion[48] = { '\0' };
2013
2014static char iossupportversion_string[48] = { '\0' };
2015
2016static int
2017sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2018{
2019 if (_already_set_or_not_launchd(req, val: osproductversion)) {
2020 return EPERM;
2021 }
2022
2023#if XNU_TARGET_OS_OSX
2024 if (task_has_system_version_compat_enabled(task: current_task()) && (osproductversioncompat[0] != '\0')) {
2025 return sysctl_handle_string(oidp, arg1: osproductversioncompat, arg2, req);
2026 } else {
2027 return sysctl_handle_string(oidp, arg1, arg2, req);
2028 }
2029#else
2030 return sysctl_handle_string(oidp, arg1, arg2, req);
2031#endif
2032}
2033
2034#if XNU_TARGET_OS_OSX
2035static_assert(sizeof(osproductversioncompat) == sizeof(osproductversion),
2036 "osproductversion size matches osproductversioncompat size");
2037#endif
2038
2039SYSCTL_PROC(_kern, OID_AUTO, osproductversion,
2040 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2041 osproductversion, sizeof(osproductversion),
2042 sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist");
2043
2044char osreleasetype[48] = { '\0' };
2045
2046STATIC int
2047sysctl_osreleasetype(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2048{
2049 if (_already_set_or_not_launchd(req, val: osreleasetype)) {
2050 return EPERM;
2051 }
2052 return sysctl_handle_string(oidp, arg1, arg2, req);
2053}
2054
2055void reset_osreleasetype(void);
2056
2057void
2058reset_osreleasetype(void)
2059{
2060 memset(s: osreleasetype, c: 0, n: sizeof(osreleasetype));
2061}
2062
2063SYSCTL_PROC(_kern, OID_AUTO, osreleasetype,
2064 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2065 osreleasetype, sizeof(osreleasetype),
2066 sysctl_osreleasetype, "A", "The ReleaseType from SystemVersion.plist");
2067
2068STATIC int
2069sysctl_iossupportversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2070{
2071 if (_already_set_or_not_launchd(req, val: iossupportversion_string)) {
2072 return EPERM;
2073 }
2074
2075 return sysctl_handle_string(oidp, arg1, arg2, req);
2076}
2077
2078SYSCTL_PROC(_kern, OID_AUTO, iossupportversion,
2079 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2080 iossupportversion_string, sizeof(iossupportversion_string),
2081 sysctl_iossupportversion, "A", "The iOSSupportVersion from SystemVersion.plist");
2082
2083static uint64_t osvariant_status = 0;
2084
2085STATIC int
2086sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2087{
2088 if (req->newptr != 0) {
2089 /*
2090 * Can only ever be set by launchd, and only once.
2091 * Reset by usrctl() -> reset_osvariant_status() during
2092 * userspace reboot, since userspace could reboot into
2093 * a different variant.
2094 */
2095 if (proc_getpid(req->p) != 1 || osvariant_status != 0) {
2096 return EPERM;
2097 }
2098 }
2099
2100 int err = sysctl_handle_quad(oidp, arg1, arg2, req);
2101
2102 reset_debug_syscall_rejection_mode();
2103
2104 return err;
2105}
2106
2107SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
2108 CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
2109 &osvariant_status, sizeof(osvariant_status),
2110 sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
2111
2112static bool
2113_os_variant_check_disabled(enum os_variant_property property)
2114{
2115 return (osvariant_status >> (32 + property)) & 0x1;
2116}
2117
2118static bool
2119_os_variant_has(enum os_variant_status_flags_positions p)
2120{
2121 return ((osvariant_status >> (p * OS_VARIANT_STATUS_BIT_WIDTH)) & OS_VARIANT_STATUS_MASK) == OS_VARIANT_S_YES;
2122}
2123
2124bool
2125os_variant_has_internal_diagnostics(__unused const char *subsystem)
2126{
2127 if (_os_variant_check_disabled(property: OS_VARIANT_PROPERTY_DIAGNOSTICS)) {
2128 return false;
2129 }
2130#if XNU_TARGET_OS_OSX
2131 return _os_variant_has(p: OS_VARIANT_SFP_INTERNAL_CONTENT) || _os_variant_has(p: OS_VARIANT_SFP_INTERNAL_DIAGS_PROFILE);
2132#else
2133 return _os_variant_has(OS_VARIANT_SFP_INTERNAL_RELEASE_TYPE);
2134#endif /* XNU_TARGET_OS_OSX */
2135}
2136
2137void reset_osvariant_status(void);
2138
2139void
2140reset_osvariant_status(void)
2141{
2142 osvariant_status = 0;
2143 reset_debug_syscall_rejection_mode();
2144}
2145
2146extern void commpage_update_dyld_flags(uint64_t);
2147TUNABLE_WRITEABLE(uint64_t, dyld_flags, "dyld_flags", 0);
2148
2149STATIC int
2150sysctl_dyld_flags(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2151{
2152 /*
2153 * Can only ever be set by launchd, possibly several times
2154 * as dyld may change its mind after a userspace reboot.
2155 */
2156 if (req->newptr != 0 && proc_getpid(req->p) != 1) {
2157 return EPERM;
2158 }
2159
2160 int res = sysctl_handle_quad(oidp, arg1, arg2, req);
2161 if (req->newptr && res == 0) {
2162 commpage_update_dyld_flags(dyld_flags);
2163 }
2164 return res;
2165}
2166
2167SYSCTL_PROC(_kern, OID_AUTO, dyld_flags,
2168 CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
2169 &dyld_flags, sizeof(dyld_flags),
2170 sysctl_dyld_flags, "Q", "Opaque flags used to cache dyld system-wide configuration");
2171
2172#if defined(XNU_TARGET_OS_BRIDGE)
2173char macosproductversion[MACOS_VERS_LEN] = { '\0' };
2174
2175SYSCTL_STRING(_kern, OID_AUTO, macosproductversion,
2176 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2177 &macosproductversion[0], MACOS_VERS_LEN, "The currently running macOS ProductVersion (from SystemVersion.plist on macOS)");
2178
2179char macosversion[MACOS_VERS_LEN] = { '\0' };
2180
2181SYSCTL_STRING(_kern, OID_AUTO, macosversion,
2182 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2183 &macosversion[0], MACOS_VERS_LEN, "The currently running macOS build version");
2184#endif
2185
2186STATIC int
2187sysctl_sysctl_bootargs
2188(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2189{
2190 int error;
2191 char buf[BOOT_LINE_LENGTH];
2192
2193 strlcpy(dst: buf, src: PE_boot_args(), BOOT_LINE_LENGTH);
2194 error = sysctl_io_string(req, pValue: buf, BOOT_LINE_LENGTH, trunc: 0, NULL);
2195 return error;
2196}
2197
2198SYSCTL_PROC(_kern, OID_AUTO, bootargs,
2199 CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
2200 NULL, 0,
2201 sysctl_sysctl_bootargs, "A", "bootargs");
2202
2203STATIC int
2204sysctl_kernelcacheuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2205{
2206 int rval = ENOENT;
2207 if (kernelcache_uuid_valid) {
2208 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2209 }
2210 return rval;
2211}
2212
2213SYSCTL_PROC(_kern, OID_AUTO, kernelcacheuuid,
2214 CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2215 kernelcache_uuid_string, sizeof(kernelcache_uuid_string),
2216 sysctl_kernelcacheuuid, "A", "");
2217
2218STATIC int
2219sysctl_systemfilesetuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2220{
2221 int rval = ENOENT;
2222 if (pageablekc_uuid_valid) {
2223 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2224 }
2225 return rval;
2226}
2227
2228SYSCTL_PROC(_kern, OID_AUTO, systemfilesetuuid,
2229 CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2230 pageablekc_uuid_string, sizeof(pageablekc_uuid_string),
2231 sysctl_systemfilesetuuid, "A", "");
2232
2233STATIC int
2234sysctl_auxiliaryfilesetuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2235{
2236 int rval = ENOENT;
2237 if (auxkc_uuid_valid) {
2238 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2239 }
2240 return rval;
2241}
2242
2243SYSCTL_PROC(_kern, OID_AUTO, auxiliaryfilesetuuid,
2244 CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2245 auxkc_uuid_string, sizeof(auxkc_uuid_string),
2246 sysctl_auxiliaryfilesetuuid, "A", "");
2247
2248STATIC int
2249sysctl_filesetuuid(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2250{
2251 int rval = ENOENT;
2252 kc_format_t kcformat;
2253 kernel_mach_header_t *mh;
2254 void *uuid = NULL;
2255 unsigned long uuidlen = 0;
2256 uuid_string_t uuid_str;
2257
2258 if (!PE_get_primary_kc_format(type: &kcformat) || kcformat != KCFormatFileset) {
2259 return rval;
2260 }
2261
2262 mh = (kernel_mach_header_t *)PE_get_kc_header(type: KCKindPrimary);
2263 uuid = getuuidfromheader(mh, &uuidlen);
2264
2265 if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
2266 uuid_unparse_upper(uu: *(uuid_t *)uuid, out: uuid_str);
2267 rval = sysctl_io_string(req, pValue: (char *)uuid_str, valueSize: sizeof(uuid_str), trunc: 0, NULL);
2268 }
2269
2270 return rval;
2271}
2272
2273SYSCTL_PROC(_kern, OID_AUTO, filesetuuid,
2274 CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2275 NULL, 0,
2276 sysctl_filesetuuid, "A", "");
2277
2278
2279SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
2280 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2281 &maxfiles, 0, "");
2282SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
2283 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2284 (int *)NULL, ARG_MAX, "");
2285SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
2286 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2287 (int *)NULL, _POSIX_VERSION, "");
2288SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
2289 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2290 (int *)NULL, NGROUPS_MAX, "");
2291SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
2292 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2293 (int *)NULL, 1, "");
2294#if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
2295SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
2296 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2297 (int *)NULL, 1, "");
2298#else
2299SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
2300 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2301 NULL, 0, "");
2302#endif
2303SYSCTL_INT(_kern, OID_AUTO, num_files,
2304 CTLFLAG_RD | CTLFLAG_LOCKED,
2305 &nfiles, 0, "");
2306SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
2307 CTLFLAG_RD | CTLFLAG_LOCKED,
2308 &numvnodes, 0, "");
2309SYSCTL_INT(_kern, OID_AUTO, num_tasks,
2310 CTLFLAG_RD | CTLFLAG_LOCKED,
2311 &task_max, 0, "");
2312SYSCTL_INT(_kern, OID_AUTO, num_threads,
2313 CTLFLAG_RD | CTLFLAG_LOCKED,
2314 &thread_max, 0, "");
2315SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
2316 CTLFLAG_RD | CTLFLAG_LOCKED,
2317 &task_threadmax, 0, "");
2318SYSCTL_LONG(_kern, OID_AUTO, num_recycledvnodes,
2319 CTLFLAG_RD | CTLFLAG_LOCKED,
2320 &num_recycledvnodes, "");
2321SYSCTL_COMPAT_INT(_kern, OID_AUTO, free_vnodes,
2322 CTLFLAG_RD | CTLFLAG_LOCKED,
2323 &freevnodes, 0, "");
2324
2325STATIC int
2326sysctl_maxvnodes(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2327{
2328 int oldval = desiredvnodes;
2329 int error = sysctl_io_number(req, bigValue: desiredvnodes, valueSize: sizeof(int), pValue: &desiredvnodes, NULL);
2330
2331 if (oldval != desiredvnodes) {
2332 resize_namecache(newsize: desiredvnodes);
2333 }
2334
2335 return error;
2336}
2337
2338SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
2339 CTLFLAG_RW | CTLFLAG_LOCKED,
2340 &nc_disabled, 0, "");
2341
2342SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
2343 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2344 0, 0, sysctl_maxvnodes, "I", "");
2345
2346SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
2347 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2348 0, 0, sysctl_maxproc, "I", "");
2349
2350SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
2351 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2352 0, 0, sysctl_aiomax, "I", "");
2353
2354SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
2355 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2356 0, 0, sysctl_aioprocmax, "I", "");
2357
2358SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
2359 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2360 0, 0, sysctl_aiothreads, "I", "");
2361
2362SYSCTL_PROC(_kern, OID_AUTO, sched_enable_smt,
2363 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
2364 0, 0, sysctl_sched_enable_smt, "I", "");
2365
2366extern int sched_allow_NO_SMT_threads;
2367SYSCTL_INT(_kern, OID_AUTO, sched_allow_NO_SMT_threads,
2368 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2369 &sched_allow_NO_SMT_threads, 0, "");
2370
2371extern int sched_avoid_cpu0;
2372SYSCTL_INT(_kern, OID_AUTO, sched_rt_avoid_cpu0,
2373 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2374 &sched_avoid_cpu0, 0, "If 1, choose cpu0 after all other primaries; if 2, choose cpu0 and cpu1 last, after all other cpus including secondaries");
2375
2376#if (DEVELOPMENT || DEBUG)
2377
2378static int
2379sysctl_kern_max_unsafe_rt_quanta(__unused struct sysctl_oid *oidp,
2380 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2381{
2382 extern void sched_set_max_unsafe_rt_quanta(int);
2383 extern int max_unsafe_rt_quanta;
2384
2385 int new_value, changed;
2386 int old_value = max_unsafe_rt_quanta;
2387 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value,
2388 &changed);
2389 if (changed) {
2390 sched_set_max_unsafe_rt_quanta(new_value);
2391 }
2392
2393 return error;
2394}
2395
2396SYSCTL_PROC(_kern, OID_AUTO, max_unsafe_rt_quanta,
2397 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2398 0, 0, sysctl_kern_max_unsafe_rt_quanta, "I",
2399 "Number of quanta to allow a realtime "
2400 "thread to run before being penalized");
2401
2402static int
2403sysctl_kern_max_unsafe_fixed_quanta(__unused struct sysctl_oid *oidp,
2404 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2405{
2406 extern void sched_set_max_unsafe_fixed_quanta(int);
2407 extern int max_unsafe_fixed_quanta;
2408
2409 int new_value, changed;
2410 int old_value = max_unsafe_fixed_quanta;
2411 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value,
2412 &changed);
2413 if (changed) {
2414 sched_set_max_unsafe_fixed_quanta(new_value);
2415 }
2416
2417 return error;
2418}
2419
2420SYSCTL_PROC(_kern, OID_AUTO, max_unsafe_fixed_quanta,
2421 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2422 0, 0, sysctl_kern_max_unsafe_fixed_quanta, "I",
2423 "Number of quanta to allow a fixed sched mode "
2424 "thread to run before being penalized");
2425
2426static int
2427sysctl_kern_quantum_us(__unused struct sysctl_oid *oidp, __unused void *arg1,
2428 __unused int arg2, struct sysctl_req *req)
2429{
2430 extern uint64_t sysctl_get_quantum_us(void);
2431 const uint64_t quantum_us = sysctl_get_quantum_us();
2432
2433 return sysctl_io_number(req, quantum_us, sizeof(quantum_us), NULL, NULL);
2434}
2435
2436SYSCTL_PROC(_kern, OID_AUTO, quantum_us,
2437 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2438 0, 0, sysctl_kern_quantum_us, "Q",
2439 "Length of scheduling quantum in microseconds");
2440
2441extern int smt_sched_bonus_16ths;
2442SYSCTL_INT(_kern, OID_AUTO, smt_sched_bonus_16ths,
2443 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2444 &smt_sched_bonus_16ths, 0, "");
2445
2446extern int smt_timeshare_enabled;
2447SYSCTL_INT(_kern, OID_AUTO, sched_smt_timeshare_enable,
2448 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2449 &smt_timeshare_enabled, 0, "");
2450
2451extern int sched_smt_balance;
2452SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
2453 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2454 &sched_smt_balance, 0, "");
2455extern int sched_allow_rt_smt;
2456SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt,
2457 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2458 &sched_allow_rt_smt, 0, "");
2459extern int sched_allow_rt_steal;
2460SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_steal,
2461 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2462 &sched_allow_rt_steal, 0, "");
2463extern int sched_backup_cpu_timeout_count;
2464SYSCTL_INT(_kern, OID_AUTO, sched_backup_cpu_timeout_count,
2465 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2466 &sched_backup_cpu_timeout_count, 0, "The maximum number of 10us delays before allowing a backup cpu to select a thread");
2467#if __arm64__
2468/* Scheduler perfcontrol callouts sysctls */
2469SYSCTL_DECL(_kern_perfcontrol_callout);
2470SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
2471 "scheduler perfcontrol callouts");
2472
2473extern int perfcontrol_callout_stats_enabled;
2474SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
2475 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2476 &perfcontrol_callout_stats_enabled, 0, "");
2477
2478extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
2479 perfcontrol_callout_stat_t stat);
2480
2481/* On-Core Callout */
2482STATIC int
2483sysctl_perfcontrol_callout_stat
2484(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2485{
2486 perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
2487 perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
2488 return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
2489 sizeof(int), NULL, NULL);
2490}
2491
2492SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
2493 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2494 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
2495 sysctl_perfcontrol_callout_stat, "I", "");
2496SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
2497 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2498 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
2499 sysctl_perfcontrol_callout_stat, "I", "");
2500SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
2501 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2502 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
2503 sysctl_perfcontrol_callout_stat, "I", "");
2504SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
2505 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2506 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
2507 sysctl_perfcontrol_callout_stat, "I", "");
2508SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
2509 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2510 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
2511 sysctl_perfcontrol_callout_stat, "I", "");
2512SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
2513 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2514 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
2515 sysctl_perfcontrol_callout_stat, "I", "");
2516SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
2517 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2518 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
2519 sysctl_perfcontrol_callout_stat, "I", "");
2520SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
2521 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2522 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
2523 sysctl_perfcontrol_callout_stat, "I", "");
2524
2525#if __AMP__
2526extern int sched_amp_idle_steal;
2527SYSCTL_INT(_kern, OID_AUTO, sched_amp_idle_steal,
2528 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2529 &sched_amp_idle_steal, 0, "");
2530extern int sched_amp_spill_steal;
2531SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_steal,
2532 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2533 &sched_amp_spill_steal, 0, "");
2534extern int sched_amp_spill_count;
2535SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_count,
2536 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2537 &sched_amp_spill_count, 0, "");
2538extern int sched_amp_spill_deferred_ipi;
2539SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_deferred_ipi,
2540 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2541 &sched_amp_spill_deferred_ipi, 0, "");
2542extern int sched_amp_pcores_preempt_immediate_ipi;
2543SYSCTL_INT(_kern, OID_AUTO, sched_amp_pcores_preempt_immediate_ipi,
2544 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2545 &sched_amp_pcores_preempt_immediate_ipi, 0, "");
2546#endif /* __AMP__ */
2547#endif /* __arm64__ */
2548
2549#if __arm64__
2550extern int legacy_footprint_entitlement_mode;
2551SYSCTL_INT(_kern, OID_AUTO, legacy_footprint_entitlement_mode,
2552 CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2553 &legacy_footprint_entitlement_mode, 0, "");
2554#endif /* __arm64__ */
2555
2556/*
2557 * Realtime threads are ordered by highest priority first then,
2558 * for threads of the same priority, by earliest deadline first.
2559 * But if sched_rt_runq_strict_priority is false (the default),
2560 * a lower priority thread with an earlier deadline will be preferred
2561 * over a higher priority thread with a later deadline, as long as
2562 * both threads' computations will fit before the later deadline.
2563 */
2564extern int sched_rt_runq_strict_priority;
2565SYSCTL_INT(_kern, OID_AUTO, sched_rt_runq_strict_priority,
2566 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2567 &sched_rt_runq_strict_priority, 0, "");
2568
2569static int
2570sysctl_kern_sched_rt_n_backup_processors(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2571{
2572 int new_value, changed;
2573 int old_value = sched_get_rt_n_backup_processors();
2574 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2575 if (changed) {
2576 sched_set_rt_n_backup_processors(new_value);
2577 }
2578
2579 return error;
2580}
2581
2582SYSCTL_PROC(_kern, OID_AUTO, sched_rt_n_backup_processors,
2583 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2584 0, 0, sysctl_kern_sched_rt_n_backup_processors, "I", "");
2585
2586static int
2587sysctl_kern_sched_rt_deadline_epsilon_us(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2588{
2589 int new_value, changed;
2590 int old_value = sched_get_rt_deadline_epsilon();
2591 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2592 if (changed) {
2593 sched_set_rt_deadline_epsilon(new_value);
2594 }
2595
2596 return error;
2597}
2598
2599SYSCTL_PROC(_kern, OID_AUTO, sched_rt_deadline_epsilon_us,
2600 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2601 0, 0, sysctl_kern_sched_rt_deadline_epsilon_us, "I", "");
2602
2603extern int sched_idle_delay_cpuid;
2604SYSCTL_INT(_kern, OID_AUTO, sched_idle_delay_cpuid,
2605 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2606 &sched_idle_delay_cpuid, 0, "This cpuid will be delayed by 500us on exiting idle, to simulate interrupt or preemption delays when testing the scheduler");
2607
2608static int
2609sysctl_kern_sched_powered_cores(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2610{
2611 int new_value, changed;
2612 int old_value = sched_get_powered_cores();
2613 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2614 if (changed) {
2615 sched_set_powered_cores(new_value);
2616 }
2617
2618 return error;
2619}
2620
2621SYSCTL_PROC(_kern, OID_AUTO, sched_powered_cores,
2622 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2623 0, 0, sysctl_kern_sched_powered_cores, "I", "");
2624
2625#endif /* (DEVELOPMENT || DEBUG) */
2626
2627extern uint64_t perfcontrol_requested_recommended_cores;
2628SYSCTL_QUAD(_kern, OID_AUTO, sched_recommended_cores,
2629 CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2630 &perfcontrol_requested_recommended_cores, "");
2631
2632static int
2633sysctl_kern_suspend_cluster_powerdown(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2634{
2635 int new_value, changed;
2636 int old_value = get_cluster_powerdown_user_suspended();
2637 int error = sysctl_io_number(req, bigValue: old_value, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
2638 if (!error && changed) {
2639 if (new_value > 0) {
2640 error = suspend_cluster_powerdown_from_user();
2641 } else {
2642 error = resume_cluster_powerdown_from_user();
2643 }
2644 if (error) {
2645 error = EALREADY;
2646 }
2647 }
2648
2649 return error;
2650}
2651
2652SYSCTL_PROC(_kern, OID_AUTO, suspend_cluster_powerdown,
2653 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2654 0, 0, sysctl_kern_suspend_cluster_powerdown, "I", "");
2655
2656
2657STATIC int
2658sysctl_securelvl
2659(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2660{
2661 int new_value, changed;
2662 int error = sysctl_io_number(req, bigValue: securelevel, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
2663 if (changed) {
2664 if (!(new_value < securelevel && proc_getpid(req->p) != 1)) {
2665 proc_list_lock();
2666 securelevel = new_value;
2667 proc_list_unlock();
2668 } else {
2669 error = EPERM;
2670 }
2671 }
2672 return error;
2673}
2674
2675SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
2676 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2677 0, 0, sysctl_securelvl, "I", "");
2678
2679
2680STATIC int
2681sysctl_domainname
2682(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2683{
2684 int error, changed;
2685 char tmpname[MAXHOSTNAMELEN] = {};
2686
2687 lck_mtx_lock(lck: &domainname_lock);
2688 strlcpy(dst: tmpname, src: domainname, n: sizeof(tmpname));
2689 lck_mtx_unlock(lck: &domainname_lock);
2690
2691 error = sysctl_io_string(req, pValue: tmpname, valueSize: sizeof(tmpname), trunc: 0, changed: &changed);
2692 if (!error && changed) {
2693 lck_mtx_lock(lck: &domainname_lock);
2694 strlcpy(dst: domainname, src: tmpname, n: sizeof(domainname));
2695 lck_mtx_unlock(lck: &domainname_lock);
2696 }
2697 return error;
2698}
2699
2700SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
2701 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2702 0, 0, sysctl_domainname, "A", "");
2703
2704SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
2705 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2706 &hostid, 0, "");
2707
2708STATIC int
2709sysctl_hostname
2710(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2711{
2712 int error, changed;
2713 char tmpname[MAXHOSTNAMELEN] = {};
2714 const char * name;
2715
2716#if XNU_TARGET_OS_OSX
2717 name = hostname;
2718#else /* XNU_TARGET_OS_OSX */
2719#define ENTITLEMENT_USER_ASSIGNED_DEVICE_NAME \
2720 "com.apple.developer.device-information.user-assigned-device-name"
2721 if (csproc_get_platform_binary(current_proc()) ||
2722 IOCurrentTaskHasEntitlement(ENTITLEMENT_USER_ASSIGNED_DEVICE_NAME)) {
2723 name = hostname;
2724 } else {
2725 /* Deny writes if we don't pass entitlement check */
2726 if (req->newptr) {
2727 return EPERM;
2728 }
2729
2730 name = "localhost";
2731 }
2732#endif /* ! XNU_TARGET_OS_OSX */
2733
2734 lck_mtx_lock(lck: &hostname_lock);
2735 strlcpy(dst: tmpname, src: name, n: sizeof(tmpname));
2736 lck_mtx_unlock(lck: &hostname_lock);
2737
2738 error = sysctl_io_string(req, pValue: tmpname, valueSize: sizeof(tmpname), trunc: 1, changed: &changed);
2739 if (!error && changed) {
2740 lck_mtx_lock(lck: &hostname_lock);
2741 strlcpy(dst: hostname, src: tmpname, n: sizeof(hostname));
2742 lck_mtx_unlock(lck: &hostname_lock);
2743 }
2744 return error;
2745}
2746
2747SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
2748 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2749 0, 0, sysctl_hostname, "A", "");
2750
2751STATIC int
2752sysctl_procname
2753(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2754{
2755 /* Original code allowed writing, I'm copying this, although this all makes
2756 * no sense to me. Besides, this sysctl is never used. */
2757 return sysctl_io_string(req, pValue: &req->p->p_name[0], valueSize: (2 * MAXCOMLEN + 1), trunc: 1, NULL);
2758}
2759
2760SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
2761 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2762 0, 0, sysctl_procname, "A", "");
2763
2764SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
2765 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2766 &speculative_reads_disabled, 0, "");
2767
2768SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
2769 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2770 &preheat_max_bytes, 0, "");
2771
2772SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
2773 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2774 &preheat_min_bytes, 0, "");
2775
2776SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
2777 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2778 &speculative_prefetch_max, 0, "");
2779
2780SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
2781 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2782 &speculative_prefetch_max_iosize, 0, "");
2783
2784SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
2785 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2786 &vm_page_free_target, 0, "");
2787
2788SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
2789 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2790 &vm_page_free_min, 0, "");
2791
2792SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
2793 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2794 &vm_page_free_reserved, 0, "");
2795
2796SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
2797 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2798 &vm_pageout_state.vm_page_speculative_percentage, 0, "");
2799
2800SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
2801 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2802 &vm_pageout_state.vm_page_speculative_q_age_ms, 0, "");
2803
2804SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
2805 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2806 &vm_max_delayed_work_limit, 0, "");
2807
2808SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
2809 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2810 &vm_max_batch, 0, "");
2811
2812SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
2813 CTLFLAG_RD | CTLFLAG_LOCKED,
2814 &bootsessionuuid_string, sizeof(bootsessionuuid_string), "");
2815
2816
2817STATIC int
2818sysctl_boottime
2819(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2820{
2821 struct timeval tv;
2822 boottime_timeval(tv: &tv);
2823 struct proc *p = req->p;
2824
2825 if (proc_is64bit(p)) {
2826 struct user64_timeval t = {};
2827 t.tv_sec = tv.tv_sec;
2828 t.tv_usec = tv.tv_usec;
2829 return sysctl_io_opaque(req, pValue: &t, valueSize: sizeof(t), NULL);
2830 } else {
2831 struct user32_timeval t = {};
2832 t.tv_sec = (user32_time_t)tv.tv_sec;
2833 t.tv_usec = tv.tv_usec;
2834 return sysctl_io_opaque(req, pValue: &t, valueSize: sizeof(t), NULL);
2835 }
2836}
2837
2838SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
2839 CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2840 0, 0, sysctl_boottime, "S,timeval", "");
2841
2842extern bool IOGetBootUUID(char *);
2843
2844/* non-static: written by imageboot.c */
2845uuid_string_t fake_bootuuid;
2846
2847STATIC int
2848sysctl_bootuuid
2849(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2850{
2851 int error = ENOENT;
2852
2853 /* check the first byte to see if the string has been
2854 * populated. this is a uuid_STRING_t, this check would
2855 * not work with a uuid_t.
2856 */
2857 if (fake_bootuuid[0] != '\0') {
2858 error = sysctl_io_string(req, pValue: fake_bootuuid, valueSize: 0, trunc: 0, NULL);
2859 goto out;
2860 }
2861
2862 uuid_string_t uuid_string;
2863 if (IOGetBootUUID(uuid_string)) {
2864 uuid_t boot_uuid;
2865 error = uuid_parse(in: uuid_string, uu: boot_uuid);
2866 if (!error) {
2867 error = sysctl_io_string(req, __DECONST(char *, uuid_string), valueSize: 0, trunc: 0, NULL);
2868 }
2869 }
2870
2871out:
2872 return error;
2873}
2874
2875SYSCTL_PROC(_kern, OID_AUTO, bootuuid,
2876 CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2877 0, 0, sysctl_bootuuid, "A", "");
2878
2879
2880extern bool IOGetApfsPrebootUUID(char *);
2881extern bool IOGetAssociatedApfsVolgroupUUID(char *);
2882
2883STATIC int
2884sysctl_apfsprebootuuid
2885(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2886{
2887 int error = ENOENT;
2888
2889 uuid_string_t uuid_string;
2890 if (IOGetApfsPrebootUUID(uuid_string)) {
2891 uuid_t apfs_preboot_uuid;
2892 error = uuid_parse(in: uuid_string, uu: apfs_preboot_uuid);
2893 if (!error) {
2894 error = sysctl_io_string(req, __DECONST(char *, uuid_string), valueSize: 0, trunc: 0, NULL);
2895 }
2896 }
2897
2898 return error;
2899}
2900
2901SYSCTL_PROC(_kern, OID_AUTO, apfsprebootuuid,
2902 CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2903 0, 0, sysctl_apfsprebootuuid, "A", "");
2904
2905STATIC int
2906sysctl_targetsystemvolgroupuuid
2907(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2908{
2909 int error = ENOENT;
2910
2911 uuid_string_t uuid_string;
2912 if (IOGetApfsPrebootUUID(uuid_string)) {
2913 uuid_t apfs_preboot_uuid;
2914 error = uuid_parse(in: uuid_string, uu: apfs_preboot_uuid);
2915 if (!error) {
2916 error = sysctl_io_string(req, __DECONST(char *, uuid_string), valueSize: 0, trunc: 0, NULL);
2917 }
2918 } else {
2919 /*
2920 * In special boot modes, such as kcgen-mode, the
2921 * apfs-preboot-uuid property will not be set. Instead, a
2922 * different property, associated-volume-group, will be set
2923 * which indicates the UUID of the VolumeGroup containing the
2924 * system volume into which you will boot.
2925 */
2926 if (IOGetAssociatedApfsVolgroupUUID(uuid_string)) {
2927 uuid_t apfs_preboot_uuid;
2928 error = uuid_parse(in: uuid_string, uu: apfs_preboot_uuid);
2929 if (!error) {
2930 error = sysctl_io_string(req, __DECONST(char *, uuid_string), valueSize: 0, trunc: 0, NULL);
2931 }
2932 }
2933 }
2934
2935 return error;
2936}
2937
2938SYSCTL_PROC(_kern, OID_AUTO, targetsystemvolgroupuuid,
2939 CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
2940 0, 0, sysctl_targetsystemvolgroupuuid, "A", "");
2941
2942
2943extern bool IOGetBootManifestHash(char *, size_t *);
2944extern bool IOGetBootObjectsPath(char *);
2945
2946STATIC int
2947sysctl_bootobjectspath
2948(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2949{
2950 int error = ENOENT;
2951
2952#if defined(__x86_64__)
2953 /* auth-root-dmg is used for the Intel BaseSystem in some flows,
2954 * e.g. createinstallmedia and as part of upgrading from 10.15 or earlier
2955 * under these scenarios, set_fake_bootuuid will be called when pivoting to
2956 * the new root filesystem. need honor the fake bootuuid.
2957 */
2958 if (fake_bootuuid[0] != '\0') {
2959 error = sysctl_io_string(req, fake_bootuuid, 0, 0, NULL);
2960 } else {
2961 /* for intel mac, boot objects reside in [preboot volume]/[bootuuid]
2962 * bootuuid and apfsprebootuuid are populated by efiboot and they are alias.
2963 */
2964 uuid_string_t uuid_string;
2965 if (IOGetBootUUID(uuid_string)) {
2966 uuid_t boot_uuid;
2967 error = uuid_parse(uuid_string, boot_uuid);
2968 if (!error) {
2969 error = sysctl_io_string(req, (char *)uuid_string, 0, 0, NULL);
2970 }
2971 }
2972 }
2973#else
2974 char boot_obj_path[MAXPATHLEN] = { "\0" };
2975 static const char kAsciiHexChars[] = "0123456789ABCDEF";
2976 unsigned int i, j;
2977
2978 /* Hashed with SHA2-384 or SHA1, boot manifest hash is 48 bytes or 20 bytes
2979 * hence, need a 97 bytes char array for the string.
2980 */
2981 size_t hash_data_size = CCSHA384_OUTPUT_SIZE;
2982 char hash_data[CCSHA384_OUTPUT_SIZE] = { "\0" };
2983 char boot_manifest_hash[CCSHA384_OUTPUT_SIZE * 2 + 1] = { "\0" };;
2984
2985 /* for Apple Silicon Macs, there is a boot-objects-path under IODeviceTree:/chosen
2986 * and boot objects reside in [preboot volume]/[boot-objects-path]
2987 * for embedded platforms, there would be a boot-manifest-hash under IODeviceTree:/chosen
2988 * and boot objects reside in [preboot volume]/[boot-manifest-hash]
2989 */
2990 if (IOGetBootObjectsPath(boot_obj_path)) {
2991 error = sysctl_io_string(req, pValue: (char *)boot_obj_path, valueSize: 0, trunc: 0, NULL);
2992 } else if (IOGetBootManifestHash(hash_data, &hash_data_size)) {
2993 j = 0;
2994 for (i = 0; i < hash_data_size; ++i) {
2995 char octet = hash_data[i];
2996 boot_manifest_hash[j++] = kAsciiHexChars[((octet & 0xF0) >> 4)];
2997 boot_manifest_hash[j++] = kAsciiHexChars[(octet & 0x0F)];
2998 }
2999 /* make sure string has null termination */
3000 boot_manifest_hash[j] = '\0';
3001 error = sysctl_io_string(req, pValue: (char *)boot_manifest_hash, valueSize: 0, trunc: 0, NULL);
3002 }
3003#endif
3004 return error;
3005}
3006
3007SYSCTL_PROC(_kern, OID_AUTO, bootobjectspath,
3008 CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
3009 0, 0, sysctl_bootobjectspath, "A", "");
3010
3011
3012STATIC int
3013sysctl_symfile
3014(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3015{
3016 char *str;
3017 int error = get_kernel_symfile(req->p, &str);
3018 if (error) {
3019 return error;
3020 }
3021 return sysctl_io_string(req, pValue: str, valueSize: 0, trunc: 0, NULL);
3022}
3023
3024
3025SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
3026 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
3027 0, 0, sysctl_symfile, "A", "");
3028
3029#if CONFIG_NETBOOT
3030STATIC int
3031sysctl_netboot
3032(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3033{
3034 return sysctl_io_number(req, bigValue: netboot_root(), valueSize: sizeof(int), NULL, NULL);
3035}
3036
3037SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
3038 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3039 0, 0, sysctl_netboot, "I", "");
3040#endif
3041
3042#ifdef CONFIG_IMGSRC_ACCESS
3043/*
3044 * Legacy--act as if only one layer of nesting is possible.
3045 */
3046STATIC int
3047sysctl_imgsrcdev
3048(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3049{
3050 vfs_context_t ctx = vfs_context_current();
3051 vnode_t devvp;
3052 int result;
3053
3054 if (!vfs_context_issuser(ctx)) {
3055 return EPERM;
3056 }
3057
3058 if (imgsrc_rootvnodes[0] == NULL) {
3059 return ENOENT;
3060 }
3061
3062 result = vnode_getwithref(vp: imgsrc_rootvnodes[0]);
3063 if (result != 0) {
3064 return result;
3065 }
3066
3067 devvp = vnode_mount(vp: imgsrc_rootvnodes[0])->mnt_devvp;
3068 result = vnode_getwithref(vp: devvp);
3069 if (result != 0) {
3070 goto out;
3071 }
3072
3073 result = sysctl_io_number(req, bigValue: vnode_specrdev(vp: devvp), valueSize: sizeof(dev_t), NULL, NULL);
3074
3075 vnode_put(vp: devvp);
3076out:
3077 vnode_put(vp: imgsrc_rootvnodes[0]);
3078 return result;
3079}
3080
3081SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
3082 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3083 0, 0, sysctl_imgsrcdev, "I", "");
3084
3085STATIC int
3086sysctl_imgsrcinfo
3087(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3088{
3089 int error;
3090 struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {}; /* 2 for now, no problem */
3091 uint32_t i;
3092 vnode_t rvp, devvp;
3093
3094 if (imgsrc_rootvnodes[0] == NULLVP) {
3095 return ENXIO;
3096 }
3097
3098 for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
3099 /*
3100 * Go get the root vnode.
3101 */
3102 rvp = imgsrc_rootvnodes[i];
3103 if (rvp == NULLVP) {
3104 break;
3105 }
3106
3107 error = vnode_get(rvp);
3108 if (error != 0) {
3109 return error;
3110 }
3111
3112 /*
3113 * For now, no getting at a non-local volume.
3114 */
3115 devvp = vnode_mount(vp: rvp)->mnt_devvp;
3116 if (devvp == NULL) {
3117 vnode_put(vp: rvp);
3118 return EINVAL;
3119 }
3120
3121 error = vnode_getwithref(vp: devvp);
3122 if (error != 0) {
3123 vnode_put(vp: rvp);
3124 return error;
3125 }
3126
3127 /*
3128 * Fill in info.
3129 */
3130 info[i].ii_dev = vnode_specrdev(vp: devvp);
3131 info[i].ii_flags = 0;
3132 info[i].ii_height = i;
3133 bzero(s: info[i].ii_reserved, n: sizeof(info[i].ii_reserved));
3134
3135 vnode_put(vp: devvp);
3136 vnode_put(vp: rvp);
3137 }
3138
3139 return sysctl_io_opaque(req, pValue: info, valueSize: i * sizeof(info[0]), NULL);
3140}
3141
3142SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
3143 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
3144 0, 0, sysctl_imgsrcinfo, "I", "");
3145
3146#endif /* CONFIG_IMGSRC_ACCESS */
3147
3148
3149SYSCTL_DECL(_kern_timer);
3150SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");
3151
3152
3153SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
3154 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3155 &mach_timer_coalescing_enabled, 0, "");
3156
3157SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
3158 CTLFLAG_RW | CTLFLAG_LOCKED,
3159 &timer_deadline_tracking_bin_1, "");
3160SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
3161 CTLFLAG_RW | CTLFLAG_LOCKED,
3162 &timer_deadline_tracking_bin_2, "");
3163
3164SYSCTL_DECL(_kern_timer_longterm);
3165SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
3166
3167
3168/* Must match definition in osfmk/kern/timer_call.c */
3169enum {
3170 THRESHOLD, QCOUNT,
3171 ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
3172 LATENCY, LATENCY_MIN, LATENCY_MAX, LONG_TERM_SCAN_LIMIT,
3173 LONG_TERM_SCAN_INTERVAL, LONG_TERM_SCAN_PAUSES,
3174 SCAN_LIMIT, SCAN_INTERVAL, SCAN_PAUSES, SCAN_POSTPONES,
3175};
3176extern uint64_t timer_sysctl_get(int);
3177extern int timer_sysctl_set(int, uint64_t);
3178
3179STATIC int
3180sysctl_timer
3181(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3182{
3183 int oid = (int)arg1;
3184 uint64_t value = timer_sysctl_get(oid);
3185 uint64_t new_value;
3186 int error;
3187 int changed;
3188
3189 error = sysctl_io_number(req, bigValue: value, valueSize: sizeof(value), pValue: &new_value, changed: &changed);
3190 if (changed) {
3191 error = timer_sysctl_set(oid, new_value);
3192 }
3193
3194 return error;
3195}
3196
3197SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
3198 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3199 (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
3200SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
3201 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3202 (void *) LONG_TERM_SCAN_LIMIT, 0, sysctl_timer, "Q", "");
3203SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval,
3204 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3205 (void *) LONG_TERM_SCAN_INTERVAL, 0, sysctl_timer, "Q", "");
3206
3207SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
3208 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3209 (void *) QCOUNT, 0, sysctl_timer, "Q", "");
3210SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
3211 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3212 (void *) LONG_TERM_SCAN_PAUSES, 0, sysctl_timer, "Q", "");
3213
3214#if DEBUG
3215SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
3216 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3217 (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
3218SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
3219 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3220 (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
3221SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
3222 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3223 (void *) ESCALATES, 0, sysctl_timer, "Q", "");
3224SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
3225 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3226 (void *) SCANS, 0, sysctl_timer, "Q", "");
3227SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
3228 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3229 (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
3230SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
3231 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3232 (void *) LATENCY, 0, sysctl_timer, "Q", "");
3233SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
3234 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3235 (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
3236SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
3237 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3238 (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
3239#endif /* DEBUG */
3240
3241SYSCTL_PROC(_kern_timer, OID_AUTO, scan_limit,
3242 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3243 (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
3244SYSCTL_PROC(_kern_timer, OID_AUTO, scan_interval,
3245 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3246 (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", "");
3247SYSCTL_PROC(_kern_timer, OID_AUTO, scan_pauses,
3248 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3249 (void *) SCAN_PAUSES, 0, sysctl_timer, "Q", "");
3250SYSCTL_PROC(_kern_timer, OID_AUTO, scan_postpones,
3251 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3252 (void *) SCAN_POSTPONES, 0, sysctl_timer, "Q", "");
3253
3254STATIC int
3255sysctl_usrstack
3256(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3257{
3258 return sysctl_io_number(req, bigValue: (int)req->p->user_stack, valueSize: sizeof(int), NULL, NULL);
3259}
3260
3261SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
3262 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3263 0, 0, sysctl_usrstack, "I", "");
3264
3265STATIC int
3266sysctl_usrstack64
3267(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3268{
3269 return sysctl_io_number(req, bigValue: req->p->user_stack, valueSize: sizeof(req->p->user_stack), NULL, NULL);
3270}
3271
3272SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
3273 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
3274 0, 0, sysctl_usrstack64, "Q", "");
3275
3276
3277#if EXCLAVES_COREDUMP
3278
3279/* secure kernel coredump support. */
3280extern unsigned int sc_dump_mode;
3281SYSCTL_UINT(_kern, OID_AUTO, secure_coredump, CTLFLAG_RD, &sc_dump_mode, 0, "secure_coredump");
3282
3283#endif /* EXCLAVES_COREDUMP */
3284
3285
3286#if CONFIG_COREDUMP
3287
3288SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
3289 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
3290 corefilename, sizeof(corefilename), "");
3291
3292SYSCTL_STRING(_kern, OID_AUTO, drivercorefile,
3293 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
3294 drivercorefilename, sizeof(drivercorefilename), "");
3295
3296STATIC int
3297sysctl_coredump
3298(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3299{
3300#ifdef SECURE_KERNEL
3301 (void)req;
3302 return ENOTSUP;
3303#else
3304 int new_value, changed;
3305 int error = sysctl_io_number(req, bigValue: do_coredump, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
3306 if (changed) {
3307 if ((new_value == 0) || (new_value == 1)) {
3308 do_coredump = new_value;
3309 } else {
3310 error = EINVAL;
3311 }
3312 }
3313 return error;
3314#endif
3315}
3316
3317SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
3318 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3319 0, 0, sysctl_coredump, "I", "");
3320
3321STATIC int
3322sysctl_suid_coredump
3323(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3324{
3325#ifdef SECURE_KERNEL
3326 (void)req;
3327 return ENOTSUP;
3328#else
3329 int new_value, changed;
3330 int error = sysctl_io_number(req, bigValue: sugid_coredump, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
3331 if (changed) {
3332 if ((new_value == 0) || (new_value == 1)) {
3333 sugid_coredump = new_value;
3334 } else {
3335 error = EINVAL;
3336 }
3337 }
3338 return error;
3339#endif
3340}
3341
3342SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
3343 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3344 0, 0, sysctl_suid_coredump, "I", "");
3345
3346#endif /* CONFIG_COREDUMP */
3347
3348STATIC int
3349sysctl_delayterm
3350(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3351{
3352 struct proc *p = req->p;
3353 int new_value, changed;
3354 int error = sysctl_io_number(req, bigValue: (req->p->p_lflag & P_LDELAYTERM)? 1: 0, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
3355 if (changed) {
3356 proc_lock(p);
3357 if (new_value) {
3358 req->p->p_lflag |= P_LDELAYTERM;
3359 } else {
3360 req->p->p_lflag &= ~P_LDELAYTERM;
3361 }
3362 proc_unlock(p);
3363 }
3364 return error;
3365}
3366
3367SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
3368 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3369 0, 0, sysctl_delayterm, "I", "");
3370
3371
3372STATIC int
3373sysctl_rage_vnode
3374(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3375{
3376 struct proc *p = req->p;
3377 struct uthread *ut;
3378 int new_value, old_value, changed;
3379 int error;
3380
3381 ut = current_uthread();
3382
3383 if (ut->uu_flag & UT_RAGE_VNODES) {
3384 old_value = KERN_RAGE_THREAD;
3385 } else if (p->p_lflag & P_LRAGE_VNODES) {
3386 old_value = KERN_RAGE_PROC;
3387 } else {
3388 old_value = 0;
3389 }
3390
3391 error = sysctl_io_number(req, bigValue: old_value, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
3392
3393 if ((error == 0) && (changed != 0)) {
3394 switch (new_value) {
3395 case KERN_RAGE_PROC:
3396 proc_lock(p);
3397 p->p_lflag |= P_LRAGE_VNODES;
3398 proc_unlock(p);
3399 break;
3400 case KERN_UNRAGE_PROC:
3401 proc_lock(p);
3402 p->p_lflag &= ~P_LRAGE_VNODES;
3403 proc_unlock(p);
3404 break;
3405
3406 case KERN_RAGE_THREAD:
3407 ut->uu_flag |= UT_RAGE_VNODES;
3408 break;
3409 case KERN_UNRAGE_THREAD:
3410 ut = current_uthread();
3411 ut->uu_flag &= ~UT_RAGE_VNODES;
3412 break;
3413 }
3414 }
3415 return error;
3416}
3417
3418SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
3419 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
3420 0, 0, sysctl_rage_vnode, "I", "");
3421
3422/* XXX until filecoordinationd fixes a bit of inverted logic. */
3423STATIC int
3424sysctl_vfsnspace
3425(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3426{
3427 int old_value = 0, new_value, changed;
3428
3429 return sysctl_io_number(req, bigValue: old_value, valueSize: sizeof(int), pValue: &new_value,
3430 changed: &changed);
3431}
3432
3433SYSCTL_PROC(_kern, OID_AUTO, vfsnspace,
3434 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
3435 0, 0, sysctl_vfsnspace, "I", "");
3436
3437/* XXX move this interface into libproc and remove this sysctl */
3438STATIC int
3439sysctl_setthread_cpupercent
3440(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3441{
3442 int new_value, old_value;
3443 int error = 0;
3444 kern_return_t kret = KERN_SUCCESS;
3445 uint8_t percent = 0;
3446 int ms_refill = 0;
3447
3448 if (!req->newptr) {
3449 return 0;
3450 }
3451
3452 old_value = 0;
3453
3454 if ((error = sysctl_io_number(req, bigValue: old_value, valueSize: sizeof(old_value), pValue: &new_value, NULL)) != 0) {
3455 return error;
3456 }
3457
3458 percent = new_value & 0xff; /* low 8 bytes for perent */
3459 ms_refill = (new_value >> 8) & 0xffffff; /* upper 24bytes represent ms refill value */
3460 if (percent > 100) {
3461 return EINVAL;
3462 }
3463
3464 /*
3465 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
3466 */
3467 kret = percent == 0 ?
3468 thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, percentage: 0, interval_ns: 0) :
3469 thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percentage: percent, interval_ns: ms_refill * (int)NSEC_PER_MSEC);
3470
3471 if (kret != 0) {
3472 return EIO;
3473 }
3474
3475 return 0;
3476}
3477
3478SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
3479 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
3480 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
3481
3482
3483STATIC int
3484sysctl_kern_check_openevt
3485(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3486{
3487 struct proc *p = req->p;
3488 int new_value, old_value, changed;
3489 int error;
3490
3491 if (p->p_flag & P_CHECKOPENEVT) {
3492 old_value = KERN_OPENEVT_PROC;
3493 } else {
3494 old_value = 0;
3495 }
3496
3497 error = sysctl_io_number(req, bigValue: old_value, valueSize: sizeof(int), pValue: &new_value, changed: &changed);
3498
3499 if ((error == 0) && (changed != 0)) {
3500 switch (new_value) {
3501 case KERN_OPENEVT_PROC:
3502 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
3503 break;
3504
3505 case KERN_UNOPENEVT_PROC:
3506 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
3507 break;
3508
3509 default:
3510 error = EINVAL;
3511 }
3512 }
3513 return error;
3514}
3515
3516SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
3517 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
3518
3519
3520#if DEVELOPMENT || DEBUG
3521STATIC int
3522sysctl_nx
3523(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3524{
3525#ifdef SECURE_KERNEL
3526 (void)req;
3527 return ENOTSUP;
3528#else
3529 int new_value, changed;
3530 int error;
3531
3532 error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
3533 if (error) {
3534 return error;
3535 }
3536
3537 if (changed) {
3538#if defined(__x86_64__)
3539 /*
3540 * Only allow setting if NX is supported on the chip
3541 */
3542 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) {
3543 return ENOTSUP;
3544 }
3545#endif
3546 nx_enabled = new_value;
3547 }
3548 return error;
3549#endif /* SECURE_KERNEL */
3550}
3551#endif
3552
3553#if DEVELOPMENT || DEBUG
3554SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
3555 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
3556 0, 0, sysctl_nx, "I", "");
3557#endif
3558
3559STATIC int
3560sysctl_loadavg
3561(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3562{
3563 if (proc_is64bit(req->p)) {
3564 struct user64_loadavg loadinfo64 = {};
3565 fill_loadavg64(la: &averunnable, la64: &loadinfo64);
3566 return sysctl_io_opaque(req, pValue: &loadinfo64, valueSize: sizeof(loadinfo64), NULL);
3567 } else {
3568 struct user32_loadavg loadinfo32 = {};
3569 fill_loadavg32(la: &averunnable, la32: &loadinfo32);
3570 return sysctl_io_opaque(req, pValue: &loadinfo32, valueSize: sizeof(loadinfo32), NULL);
3571 }
3572}
3573
3574SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
3575 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
3576 0, 0, sysctl_loadavg, "S,loadavg", "");
3577
3578/*
3579 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
3580 */
3581STATIC int
3582sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
3583 __unused int arg2, struct sysctl_req *req)
3584{
3585 int old_value = 0, new_value = 0, error = 0;
3586
3587 if (vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value )) {
3588 return error;
3589 }
3590 error = sysctl_io_number(req, bigValue: old_value, valueSize: sizeof(int), pValue: &new_value, NULL);
3591 if (!error) {
3592 return vm_toggle_entry_reuse(new_value, NULL);
3593 }
3594 return error;
3595}
3596
3597SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse, "I", "");
3598
3599#ifdef CONFIG_XNUPOST
3600
3601extern uint32_t xnupost_get_estimated_testdata_size(void);
3602extern int xnupost_reset_all_tests(void);
3603
3604STATIC int
3605sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS
3606{
3607 /* fixup unused arguments warnings */
3608 __unused int _oa2 = arg2;
3609 __unused void * _oa1 = arg1;
3610 __unused struct sysctl_oid * _oidp = oidp;
3611
3612 int error = 0;
3613 user_addr_t oldp = 0;
3614 user_addr_t newp = 0;
3615 uint32_t usedbytes = 0;
3616
3617 oldp = req->oldptr;
3618 newp = req->newptr;
3619
3620 if (newp) {
3621 return ENOTSUP;
3622 }
3623
3624 if ((void *)oldp == NULL) {
3625 /* return estimated size for second call where info can be placed */
3626 req->oldidx = xnupost_get_estimated_testdata_size();
3627 } else {
3628 error = xnupost_export_testdata((void *)oldp, req->oldlen, &usedbytes);
3629 req->oldidx = usedbytes;
3630 }
3631
3632 return error;
3633}
3634
3635SYSCTL_PROC(_debug,
3636 OID_AUTO,
3637 xnupost_get_tests,
3638 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
3639 0,
3640 0,
3641 sysctl_handle_xnupost_get_tests,
3642 "-",
3643 "read xnupost test data in kernel");
3644
3645#if CONFIG_EXT_PANICLOG
3646/*
3647 * Extensible panic log test hooks
3648 */
3649static int
3650sysctl_debug_ext_paniclog_test_hook SYSCTL_HANDLER_ARGS
3651{
3652#pragma unused(arg1, arg2)
3653 int rval = 0;
3654 uint32_t test_option = 0;
3655
3656 rval = sysctl_handle_int(oidp, &test_option, 0, req);
3657
3658 if (rval == 0 && req->newptr) {
3659 rval = ext_paniclog_test_hook(test_option);
3660 }
3661
3662 return rval;
3663}
3664
3665SYSCTL_PROC(_debug, OID_AUTO, ext_paniclog_test_hook,
3666 CTLTYPE_INT | CTLFLAG_RW,
3667 0, 0,
3668 sysctl_debug_ext_paniclog_test_hook, "A", "ext paniclog test hook");
3669
3670#endif
3671
3672STATIC int
3673sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS
3674{
3675 /* fixup unused arguments warnings */
3676 __unused int _oa2 = arg2;
3677 __unused void * _oa1 = arg1;
3678 __unused struct sysctl_oid * _oidp = oidp;
3679
3680#define ARRCOUNT 4
3681 /*
3682 * INPUT: ACTION, PARAM1, PARAM2, PARAM3
3683 * OUTPUT: RESULTCODE, ADDITIONAL DATA
3684 */
3685 int32_t outval[ARRCOUNT] = {0};
3686 int32_t input[ARRCOUNT] = {0};
3687 int32_t out_size = sizeof(outval);
3688 int32_t in_size = sizeof(input);
3689 int error = 0;
3690
3691 /* if this is NULL call to find out size, send out size info */
3692 if (!req->newptr) {
3693 goto out;
3694 }
3695
3696 /* pull in provided value from userspace */
3697 error = SYSCTL_IN(req, &input[0], in_size);
3698 if (error) {
3699 return error;
3700 }
3701
3702 if (input[0] == XTCTL_RESET_TESTDATA) {
3703 outval[0] = xnupost_reset_all_tests();
3704 goto out;
3705 }
3706
3707out:
3708 error = SYSCTL_OUT(req, &outval[0], out_size);
3709 return error;
3710}
3711
3712SYSCTL_PROC(_debug,
3713 OID_AUTO,
3714 xnupost_testctl,
3715 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
3716 0,
3717 0,
3718 sysctl_debug_xnupost_ctl,
3719 "I",
3720 "xnupost control for kernel testing");
3721
3722extern void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t arraycount);
3723
3724STATIC int
3725sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * arg1, __unused int arg2, struct sysctl_req * req)
3726{
3727#define ARRCOUNT 4
3728 int32_t outval[ARRCOUNT] = {0};
3729 int32_t input[ARRCOUNT] = {0};
3730 int32_t size_outval = sizeof(outval);
3731 int32_t size_inval = sizeof(input);
3732 int32_t error;
3733
3734 /* if this is NULL call to find out size, send out size info */
3735 if (!req->newptr) {
3736 error = SYSCTL_OUT(req, &outval[0], size_outval);
3737 return error;
3738 }
3739
3740 /* pull in provided value from userspace */
3741 error = SYSCTL_IN(req, &input[0], size_inval);
3742 if (error) {
3743 return error;
3744 }
3745
3746 test_oslog_handleOSLogCtl(input, outval, ARRCOUNT);
3747
3748 error = SYSCTL_OUT(req, &outval[0], size_outval);
3749
3750 return error;
3751}
3752
3753SYSCTL_PROC(_debug,
3754 OID_AUTO,
3755 test_OSLogCtl,
3756 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
3757 0,
3758 0,
3759 sysctl_debug_test_oslog_ctl,
3760 "I",
3761 "testing oslog in kernel");
3762
3763#include <mach/task.h>
3764#include <mach/semaphore.h>
3765
3766static LCK_GRP_DECLARE(sysctl_debug_test_stackshot_owner_grp, "test-stackshot-owner-grp");
3767static LCK_MTX_DECLARE(sysctl_debug_test_stackshot_owner_init_mtx,
3768 &sysctl_debug_test_stackshot_owner_grp);
3769
3770/* This is a sysctl for testing collection of owner info on a lock in kernel space. A multi-threaded
3771 * test from userland sets this sysctl in such a way that a thread blocks in kernel mode, and a
3772 * stackshot is taken to see if the owner of the lock can be identified.
3773 *
3774 * We can't return to userland with a kernel lock held, so be sure to unlock before we leave.
3775 * the semaphores allow us to artificially create cases where the lock is being held and the
3776 * thread is hanging / taking a long time to do something. */
3777
3778volatile char sysctl_debug_test_stackshot_mtx_inited = 0;
3779semaphore_t sysctl_debug_test_stackshot_mutex_sem;
3780lck_mtx_t sysctl_debug_test_stackshot_owner_lck;
3781
3782#define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT 1
3783#define SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT 2
3784#define SYSCTL_DEBUG_MTX_SIGNAL 3
3785#define SYSCTL_DEBUG_MTX_TEARDOWN 4
3786
3787STATIC int
3788sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3789{
3790 long long option = -1;
3791 /* if the user tries to read the sysctl, we tell them what the address of the lock is (to test against stackshot's output) */
3792 long long mtx_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_lck);
3793 int error = sysctl_io_number(req, mtx_unslid_addr, sizeof(long long), (void*)&option, NULL);
3794
3795 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_init_mtx);
3796 if (!sysctl_debug_test_stackshot_mtx_inited) {
3797 lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck,
3798 &sysctl_debug_test_stackshot_owner_grp,
3799 LCK_ATTR_NULL);
3800 semaphore_create(kernel_task,
3801 &sysctl_debug_test_stackshot_mutex_sem,
3802 SYNC_POLICY_FIFO, 0);
3803 sysctl_debug_test_stackshot_mtx_inited = 1;
3804 }
3805 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_init_mtx);
3806
3807 if (!error) {
3808 switch (option) {
3809 case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT:
3810 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
3811 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
3812 break;
3813 case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT:
3814 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
3815 semaphore_wait(sysctl_debug_test_stackshot_mutex_sem);
3816 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
3817 break;
3818 case SYSCTL_DEBUG_MTX_SIGNAL:
3819 semaphore_signal(sysctl_debug_test_stackshot_mutex_sem);
3820 break;
3821 case SYSCTL_DEBUG_MTX_TEARDOWN:
3822 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_init_mtx);
3823
3824 lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck,
3825 &sysctl_debug_test_stackshot_owner_grp);
3826 semaphore_destroy(kernel_task,
3827 sysctl_debug_test_stackshot_mutex_sem);
3828 sysctl_debug_test_stackshot_mtx_inited = 0;
3829
3830 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_init_mtx);
3831 break;
3832 case -1: /* user just wanted to read the value, so do nothing */
3833 break;
3834 default:
3835 error = EINVAL;
3836 break;
3837 }
3838 }
3839 return error;
3840}
3841
3842/* we can't return to userland with a kernel rwlock held, so be sure to unlock before we leave.
3843 * the semaphores allow us to artificially create cases where the lock is being held and the
3844 * thread is hanging / taking a long time to do something. */
3845
3846SYSCTL_PROC(_debug,
3847 OID_AUTO,
3848 test_MutexOwnerCtl,
3849 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3850 0,
3851 0,
3852 sysctl_debug_test_stackshot_mutex_owner,
3853 "-",
3854 "Testing mutex owner in kernel");
3855
3856volatile char sysctl_debug_test_stackshot_rwlck_inited = 0;
3857lck_rw_t sysctl_debug_test_stackshot_owner_rwlck;
3858semaphore_t sysctl_debug_test_stackshot_rwlck_sem;
3859
3860#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT 1
3861#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT 2
3862#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT 3
3863#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT 4
3864#define SYSCTL_DEBUG_KRWLCK_SIGNAL 5
3865#define SYSCTL_DEBUG_KRWLCK_TEARDOWN 6
3866
3867STATIC int
3868sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3869{
3870 long long option = -1;
3871 /* if the user tries to read the sysctl, we tell them what the address of the lock is
3872 * (to test against stackshot's output) */
3873 long long rwlck_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck);
3874 int error = sysctl_io_number(req, rwlck_unslid_addr, sizeof(long long), (void*)&option, NULL);
3875
3876 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_init_mtx);
3877 if (!sysctl_debug_test_stackshot_rwlck_inited) {
3878 lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck,
3879 &sysctl_debug_test_stackshot_owner_grp,
3880 LCK_ATTR_NULL);
3881 semaphore_create(kernel_task,
3882 &sysctl_debug_test_stackshot_rwlck_sem,
3883 SYNC_POLICY_FIFO,
3884 0);
3885 sysctl_debug_test_stackshot_rwlck_inited = 1;
3886 }
3887 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_init_mtx);
3888
3889 if (!error) {
3890 switch (option) {
3891 case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT:
3892 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
3893 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
3894 break;
3895 case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT:
3896 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
3897 semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
3898 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
3899 break;
3900 case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT:
3901 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
3902 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
3903 break;
3904 case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT:
3905 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
3906 semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
3907 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
3908 break;
3909 case SYSCTL_DEBUG_KRWLCK_SIGNAL:
3910 semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem);
3911 break;
3912 case SYSCTL_DEBUG_KRWLCK_TEARDOWN:
3913 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_init_mtx);
3914
3915 lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck,
3916 &sysctl_debug_test_stackshot_owner_grp);
3917 semaphore_destroy(kernel_task,
3918 sysctl_debug_test_stackshot_rwlck_sem);
3919 sysctl_debug_test_stackshot_rwlck_inited = 0;
3920
3921 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_init_mtx);
3922 break;
3923 case -1: /* user just wanted to read the value, so do nothing */
3924 break;
3925 default:
3926 error = EINVAL;
3927 break;
3928 }
3929 }
3930 return error;
3931}
3932
3933
3934SYSCTL_PROC(_debug,
3935 OID_AUTO,
3936 test_RWLockOwnerCtl,
3937 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3938 0,
3939 0,
3940 sysctl_debug_test_stackshot_rwlck_owner,
3941 "-",
3942 "Testing rwlock owner in kernel");
3943#endif /* !CONFIG_XNUPOST */
3944
3945STATIC int
3946sysctl_swapusage
3947(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3948{
3949 int error;
3950 uint64_t swap_total;
3951 uint64_t swap_avail;
3952 vm_size_t swap_pagesize;
3953 boolean_t swap_encrypted;
3954 struct xsw_usage xsu = {};
3955
3956 error = macx_swapinfo(total_p: &swap_total,
3957 avail_p: &swap_avail,
3958 pagesize_p: &swap_pagesize,
3959 encrypted_p: &swap_encrypted);
3960 if (error) {
3961 return error;
3962 }
3963
3964 xsu.xsu_total = swap_total;
3965 xsu.xsu_avail = swap_avail;
3966 xsu.xsu_used = swap_total - swap_avail;
3967 xsu.xsu_pagesize = (u_int32_t)MIN(swap_pagesize, UINT32_MAX);
3968 xsu.xsu_encrypted = swap_encrypted;
3969 return sysctl_io_opaque(req, pValue: &xsu, valueSize: sizeof(xsu), NULL);
3970}
3971
3972
3973
3974SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
3975 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
3976 0, 0, sysctl_swapusage, "S,xsw_usage", "");
3977
3978extern int vm_swap_enabled;
3979SYSCTL_INT(_vm, OID_AUTO, swap_enabled, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_swap_enabled, 0, "");
3980
3981#if DEVELOPMENT || DEBUG
3982extern int vm_num_swap_files_config;
3983extern int vm_num_swap_files;
3984extern lck_mtx_t vm_swap_data_lock;
3985#define VM_MAX_SWAP_FILE_NUM 100
3986
3987static int
3988sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS
3989{
3990#pragma unused(arg1, arg2)
3991 int error = 0, val = vm_num_swap_files_config;
3992
3993 error = sysctl_handle_int(oidp, &val, 0, req);
3994 if (error || !req->newptr) {
3995 goto out;
3996 }
3997
3998 if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
3999 printf("Swap is disabled\n");
4000 error = EINVAL;
4001 goto out;
4002 }
4003
4004 lck_mtx_lock(&vm_swap_data_lock);
4005
4006 if (val < vm_num_swap_files) {
4007 printf("Cannot configure fewer swap files than already exist.\n");
4008 error = EINVAL;
4009 lck_mtx_unlock(&vm_swap_data_lock);
4010 goto out;
4011 }
4012
4013 if (val > VM_MAX_SWAP_FILE_NUM) {
4014 printf("Capping number of swap files to upper bound.\n");
4015 val = VM_MAX_SWAP_FILE_NUM;
4016 }
4017
4018 vm_num_swap_files_config = val;
4019 lck_mtx_unlock(&vm_swap_data_lock);
4020out:
4021
4022 return 0;
4023}
4024
4025SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_config_num_swap_files, "I", "");
4026#endif /* DEVELOPMENT || DEBUG */
4027
4028/* this kernel does NOT implement shared_region_make_private_np() */
4029SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
4030 CTLFLAG_RD | CTLFLAG_LOCKED,
4031 (int *)NULL, 0, "");
4032
4033STATIC int
4034fetch_process_cputype(
4035 proc_t cur_proc,
4036 int *name,
4037 u_int namelen,
4038 cpu_type_t *cputype)
4039{
4040 proc_t p = PROC_NULL;
4041 int refheld = 0;
4042 cpu_type_t ret = 0;
4043 int error = 0;
4044
4045 if (namelen == 0) {
4046 p = cur_proc;
4047 } else if (namelen == 1) {
4048 p = proc_find(pid: name[0]);
4049 if (p == NULL) {
4050 return EINVAL;
4051 }
4052 refheld = 1;
4053 } else {
4054 error = EINVAL;
4055 goto out;
4056 }
4057
4058 ret = cpu_type() & ~CPU_ARCH_MASK;
4059 if (IS_64BIT_PROCESS(p)) {
4060 ret |= CPU_ARCH_ABI64;
4061 }
4062
4063 *cputype = ret;
4064
4065 if (refheld != 0) {
4066 proc_rele(p);
4067 }
4068out:
4069 return error;
4070}
4071
4072
4073STATIC int
4074sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
4075 struct sysctl_req *req)
4076{
4077 int error;
4078 cpu_type_t proc_cputype = 0;
4079 if ((error = fetch_process_cputype(cur_proc: req->p, name: (int *)arg1, namelen: arg2, cputype: &proc_cputype)) != 0) {
4080 return error;
4081 }
4082 int res = 1;
4083 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) {
4084 res = 0;
4085 }
4086 return SYSCTL_OUT(req, &res, sizeof(res));
4087}
4088SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native, "I", "proc_native");
4089
4090STATIC int
4091sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
4092 struct sysctl_req *req)
4093{
4094 int error;
4095 cpu_type_t proc_cputype = 0;
4096 if ((error = fetch_process_cputype(cur_proc: req->p, name: (int *)arg1, namelen: arg2, cputype: &proc_cputype)) != 0) {
4097 return error;
4098 }
4099 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
4100}
4101SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
4102
4103STATIC int
4104sysctl_safeboot
4105(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4106{
4107 return sysctl_io_number(req, bigValue: boothowto & RB_SAFEBOOT ? 1 : 0, valueSize: sizeof(int), NULL, NULL);
4108}
4109
4110SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
4111 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
4112 0, 0, sysctl_safeboot, "I", "");
4113
4114STATIC int
4115sysctl_singleuser
4116(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4117{
4118 return sysctl_io_number(req, bigValue: boothowto & RB_SINGLE ? 1 : 0, valueSize: sizeof(int), NULL, NULL);
4119}
4120
4121SYSCTL_PROC(_kern, OID_AUTO, singleuser,
4122 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
4123 0, 0, sysctl_singleuser, "I", "");
4124
4125STATIC int
4126sysctl_minimalboot
4127(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4128{
4129 return sysctl_io_number(req, bigValue: minimalboot, valueSize: sizeof(int), NULL, NULL);
4130}
4131
4132SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
4133 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
4134 0, 0, sysctl_minimalboot, "I", "");
4135
4136/*
4137 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
4138 */
4139extern boolean_t affinity_sets_enabled;
4140extern int affinity_sets_mapping;
4141
4142SYSCTL_INT(_kern, OID_AUTO, affinity_sets_enabled,
4143 CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
4144SYSCTL_INT(_kern, OID_AUTO, affinity_sets_mapping,
4145 CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
4146
4147/*
4148 * Boolean indicating if KASLR is active.
4149 */
4150STATIC int
4151sysctl_slide
4152(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4153{
4154 uint32_t slide;
4155
4156 slide = vm_kernel_slide ? 1 : 0;
4157
4158 return sysctl_io_number( req, bigValue: slide, valueSize: sizeof(int), NULL, NULL);
4159}
4160
4161SYSCTL_PROC(_kern, OID_AUTO, slide,
4162 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
4163 0, 0, sysctl_slide, "I", "");
4164
4165#if DEBUG || DEVELOPMENT
4166#if defined(__arm64__)
4167extern vm_offset_t segTEXTEXECB;
4168
4169static int
4170sysctl_kernel_text_exec_base_slide SYSCTL_HANDLER_ARGS
4171{
4172#pragma unused(arg1, arg2, oidp)
4173 unsigned long slide = 0;
4174 kc_format_t kc_format;
4175
4176 PE_get_primary_kc_format(&kc_format);
4177
4178 if (kc_format == KCFormatFileset) {
4179 void *kch = PE_get_kc_header(KCKindPrimary);
4180 slide = (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide;
4181 }
4182 return SYSCTL_OUT(req, &slide, sizeof(slide));
4183}
4184
4185SYSCTL_QUAD(_kern, OID_AUTO, kernel_slide, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &vm_kernel_slide, "");
4186SYSCTL_QUAD(_kern, OID_AUTO, kernel_text_exec_base, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &segTEXTEXECB, "");
4187SYSCTL_PROC(_kern, OID_AUTO, kernel_text_exec_base_slide, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_kernel_text_exec_base_slide, "Q", "");
4188#endif /* defined(__arm64__) */
4189
4190/* User address of the PFZ */
4191extern user32_addr_t commpage_text32_location;
4192extern user64_addr_t commpage_text64_location;
4193
4194STATIC int
4195sysctl_pfz_start SYSCTL_HANDLER_ARGS
4196{
4197#pragma unused(oidp, arg1, arg2)
4198
4199#ifdef __LP64__
4200 return sysctl_io_number(req, commpage_text64_location, sizeof(user64_addr_t), NULL, NULL);
4201#else
4202 return sysctl_io_number(req, commpage_text32_location, sizeof(user32_addr_t), NULL, NULL);
4203#endif
4204}
4205
4206SYSCTL_PROC(_kern, OID_AUTO, pfz,
4207 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
4208 0, 0, sysctl_pfz_start, "I", "");
4209#endif
4210
4211
4212/*
4213 * Limit on total memory users can wire.
4214 *
4215 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
4216 *
4217 * vm_per_task_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
4218 *
4219 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
4220 * kmem_init().
4221 *
4222 * All values are in bytes.
4223 */
4224
4225vm_map_size_t vm_global_user_wire_limit;
4226vm_map_size_t vm_per_task_user_wire_limit;
4227extern uint64_t max_mem_actual, max_mem;
4228
4229uint64_t vm_add_wire_count_over_global_limit;
4230uint64_t vm_add_wire_count_over_user_limit;
4231/*
4232 * We used to have a global in the kernel called vm_global_no_user_wire_limit which was the inverse
4233 * of vm_global_user_wire_limit. But maintaining both of those is silly, and vm_global_user_wire_limit is the
4234 * real limit.
4235 * This function is for backwards compatibility with userspace
4236 * since we exposed the old global via a sysctl.
4237 */
4238STATIC int
4239sysctl_global_no_user_wire_amount(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4240{
4241 vm_map_size_t old_value;
4242 vm_map_size_t new_value;
4243 int changed;
4244 int error;
4245 uint64_t config_memsize = max_mem;
4246#if defined(XNU_TARGET_OS_OSX)
4247 config_memsize = max_mem_actual;
4248#endif /* defined(XNU_TARGET_OS_OSX) */
4249
4250 old_value = (vm_map_size_t)(config_memsize - vm_global_user_wire_limit);
4251 error = sysctl_io_number(req, bigValue: old_value, valueSize: sizeof(vm_map_size_t), pValue: &new_value, changed: &changed);
4252 if (changed) {
4253 if ((uint64_t)new_value > config_memsize) {
4254 error = EINVAL;
4255 } else {
4256 vm_global_user_wire_limit = (vm_map_size_t)(config_memsize - new_value);
4257 }
4258 }
4259 return error;
4260}
4261/*
4262 * There needs to be a more automatic/elegant way to do this
4263 */
4264SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
4265SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per_task_user_wire_limit, "");
4266SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "Q", "");
4267
4268/*
4269 * Relaxed atomic RW of a 64bit value via sysctl.
4270 */
4271STATIC int
4272sysctl_r_64bit_atomic(uint64_t *ptr, struct sysctl_req *req)
4273{
4274 uint64_t old_value;
4275 uint64_t new_value;
4276 int error;
4277
4278 old_value = os_atomic_load_wide(ptr, relaxed);
4279 error = sysctl_io_number(req, bigValue: old_value, valueSize: sizeof(vm_map_size_t), pValue: &new_value, NULL);
4280 return error;
4281}
4282STATIC int
4283sysctl_add_wire_count_over_global_limit(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4284{
4285 return sysctl_r_64bit_atomic(ptr: &vm_add_wire_count_over_global_limit, req);
4286}
4287STATIC int
4288sysctl_add_wire_count_over_user_limit(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4289{
4290 return sysctl_r_64bit_atomic(ptr: &vm_add_wire_count_over_user_limit, req);
4291}
4292
4293SYSCTL_PROC(_vm, OID_AUTO, add_wire_count_over_global_limit, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, &sysctl_add_wire_count_over_global_limit, "Q", "");
4294SYSCTL_PROC(_vm, OID_AUTO, add_wire_count_over_user_limit, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, &sysctl_add_wire_count_over_user_limit, "Q", "");
4295
4296#if DEVELOPMENT || DEBUG
4297/* These sysctls are used to test the wired limit. */
4298extern unsigned int vm_page_wire_count;
4299extern uint32_t vm_lopage_free_count;
4300SYSCTL_INT(_vm, OID_AUTO, page_wire_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_wire_count, 0, "");
4301SYSCTL_INT(_vm, OID_AUTO, lopage_free_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_lopage_free_count, 0, "");
4302
4303/*
4304 * Setting the per task variable exclude_physfootprint_ledger to 1 will allow the calling task to exclude memory entries that are
4305 * tagged by VM_LEDGER_TAG_DEFAULT and flagged by VM_LEDGER_FLAG_EXCLUDE_FOOTPRINT_DEBUG from its phys_footprint ledger.
4306 */
4307
4308STATIC int
4309sysctl_rw_task_no_footprint_for_debug(struct sysctl_oid *oidp __unused, void *arg1 __unused, int arg2 __unused, struct sysctl_req *req)
4310{
4311 int error;
4312 int value;
4313 proc_t p = current_proc();
4314
4315 if (req->newptr) {
4316 // Write request
4317 error = SYSCTL_IN(req, &value, sizeof(value));
4318 if (!error) {
4319 if (value == 1) {
4320 task_set_no_footprint_for_debug(proc_task(p), TRUE);
4321 } else if (value == 0) {
4322 task_set_no_footprint_for_debug(proc_task(p), FALSE);
4323 } else {
4324 error = EINVAL;
4325 }
4326 }
4327 } else {
4328 // Read request
4329 value = task_get_no_footprint_for_debug(proc_task(p));
4330 error = SYSCTL_OUT(req, &value, sizeof(value));
4331 }
4332 return error;
4333}
4334
4335SYSCTL_PROC(_vm, OID_AUTO, task_no_footprint_for_debug,
4336 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
4337 0, 0, &sysctl_rw_task_no_footprint_for_debug, "I", "Allow debug memory to be excluded from this task's memory footprint (debug only)");
4338
4339#endif /* DEVELOPMENT || DEBUG */
4340
4341
4342extern int vm_map_copy_overwrite_aligned_src_not_internal;
4343extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
4344extern int vm_map_copy_overwrite_aligned_src_large;
4345SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
4346SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
4347SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
4348
4349
4350extern uint32_t vm_page_external_count;
4351
4352SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
4353
4354SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min, 0, "");
4355SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min, 0, "");
4356
4357#if DEVELOPMENT || DEBUG
4358SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min_divisor, 0, "");
4359SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min_divisor, 0, "");
4360extern boolean_t vps_yield_for_pgqlockwaiters;
4361SYSCTL_INT(_vm, OID_AUTO, vm_pageoutscan_yields_for_pageQlockwaiters, CTLFLAG_RW | CTLFLAG_LOCKED, &vps_yield_for_pgqlockwaiters, 0, "");
4362#endif
4363
4364extern int vm_compressor_mode;
4365extern int vm_compressor_is_active;
4366extern int vm_compressor_available;
4367extern uint32_t c_seg_bufsize;
4368extern uint64_t compressor_pool_size;
4369extern uint32_t vm_ripe_target_age;
4370extern uint32_t swapout_target_age;
4371extern int64_t compressor_bytes_used;
4372extern int64_t c_segment_input_bytes;
4373extern int64_t c_segment_compressed_bytes;
4374extern uint32_t compressor_eval_period_in_msecs;
4375extern uint32_t compressor_sample_min_in_msecs;
4376extern uint32_t compressor_sample_max_in_msecs;
4377extern uint32_t compressor_thrashing_threshold_per_10msecs;
4378extern uint32_t compressor_thrashing_min_per_10msecs;
4379extern uint32_t vm_compressor_time_thread;
4380
4381#if DEVELOPMENT || DEBUG
4382extern uint32_t vm_compressor_minorcompact_threshold_divisor;
4383extern uint32_t vm_compressor_majorcompact_threshold_divisor;
4384extern uint32_t vm_compressor_unthrottle_threshold_divisor;
4385extern uint32_t vm_compressor_catchup_threshold_divisor;
4386
4387extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
4388extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
4389extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
4390extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;
4391
4392extern vmct_stats_t vmct_stats;
4393
4394
4395STATIC int
4396sysctl_minorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4397{
4398 int new_value, changed;
4399 int error = sysctl_io_number(req, vm_compressor_minorcompact_threshold_divisor, sizeof(int), &new_value, &changed);
4400
4401 if (changed) {
4402 vm_compressor_minorcompact_threshold_divisor = new_value;
4403 vm_compressor_minorcompact_threshold_divisor_overridden = 1;
4404 }
4405 return error;
4406}
4407
4408SYSCTL_PROC(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor,
4409 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
4410 0, 0, sysctl_minorcompact_threshold_divisor, "I", "");
4411
4412
4413STATIC int
4414sysctl_majorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4415{
4416 int new_value, changed;
4417 int error = sysctl_io_number(req, vm_compressor_majorcompact_threshold_divisor, sizeof(int), &new_value, &changed);
4418
4419 if (changed) {
4420 vm_compressor_majorcompact_threshold_divisor = new_value;
4421 vm_compressor_majorcompact_threshold_divisor_overridden = 1;
4422 }
4423 return error;
4424}
4425
4426SYSCTL_PROC(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor,
4427 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
4428 0, 0, sysctl_majorcompact_threshold_divisor, "I", "");
4429
4430
4431STATIC int
4432sysctl_unthrottle_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4433{
4434 int new_value, changed;
4435 int error = sysctl_io_number(req, vm_compressor_unthrottle_threshold_divisor, sizeof(int), &new_value, &changed);
4436
4437 if (changed) {
4438 vm_compressor_unthrottle_threshold_divisor = new_value;
4439 vm_compressor_unthrottle_threshold_divisor_overridden = 1;
4440 }
4441 return error;
4442}
4443
4444SYSCTL_PROC(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor,
4445 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
4446 0, 0, sysctl_unthrottle_threshold_divisor, "I", "");
4447
4448
4449STATIC int
4450sysctl_catchup_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4451{
4452 int new_value, changed;
4453 int error = sysctl_io_number(req, vm_compressor_catchup_threshold_divisor, sizeof(int), &new_value, &changed);
4454
4455 if (changed) {
4456 vm_compressor_catchup_threshold_divisor = new_value;
4457 vm_compressor_catchup_threshold_divisor_overridden = 1;
4458 }
4459 return error;
4460}
4461
4462SYSCTL_PROC(_vm, OID_AUTO, compressor_catchup_threshold_divisor,
4463 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
4464 0, 0, sysctl_catchup_threshold_divisor, "I", "");
4465#endif
4466
4467
4468SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
4469SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
4470SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
4471
4472SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
4473SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
4474