/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 * The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
 * project, to make these variables more user-friendly.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc_internal.h>
#include <sys/unistd.h>

#if defined(SMP)
#include <machine/smp.h>
#endif

#include <sys/param.h> /* XXX prune includes */
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/file_internal.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/ioctl.h>
#include <sys/namei.h>
#include <sys/tty.h>
#include <sys/disklabel.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <mach/machine.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <kern/task.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <mach/host_info.h>
#include <kern/pms.h>
#include <pexpert/device_tree.h>
#include <pexpert/pexpert.h>
#include <kern/sched_prim.h>
#include <console/serial_protos.h>

extern vm_map_t bsd_pageable_map;

#include <sys/mount_internal.h>
#include <sys/kdebug.h>

#include <IOKit/IOPlatformExpert.h>
#include <pexpert/pexpert.h>

#include <machine/config.h>
#include <machine/machine_routines.h>
#include <machine/cpu_capabilities.h>

#include <mach/mach_host.h> /* for host_info() */

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpuid.h> /* for cpuid_info() */
#endif

#if defined(__arm64__)
#include <arm/cpuid.h> /* for cpuid_info() & cache_info() */
#endif

#if defined(CONFIG_XNUPOST)
#include <tests/ktest.h>
#endif

/**
 * Prevents an issue with creating the sysctl node hw.optional.arm on some
 * platforms. If the 'arm' macro is defined, then the word "arm" is preprocessed
 * to 1. As the 'arm' macro is not used in this file, we do not need to redefine
 * it after we are done.
 */
#if defined(arm)
#undef arm
#endif /* defined(arm) */

#ifndef MAX
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
#endif

#if defined(__arm64__) && defined(CONFIG_XNUPOST)
kern_return_t arm_cpu_capabilities_legacy_test(void);
#endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */

/* XXX This should be in a BSD accessible Mach header, but isn't. */
extern unsigned int vm_page_wire_count;

static int cputhreadtype, cpu64bit;
static uint64_t cacheconfig[10];
static int packages;

static char * osenvironment = NULL;
static uint32_t osenvironment_size = 0;
static int osenvironment_initialized = 0;

static uint32_t ephemeral_storage = 0;
static uint32_t use_recovery_securityd = 0;

static struct {
	uint32_t ephemeral_storage:1;
	uint32_t use_recovery_securityd:1;
} property_existence = {0, 0};

SYSCTL_EXTENSIBLE_NODE(, 0, sysctl, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Sysctl internal magic");
SYSCTL_EXTENSIBLE_NODE(, CTL_KERN, kern, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "High kernel, proc, limits &c");
SYSCTL_EXTENSIBLE_NODE(, CTL_VM, vm, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Virtual memory");
SYSCTL_EXTENSIBLE_NODE(, CTL_VFS, vfs, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "File system");
SYSCTL_EXTENSIBLE_NODE(, CTL_NET, net, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Network, (see socket.h)");
SYSCTL_EXTENSIBLE_NODE(, CTL_DEBUG, debug, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Debugging");
#if DEBUG || DEVELOPMENT
SYSCTL_NODE(_debug, OID_AUTO, test,
    CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, "tests");
#endif
SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "hardware");
SYSCTL_EXTENSIBLE_NODE(, CTL_MACHDEP, machdep, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "machine dependent");
SYSCTL_NODE(, CTL_USER, user, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "user-level");

SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "bridge");

#define SYSCTL_RETURN(r, x) SYSCTL_OUT(r, &x, sizeof(x))

/******************************************************************************
 * hw.* MIB
 */

#define CTLHW_RETQUAD   (1U << 31)
#define CTLHW_LOCAL     (1U << 30)
#define CTLHW_PERFLEVEL (1U << 29)

#define HW_LOCAL_CPUTHREADTYPE      (1 | CTLHW_LOCAL)
#define HW_LOCAL_PHYSICALCPU        (2 | CTLHW_LOCAL)
#define HW_LOCAL_PHYSICALCPUMAX     (3 | CTLHW_LOCAL)
#define HW_LOCAL_LOGICALCPU         (4 | CTLHW_LOCAL)
#define HW_LOCAL_LOGICALCPUMAX      (5 | CTLHW_LOCAL)
#define HW_LOCAL_CPUTYPE            (6 | CTLHW_LOCAL)
#define HW_LOCAL_CPUSUBTYPE         (7 | CTLHW_LOCAL)
#define HW_LOCAL_CPUFAMILY          (8 | CTLHW_LOCAL)
#define HW_LOCAL_CPUSUBFAMILY       (9 | CTLHW_LOCAL)
#define HW_NPERFLEVELS              (10 | CTLHW_LOCAL)
#define HW_PERFLEVEL_PHYSICALCPU    (11 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_PHYSICALCPUMAX (12 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_LOGICALCPU     (13 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_LOGICALCPUMAX  (14 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L1ICACHESIZE   (15 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L1DCACHESIZE   (16 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L2CACHESIZE    (17 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_CPUSPERL2      (18 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_L3CACHESIZE    (19 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_CPUSPERL3      (20 | CTLHW_PERFLEVEL)
#define HW_PERFLEVEL_NAME           (21 | CTLHW_PERFLEVEL)

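/*
 * Illustrative note (added for clarity, not from the original source): the
 * arg2 value handed to sysctl_hw_generic() below is one of the selectors
 * above, optionally OR'ed with CTLHW_RETQUAD when the OID is registered as a
 * quad. For example, the hw.l2cachesize registration further down passes
 *
 *     HW_L2CACHESIZE | CTLHW_RETQUAD
 *
 * and sysctl_hw_generic() strips the flag with `arg2 &= ~CTLHW_RETQUAD`
 * before switching on the remaining selector.
 */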

/*
 * For a given perflevel, return the corresponding CPU type.
 */
cluster_type_t cpu_type_for_perflevel(int perflevel);
cluster_type_t
cpu_type_for_perflevel(int perflevel)
{
	unsigned int cpu_types = ml_get_cpu_types();
	unsigned int n_perflevels = __builtin_popcount(cpu_types);

	assert((perflevel >= 0) && (perflevel < n_perflevels));

	int current_idx = 0, current_perflevel = -1;

	while (cpu_types) {
		current_perflevel += cpu_types & 1;
		if (current_perflevel == (n_perflevels - (perflevel + 1))) {
			return current_idx;
		}

		cpu_types >>= 1;
		current_idx++;
	}

	return 0;
}
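/*
 * Worked example (illustrative, added for clarity): suppose ml_get_cpu_types()
 * returns a mask with two bits set, e.g. (1 << CLUSTER_TYPE_E) |
 * (1 << CLUSTER_TYPE_P), so n_perflevels == 2. The loop walks the mask from
 * bit 0 upward and counts set bits: perflevel 0 (the highest-performance
 * level) resolves to the index of the highest set bit, and the last perflevel
 * resolves to the index of the lowest set bit. The CLUSTER_TYPE_* names here
 * are only for illustration and are taken from the machine headers.
 */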

static ml_cpu_info_t
sysctl_hw_generic_cpu_info(int perflevel, int arg2 __unused)
{
	bool ignore_perflevel = false;
#if APPLE_ARM64_ARCH_FAMILY
	if (arg2 == HW_CACHELINE) {
		/* Apple SoCs have a uniform cacheline size across all clusters */
		ignore_perflevel = true;
	}
#endif

	ml_cpu_info_t cpu_info;
	if (ignore_perflevel) {
		ml_cpu_get_info(&cpu_info);
	} else {
		ml_cpu_get_info_type(&cpu_info, cpu_type_for_perflevel(perflevel));
	}
	return cpu_info;
}

/*
 * Supporting some variables requires us to do "real" work. We
 * gather some of that here.
 */
static int
sysctl_hw_generic(__unused struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req)
{
	char dummy[65];
	int epochTemp;
	int val, doquad;
	long long qval;
	unsigned int cpu_count;
	host_basic_info_data_t hinfo;
	kern_return_t kret;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	/*
	 * If we are using one of the perflevel sysctls, return early if the perflevel
	 * does not exist in this system.
	 */
	int perflevel = (int)arg1;
	int n_perflevels = __builtin_popcount(ml_get_cpu_types());

	if (arg2 & CTLHW_PERFLEVEL) {
		if ((perflevel < 0) || (perflevel >= n_perflevels)) {
			return ENOENT;
		}
	} else {
		perflevel = n_perflevels - 1;
	}

	/*
	 * Test and mask off the 'return quad' flag.
	 * Note that only some things here support it.
	 */
	doquad = arg2 & CTLHW_RETQUAD;
	arg2 &= ~CTLHW_RETQUAD;

#define BSD_HOST 1
	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	/*
	 * Handle various OIDs.
	 *
	 * OIDs that can return int or quad set val and qval and then break.
	 * Errors and int-only values return inline.
	 */
	switch (arg2) {
	case HW_NCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.max_cpus);
		} else {
			return EINVAL;
		}
	case HW_AVAILCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.avail_cpus);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_PHYSICALCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.physical_cpu);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_PHYSICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.physical_cpu_max);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_LOGICALCPU:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.logical_cpu);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_LOGICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.logical_cpu_max);
		} else {
			return EINVAL;
		}
	case HW_NPERFLEVELS:
		return SYSCTL_RETURN(req, n_perflevels);
	case HW_PERFLEVEL_PHYSICALCPU:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, true);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_PHYSICALCPUMAX:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), false, false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_LOGICALCPU:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, true);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_LOGICALCPUMAX:
		cpu_count = ml_get_cpu_number_type(cpu_type_for_perflevel(perflevel), true, false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_L1ICACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_icache_size;
		qval = (long long)cpu_info.l1_icache_size;
		break;
	}
	case HW_PERFLEVEL_L1DCACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_dcache_size;
		qval = (long long)cpu_info.l1_dcache_size;
		break;
	}
	case HW_PERFLEVEL_L2CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l2_cache_size;
		qval = (long long)cpu_info.l2_cache_size;
		break;
	}
	case HW_PERFLEVEL_CPUSPERL2:
		cpu_count = ml_cpu_cache_sharing(2, cpu_type_for_perflevel(perflevel), false);
		return SYSCTL_RETURN(req, cpu_count);
	case HW_PERFLEVEL_L3CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l3_cache_size;
		qval = (long long)cpu_info.l3_cache_size;
		break;
	}
	case HW_PERFLEVEL_CPUSPERL3: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		cpu_count = ml_cpu_cache_sharing(3, cpu_type_for_perflevel(perflevel), false);
		return SYSCTL_RETURN(req, cpu_count);
	}
	case HW_PERFLEVEL_NAME:
		bzero(dummy, sizeof(dummy));
		ml_get_cluster_type_name(cpu_type_for_perflevel(perflevel), dummy, sizeof(dummy));
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_LOCAL_CPUTYPE:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.cpu_type);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_CPUSUBTYPE:
		if (kret == KERN_SUCCESS) {
			return SYSCTL_RETURN(req, hinfo.cpu_subtype);
		} else {
			return EINVAL;
		}
	case HW_LOCAL_CPUFAMILY:
	{
		int cpufamily = 0;
#if defined (__i386__) || defined (__x86_64__)
		cpufamily = cpuid_cpufamily();
#elif defined(__arm64__)
		{
			cpufamily = cpuid_get_cpufamily();
		}
#else
#error unknown architecture
#endif
		return SYSCTL_RETURN(req, cpufamily);
	}
	case HW_LOCAL_CPUSUBFAMILY:
	{
		int cpusubfamily = 0;
#if defined (__i386__) || defined (__x86_64__)
		cpusubfamily = CPUSUBFAMILY_UNKNOWN;
#elif defined(__arm64__)
		{
			cpusubfamily = cpuid_get_cpusubfamily();
		}
#else
#error unknown architecture
#endif
		return SYSCTL_RETURN(req, cpusubfamily);
	}
	case HW_PAGESIZE:
	{
		vm_map_t map = get_task_map(current_task());
		val = vm_map_page_size(map);
		qval = (long long)val;
		break;
	}
	case HW_CACHELINE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.cache_line_size;
		qval = (long long)val;
		break;
	}
	case HW_L1ICACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_icache_size;
		qval = (long long)cpu_info.l1_icache_size;
		break;
	}
	case HW_L1DCACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		val = (int)cpu_info.l1_dcache_size;
		qval = (long long)cpu_info.l1_dcache_size;
		break;
	}
	case HW_L2CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l2_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l2_cache_size;
		qval = (long long)cpu_info.l2_cache_size;
		break;
	}
	case HW_L3CACHESIZE: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		val = (int)cpu_info.l3_cache_size;
		qval = (long long)cpu_info.l3_cache_size;
		break;
	}
	case HW_TARGET:
		bzero(dummy, sizeof(dummy));
		if (!PEGetTargetName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_PRODUCT:
		bzero(dummy, sizeof(dummy));
		if (!PEGetProductName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);

	/*
	 * Deprecated variables. We still support these for
	 * backwards compatibility purposes only.
	 */
#if XNU_TARGET_OS_OSX && defined(__arm64__)
	/* The following two are kludged for backward
	 * compatibility. Use hw.product/hw.target for something
	 * consistent instead. */

	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if (proc_platform(req->p) == PLATFORM_IOS) {
			/* iOS-on-Mac processes don't expect the macOS kind of
			 * hw.machine, e.g. "arm64", but are used to seeing
			 * a product string on iOS, which we here hardcode
			 * to return as "iPad8,6" for compatibility.
			 *
			 * Another reason why hw.machine and hw.model are
			 * trouble and hw.target+hw.product should be used
			 * instead.
			 */

			strlcpy(dummy, "iPad8,6", sizeof(dummy));
		} else {
			strlcpy(dummy, "arm64", sizeof(dummy));
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if (!PEGetProductName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
#else
	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if (!PEGetMachineName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if (!PEGetModelName(dummy, 64)) {
			return EINVAL;
		}
		dummy[64] = 0;
		return SYSCTL_OUT(req, dummy, strlen(dummy) + 1);
#endif
	case HW_USERMEM:
	{
		int usermem = (int)(mem_size - vm_page_wire_count * page_size);

		return SYSCTL_RETURN(req, usermem);
	}
	case HW_EPOCH:
		epochTemp = PEGetPlatformEpoch();
		if (epochTemp == -1) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, epochTemp);
	case HW_VECTORUNIT: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		int vector = cpu_info.vector_unit == 0 ? 0 : 1;
		return SYSCTL_RETURN(req, vector);
	}
	case HW_L2SETTINGS: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l2_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, cpu_info.l2_settings);
	}
	case HW_L3SETTINGS: {
		ml_cpu_info_t cpu_info = sysctl_hw_generic_cpu_info(perflevel, arg2);
		if (cpu_info.l3_cache_size == UINT32_MAX) {
			return EINVAL;
		}
		return SYSCTL_RETURN(req, cpu_info.l3_settings);
	}
	default:
		return ENOTSUP;
	}
	/*
	 * Callers may come to us with either int or quad buffers.
	 */
	if (doquad) {
		return SYSCTL_RETURN(req, qval);
	}
	return SYSCTL_RETURN(req, val);
}
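/*
 * Illustrative userspace sketch (assumed usage, not compiled as part of this
 * file): the same backing handler can be reached through an int-sized compat
 * OID or a quad-sized named OID, which is why the function above keeps both
 * `val` and `qval`.
 *
 *     #include <stdio.h>
 *     #include <stdint.h>
 *     #include <sys/sysctl.h>
 *
 *     int
 *     main(void)
 *     {
 *             uint64_t l2_bytes = 0;
 *             size_t len = sizeof(l2_bytes);
 *
 *             // hw.l2cachesize is registered below as a quad ("Q").
 *             if (sysctlbyname("hw.l2cachesize", &l2_bytes, &len, NULL, 0) == 0) {
 *                     printf("L2 cache: %llu bytes\n", (unsigned long long)l2_bytes);
 *             }
 *             return 0;
 *     }
 */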

static int
sysctl_hw_cachesize(struct sysctl_oid *oidp __unused, void *arg1 __unused,
    int arg2 __unused, struct sysctl_req *req)
{
	uint64_t cachesize[10] = {};

#if __x86_64__
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
#elif __arm64__
	cluster_type_t min_perflevel_cluster_type = cpu_type_for_perflevel(__builtin_popcount(ml_get_cpu_types()) - 1);

	cachesize[0] = ml_get_machine_mem();
	cachesize[1] = cache_info_type(min_perflevel_cluster_type)->c_dsize; /* Using the DCache */
	cachesize[2] = cache_info_type(min_perflevel_cluster_type)->c_l2size;
#else
#error unknown architecture
#endif

	return SYSCTL_RETURN(req, cachesize);
}
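/*
 * Illustrative userspace sketch (assumed usage, not compiled as part of this
 * file): hw.cachesize is registered below as an opaque array of uint64_t;
 * on the paths above, index 0 is the memory size and higher indices are
 * cache levels.
 *
 *     #include <stdio.h>
 *     #include <stdint.h>
 *     #include <sys/sysctl.h>
 *
 *     int
 *     main(void)
 *     {
 *             uint64_t sizes[10] = {0};
 *             size_t len = sizeof(sizes);
 *
 *             if (sysctlbyname("hw.cachesize", sizes, &len, NULL, 0) == 0) {
 *                     for (size_t i = 0; i < len / sizeof(sizes[0]); i++) {
 *                             printf("cachesize[%zu] = %llu\n", i,
 *                                 (unsigned long long)sizes[i]);
 *                     }
 *             }
 *             return 0;
 *     }
 */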

/* hw.pagesize and hw.tbfrequency are expected as 64 bit values */
static int
sysctl_pagesize
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vm_map_t map = get_task_map(current_task());
	long long l = vm_map_page_size(map);
	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
}

static int
sysctl_pagesize32
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	long long l;
#if __arm64__
	l = (long long) (1 << page_shift_user32);
#else /* __arm64__ */
	l = (long long) PAGE_SIZE;
#endif /* __arm64__ */
	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
}

static int
sysctl_tbfrequency
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	long long l = gPEClockFrequencyInfo.timebase_frequency_hz;
	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
}
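/*
 * Illustrative userspace sketch (assumed usage, not compiled as part of this
 * file): hw.pagesize and hw.tbfrequency are registered below as quads, so
 * they should be read into 64-bit storage.
 *
 *     #include <stdio.h>
 *     #include <stdint.h>
 *     #include <sys/sysctl.h>
 *
 *     int
 *     main(void)
 *     {
 *             int64_t pagesize = 0, tbfreq = 0;
 *             size_t len = sizeof(pagesize);
 *
 *             (void)sysctlbyname("hw.pagesize", &pagesize, &len, NULL, 0);
 *             len = sizeof(tbfreq);
 *             (void)sysctlbyname("hw.tbfrequency", &tbfreq, &len, NULL, 0);
 *             printf("page size %lld, timebase %lld Hz\n",
 *                 (long long)pagesize, (long long)tbfreq);
 *             return 0;
 *     }
 */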

/*
 * Called by IOKit on Intel, or by sysctl_load_devicetree_entries()
 */
void
sysctl_set_osenvironment(unsigned int size, const void* value)
{
	if (osenvironment_size == 0 && size > 0) {
		osenvironment = zalloc_permanent(size, ZALIGN_NONE);
		if (osenvironment) {
			memcpy(osenvironment, value, size);
			osenvironment_size = size;
		}
	}
}

void
sysctl_unblock_osenvironment(void)
{
	os_atomic_inc(&osenvironment_initialized, relaxed);
	thread_wakeup((event_t) &osenvironment_initialized);
}

/*
 * Create sysctl entries coming from device tree.
 *
 * Entries from device tree are loaded here because SecureDTLookupEntry() only works before
 * PE_init_iokit(). Doing this also avoids the extern-C hackery to access these entries
 * from IORegistry (which requires C++).
 */
__startup_func
static void
sysctl_load_devicetree_entries(void)
{
	DTEntry chosen;
	void const *value;
	unsigned int size;

	if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
		return;
	}

	/* load osenvironment */
	if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &value, &size)) {
		sysctl_set_osenvironment(size, value);
	}

	/* load ephemeral_storage */
	if (kSuccess == SecureDTGetProperty(chosen, "ephemeral-storage", (void const **) &value, &size)) {
		if (size == sizeof(uint32_t)) {
			ephemeral_storage = *(uint32_t const *)value;
			property_existence.ephemeral_storage = 1;
		}
	}

	/* load use_recovery_securityd */
	if (kSuccess == SecureDTGetProperty(chosen, "use-recovery-securityd", (void const **) &value, &size)) {
		if (size == sizeof(uint32_t)) {
			use_recovery_securityd = *(uint32_t const *)value;
			property_existence.use_recovery_securityd = 1;
		}
	}
}
STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_devicetree_entries);

static int
sysctl_osenvironment
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#if defined(__x86_64__)
#if (DEVELOPMENT || DEBUG)
	if (os_atomic_load(&osenvironment_initialized, relaxed) == 0) {
		assert_wait((event_t) &osenvironment_initialized, THREAD_UNINT);
		if (os_atomic_load(&osenvironment_initialized, relaxed) != 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			(void) thread_block(THREAD_CONTINUE_NULL);
		}
	}
#endif
#endif
	if (osenvironment_size > 0) {
		return SYSCTL_OUT(req, osenvironment, osenvironment_size);
	} else {
		return EINVAL;
	}
}

static int
sysctl_ephemeral_storage
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	if (property_existence.ephemeral_storage) {
		return SYSCTL_OUT(req, &ephemeral_storage, sizeof(ephemeral_storage));
	} else {
		return EINVAL;
	}
}

static int
sysctl_use_recovery_securityd
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	if (property_existence.use_recovery_securityd) {
		return SYSCTL_OUT(req, &use_recovery_securityd, sizeof(use_recovery_securityd));
	} else {
		return EINVAL;
	}
}

static int
sysctl_use_kernelmanagerd
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#if XNU_TARGET_OS_OSX
	static int use_kernelmanagerd = 1;
	static bool once = false;

	if (!once) {
		kc_format_t kc_format;
		PE_get_primary_kc_format(&kc_format);
		if (kc_format == KCFormatFileset) {
			use_kernelmanagerd = 1;
		} else {
			PE_parse_boot_argn("kernelmanagerd", &use_kernelmanagerd, sizeof(use_kernelmanagerd));
		}
		once = true;
	}
#else
	static int use_kernelmanagerd = 0;
#endif
	return SYSCTL_OUT(req, &use_kernelmanagerd, sizeof(use_kernelmanagerd));
}

#define HW_LOCAL_FREQUENCY            1
#define HW_LOCAL_FREQUENCY_MIN        2
#define HW_LOCAL_FREQUENCY_MAX        3
#define HW_LOCAL_FREQUENCY_CLOCK_RATE 4

static int
sysctl_bus_frequency
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
{

#if DEBUG || DEVELOPMENT || !defined(__arm64__)
	switch (arg2) {
	case HW_LOCAL_FREQUENCY:
		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_hz);
	case HW_LOCAL_FREQUENCY_MIN:
		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_min_hz);
	case HW_LOCAL_FREQUENCY_MAX:
		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_max_hz);
	case HW_LOCAL_FREQUENCY_CLOCK_RATE:
		return SYSCTL_OUT(req, &gPEClockFrequencyInfo.bus_clock_rate_hz, sizeof(int));
	default:
		return EINVAL;
	}
#else
	return ENOENT;
#endif
}

static int
sysctl_cpu_frequency
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
{

#if DEBUG || DEVELOPMENT || !defined(__arm64__)
	switch (arg2) {
	case HW_LOCAL_FREQUENCY:
		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_hz);
	case HW_LOCAL_FREQUENCY_MIN:
		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_min_hz);
	case HW_LOCAL_FREQUENCY_MAX:
		return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_max_hz);
	case HW_LOCAL_FREQUENCY_CLOCK_RATE:
		return SYSCTL_OUT(req, &gPEClockFrequencyInfo.cpu_clock_rate_hz, sizeof(int));
	default:
		return EINVAL;
	}
#else
	return ENOENT;
#endif
}

/*
 * This sysctl will signal to userspace that a serial console is desired:
 *
 * hw.serialdebugmode = 1 will load the serial console job in the multi-user session;
 * hw.serialdebugmode = 2 will load the serial console job in the base system as well
 */
static int
sysctl_serialdebugmode
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	uint32_t serial_boot_arg;
	int serialdebugmode = 0;

	if (PE_parse_boot_argn("serial", &serial_boot_arg, sizeof(serial_boot_arg)) &&
	    (serial_boot_arg & SERIALMODE_OUTPUT) && (serial_boot_arg & SERIALMODE_INPUT)) {
		serialdebugmode = (serial_boot_arg & SERIALMODE_BASE_TTY) ? 2 : 1;
	}

	return sysctl_io_number(req, serialdebugmode, sizeof(serialdebugmode), NULL, NULL);
}
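/*
 * Illustrative userspace check (assumed usage, not compiled as part of this
 * file), following the semantics documented above:
 *
 *     #include <stdio.h>
 *     #include <sys/sysctl.h>
 *
 *     int
 *     main(void)
 *     {
 *             int mode = 0;
 *             size_t len = sizeof(mode);
 *
 *             if (sysctlbyname("hw.serialdebugmode", &mode, &len, NULL, 0) == 0) {
 *                     if (mode == 2) {
 *                             printf("serial console in base system and multi-user session\n");
 *                     } else if (mode == 1) {
 *                             printf("serial console in multi-user session\n");
 *                     }
 *             }
 *             return 0;
 *     }
 */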

/*
 * hw.* MIB variables.
 */
SYSCTL_PROC(_hw, HW_NCPU, ncpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_NCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_AVAILCPU, activecpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_AVAILCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_INT(_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, BYTE_ORDER, "");
SYSCTL_PROC(_hw, OID_AUTO, cputype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUTYPE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, cpusubtype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBTYPE, sysctl_hw_generic, "I", "");
SYSCTL_INT(_hw, OID_AUTO, cpu64bit_capable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpu64bit, 0, "");
SYSCTL_PROC(_hw, OID_AUTO, cpufamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUFAMILY, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, cpusubfamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBFAMILY, sysctl_hw_generic, "I", "");
SYSCTL_OPAQUE(_hw, OID_AUTO, cacheconfig, CTLFLAG_RD | CTLFLAG_LOCKED, &cacheconfig, sizeof(cacheconfig), "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, cachesize, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_hw_cachesize, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, pagesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, pagesize32, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize32, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, busfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_bus_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, busfrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_bus_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, busfrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_bus_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, cpufrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_cpu_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_cpu_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_cpu_frequency, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, cachelinesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_CACHELINE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, l1icachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, l1dcachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, l2cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
SYSCTL_PROC(_hw, OID_AUTO, l3cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", "");
#if defined(__arm64__) && (DEBUG || DEVELOPMENT)
SYSCTL_QUAD(_hw, OID_AUTO, memfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_min_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_max_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, prffrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_min_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_max_hz, "");
SYSCTL_QUAD(_hw, OID_AUTO, fixfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.fix_frequency_hz, "");
#endif /* __arm64__ */
SYSCTL_PROC(_hw, OID_AUTO, tbfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_tbfrequency, "Q", "");
/**
 * The naming around the sysctls for max_mem and max_mem_actual is different between macOS and
 * non-macOS platforms because historically macOS's hw.memsize provided the value of the actual
 * physical memory size, whereas on non-macOS it is the memory size minus any carveouts.
 */
#if XNU_TARGET_OS_OSX
SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
SYSCTL_QUAD(_hw, OID_AUTO, memsize_usable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, "");
#else
SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, "");
SYSCTL_QUAD(_hw, OID_AUTO, memsize_physical, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, "");
#endif /* XNU_TARGET_OS_OSX */
SYSCTL_INT(_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &packages, 0, "");
SYSCTL_PROC(_hw, OID_AUTO, osenvironment, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_osenvironment, "A", "");
SYSCTL_PROC(_hw, OID_AUTO, ephemeral_storage, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_ephemeral_storage, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, use_recovery_securityd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_recovery_securityd, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, use_kernelmanagerd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_kernelmanagerd, "I", "");
SYSCTL_PROC(_hw, OID_AUTO, serialdebugmode, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_serialdebugmode, "I", "");
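/*
 * Illustrative userspace sketch (assumed usage, not compiled as part of this
 * file): on every platform hw.memsize above is a 64-bit value, so read it
 * into a uint64_t rather than relying on the int-sized HW_PHYSMEM compat
 * variable declared further down.
 *
 *     #include <stdio.h>
 *     #include <stdint.h>
 *     #include <sys/sysctl.h>
 *
 *     int
 *     main(void)
 *     {
 *             uint64_t memsize = 0;
 *             size_t len = sizeof(memsize);
 *
 *             if (sysctlbyname("hw.memsize", &memsize, &len, NULL, 0) == 0) {
 *                     printf("hw.memsize = %llu\n", (unsigned long long)memsize);
 *             }
 *             return 0;
 *     }
 */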

/*
 * hw.perflevelN.* variables.
 * Users may check these to determine properties that vary across different CPU types, such as number of CPUs
 * or cache sizes. Perf level 0 corresponds to the highest-performance level. An illustrative userspace walk
 * of these nodes follows the declarations below.
 */
SYSCTL_NODE(_hw, OID_AUTO, perflevel0, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 0 topology and cache geometry parameters");
SYSCTL_NODE(_hw, OID_AUTO, perflevel1, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, "Perf level 1 topology and cache geometry parameters");
SYSCTL_PROC(_hw, OID_AUTO, nperflevels, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_NPERFLEVELS, sysctl_hw_generic, "I", "Number of performance levels supported by this system");

SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel0, OID_AUTO, name, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)0, HW_PERFLEVEL_NAME, sysctl_hw_generic, "A", "");

SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_PHYSICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPU, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_LOGICALCPUMAX, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1icachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l1dcachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l2cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL2, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, l3cachesize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, cpusperl3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_CPUSPERL3, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw_perflevel1, OID_AUTO, name, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *)1, HW_PERFLEVEL_NAME, sysctl_hw_generic, "A", "");
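/*
 * Illustrative userspace walk (assumed usage, not compiled as part of this
 * file): query hw.nperflevels first, then each hw.perflevelN.* node; levels
 * that do not exist return ENOENT from the handler.
 *
 *     #include <stdio.h>
 *     #include <sys/sysctl.h>
 *
 *     int
 *     main(void)
 *     {
 *             int nlevels = 0, ncpus = 0;
 *             size_t len = sizeof(nlevels);
 *             char name[64];
 *
 *             if (sysctlbyname("hw.nperflevels", &nlevels, &len, NULL, 0) != 0) {
 *                     return 1;
 *             }
 *             for (int i = 0; i < nlevels; i++) {
 *                     snprintf(name, sizeof(name), "hw.perflevel%d.logicalcpu", i);
 *                     len = sizeof(ncpus);
 *                     if (sysctlbyname(name, &ncpus, &len, NULL, 0) == 0) {
 *                             printf("perflevel %d: %d logical CPUs\n", i, ncpus);
 *                     }
 *             }
 *             return 0;
 *     }
 */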

/*
 * Optional CPU features can register nodes below hw.optional.
 *
 * If the feature is not present, the node should either not be registered,
 * or it should return 0. If the feature is present, the node should return
 * 1.
 */
SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features");
SYSCTL_NODE(_hw_optional, OID_AUTO, arm, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features for ARM processors");

SYSCTL_INT(_hw_optional, OID_AUTO, floatingpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, 1, ""); /* always set */
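/*
 * Illustrative userspace check (assumed usage, not compiled as part of this
 * file): a missing hw.optional node and a node that returns 0 both mean
 * "feature absent", so a lookup failure should be treated like a zero value.
 *
 *     #include <stdio.h>
 *     #include <sys/sysctl.h>
 *
 *     static int
 *     has_feature(const char *name)
 *     {
 *             int present = 0;
 *             size_t len = sizeof(present);
 *
 *             if (sysctlbyname(name, &present, &len, NULL, 0) != 0) {
 *                     return 0;   // not registered: feature not present
 *             }
 *             return present != 0;
 *     }
 *
 *     int
 *     main(void)
 *     {
 *             printf("FEAT_LSE: %d\n", has_feature("hw.optional.arm.FEAT_LSE"));
 *             return 0;
 *     }
 */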

/*
 * Optional device hardware features can be registered by drivers below hw.features
 */
SYSCTL_EXTENSIBLE_NODE(_hw, OID_AUTO, features, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "hardware features");

/*
 * Deprecated variables. These are supported for backwards compatibility
 * purposes only. The MASKED flag requests that the variables not be
 * printed by sysctl(8) and similar utilities.
 *
 * The variables named *_compat here are int-sized versions of variables
 * that are now exported as quads. The int-sized versions are normally
 * looked up only by number, whereas the quad-sized versions should be
 * looked up by name.
 *
 * The *_compat nodes are *NOT* visible within the kernel.
 */

SYSCTL_PROC(_hw, HW_PAGESIZE, pagesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PAGESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_BUS_FREQ, busfrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_bus_frequency, "I", "");
SYSCTL_PROC(_hw, HW_CPU_FREQ, cpufrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_cpu_frequency, "I", "");
SYSCTL_PROC(_hw, HW_CACHELINE, cachelinesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_CACHELINE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L1ICACHESIZE, l1icachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L1DCACHESIZE, l1dcachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L2CACHESIZE, l2cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE, sysctl_hw_generic, "I", "");
SYSCTL_COMPAT_INT(_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, "");
SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MACHINE, sysctl_hw_generic, "A", "");
SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MODEL, sysctl_hw_generic, "A", "");
SYSCTL_PROC(_hw, HW_TARGET, target, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_TARGET, sysctl_hw_generic, "A", "");
SYSCTL_PROC(_hw, HW_PRODUCT, product, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PRODUCT, sysctl_hw_generic, "A", "");
SYSCTL_COMPAT_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &mem_size, 0, "");
SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_USERMEM, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_EPOCH, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_VECTORUNIT, vectorunit, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_VECTORUNIT, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L2SETTINGS, l2settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2SETTINGS, sysctl_hw_generic, "I", "");
SYSCTL_PROC(_hw, HW_L3SETTINGS, l3settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3SETTINGS, sysctl_hw_generic, "I", "");
SYSCTL_INT(_hw, OID_AUTO, cputhreadtype, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputhreadtype, 0, "");
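/*
 * Illustrative userspace sketch (assumed usage, not compiled as part of this
 * file): the *_compat OIDs above are reached by MIB number and return ints,
 * while the named quad-sized OIDs should be looked up by name.
 *
 *     #include <stdio.h>
 *     #include <stdint.h>
 *     #include <sys/sysctl.h>
 *
 *     int
 *     main(void)
 *     {
 *             int mib[2] = { CTL_HW, HW_PAGESIZE };
 *             int pagesize_compat = 0;
 *             uint64_t pagesize = 0;
 *             size_t len;
 *
 *             len = sizeof(pagesize_compat);
 *             (void)sysctl(mib, 2, &pagesize_compat, &len, NULL, 0);
 *
 *             len = sizeof(pagesize);
 *             (void)sysctlbyname("hw.pagesize", &pagesize, &len, NULL, 0);
 *
 *             printf("compat: %d, named: %llu\n", pagesize_compat,
 *                 (unsigned long long)pagesize);
 *             return 0;
 *     }
 */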

#if defined(__i386__) || defined(__x86_64__) || CONFIG_X86_64_COMPAT
static int
sysctl_cpu_capability
(__unused struct sysctl_oid *oidp, void *arg1, __unused int arg2, struct sysctl_req *req)
{
	uint64_t caps;
	caps = _get_cpu_capabilities();

	uint64_t mask = (uint64_t) (uintptr_t) arg1;
	boolean_t is_capable = (caps & mask) != 0;

	return SYSCTL_OUT(req, &is_capable, sizeof(is_capable));
}
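/*
 * Illustrative note (added for clarity, not from the original source): arg1
 * for each registration below is a capability bit from the cpu_capabilities
 * header included above, e.g. kHasAVX2_0. The handler simply ANDs that bit
 * against _get_cpu_capabilities() and reports 0 or 1, so
 *
 *     is_capable = (_get_cpu_capabilities() & kHasAVX2_0) != 0;
 *
 * is what backs a query such as sysctlbyname("hw.optional.avx2_0", ...).
 */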
#define capability(name) name


SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMMX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSupplementalSSE3), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_2), 0, sysctl_cpu_capability, "I", "");
/* "x86_64" is actually a preprocessor symbol on the x86_64 kernel, so we have to hack this */
#undef x86_64
SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(k64Bit), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAES), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX1_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRDRAND), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasF16C), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasENFSTRG), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX2_0), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI1), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI2), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRTM), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasHLE), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasADX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMPX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSGX), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512F), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512CD), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512DQ), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512BW), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VL), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512IFMA), 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VBMI), 0, sysctl_cpu_capability, "I", "");
#undef capability
#endif /* __i386__ || __x86_64__ || CONFIG_X86_64_COMPAT */

#if defined (__arm64__)
int watchpoint_flag = 0;
int breakpoint_flag = 0;
SECURITY_READ_ONLY_LATE(int) gARMv8Crc32 = 0;

/* Features from: ID_AA64ISAR0_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FlagM = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FlagM2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FHM = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DotProd = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA3 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_RDM = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LSE = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA256 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA512 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SHA1 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_AES = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_PMULL = 0;

/* Features from: ID_AA64ISAR1_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SPECRES = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SB = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FRINTTS = 0;
SECURITY_READ_ONLY_LATE(int) gARMv8Gpi = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LRCPC = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LRCPC2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FCMA = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_JSCVT = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_PAuth = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_PAuth2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FPAC = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FPACCOMBINE = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DPB = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DPB2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_BF16 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_I8MM = 0;

/* Features from: ID_AA64ISAR2_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_RPRES = 0;

/* Features from: ID_AA64MMFR0_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_ECV = 0;

/* Features from: ID_AA64MMFR1_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_AFP = 0;

/* Features from: ID_AA64MMFR2_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_LSE2 = 0;

/* Features from: ID_AA64PFR0_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_CSV2 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_CSV3 = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_DIT = 0;
SECURITY_READ_ONLY_LATE(int) gARM_AdvSIMD = 0;
SECURITY_READ_ONLY_LATE(int) gARM_AdvSIMD_HPFPCvt = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_FP16 = 0;

/* Features from: ID_AA64PFR1_EL1 */
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_SSBS = 0;
SECURITY_READ_ONLY_LATE(int) gARM_FEAT_BTI = 0;


SECURITY_READ_ONLY_LATE(int) gUCNormalMem = 0;

#if defined (__arm64__)
SECURITY_READ_ONLY_LATE(int) arm64_flag = 1;
#else /* end __arm64__*/
SECURITY_READ_ONLY_LATE(int) arm64_flag = 0;
#endif

/* Legacy Names ARM Optional Feature Sysctls */
SYSCTL_INT(_hw_optional, OID_AUTO, neon, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, neon_hpfp, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD_HPFPCvt, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, neon_fp16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FP16, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_1_atomics, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LSE, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_fhm, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FHM, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha512, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA512, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA3, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, armv8_3_compnum, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FCMA, 0, "");

/* Misc ARM Optional Feature Sysctls */
SYSCTL_INT(_hw_optional, OID_AUTO, watchpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &watchpoint_flag, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, breakpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &breakpoint_flag, 0, "");

/**
 * Enumerated sysctls for every ARM optional feature to be exported to
 * userspace. These are to be enumerated using the official feature name from
 * the ARM ARM. They are grouped below based on the MSR that will be used to populate the data.
 */

1155/* Features from: ID_AA64ISAR0_EL1 */
1156SYSCTL_INT(_hw_optional, OID_AUTO, armv8_crc32, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv8Crc32, 0, "");
1157SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FlagM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FlagM, 0, "");
1158SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FlagM2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FlagM2, 0, "");
1159SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FHM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FHM, 0, "");
1160SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DotProd, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DotProd, 0, "");
1161SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA3, 0, "");
1162SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_RDM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_RDM, 0, "");
1163SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LSE, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LSE, 0, "");
1164SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA256, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA256, 0, "");
1165SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA512, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA512, 0, "");
1166SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SHA1, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SHA1, 0, "");
1167SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_AES, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_AES, 0, "");
1168SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_PMULL, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_PMULL, 0, "");
1169
1170/* Features from: ID_AA64ISAR1_EL1 */
1171SYSCTL_INT(_hw_optional, OID_AUTO, armv8_gpi, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv8Gpi, 0, "");
1172SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SPECRES, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SPECRES, 0, "");
1173SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SB, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SB, 0, "");
1174SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FRINTTS, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FRINTTS, 0, "");
1175SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LRCPC, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LRCPC, 0, "");
1176SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LRCPC2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LRCPC2, 0, "");
1177SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FCMA, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FCMA, 0, "");
1178SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_JSCVT, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_JSCVT, 0, "");
1179SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_PAuth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_PAuth, 0, "");
1180SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_PAuth2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_PAuth2, 0, "");
1181SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FPAC, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FPAC, 0, "");
1182SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DPB, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DPB, 0, "");
1183SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DPB2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DPB2, 0, "");
1184SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_BF16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_BF16, 0, "");
1185SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_I8MM, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_I8MM, 0, "");
1186
1187/* Features from: ID_AA64ISAR2_EL1 */
1188SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_RPRES, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_RPRES, 0, "");
1189
1190/* Features from: ID_AA64MMFR0_EL1 */
1191SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_ECV, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_ECV, 0, "");
1192
1193/* Features from: ID_AA64MMFR1_EL1 */
1194SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_AFP, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_AFP, 0, "");
1195
1196/* Features from: ID_AA64MMFR2_EL1 */
1197SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_LSE2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_LSE2, 0, "");
1198
1199/* Features from: ID_AA64PFR0_EL1 */
1200SYSCTL_INT(_hw_optional, OID_AUTO, AdvSIMD, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD, 0, "");
1201SYSCTL_INT(_hw_optional, OID_AUTO, AdvSIMD_HPFPCvt, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_AdvSIMD_HPFPCvt, 0, "");
1202SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_CSV2, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_CSV2, 0, "");
1203SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_CSV3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_CSV3, 0, "");
1204SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_DIT, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_DIT, 0, "");
1205SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_FP16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_FP16, 0, "");
1206
/* Features from: FPCR */
SECURITY_READ_ONLY_LATE(int) gARM_FP_SyncExceptions = 0;

/* Features from: ID_AA64PFR1_EL1 */
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_SSBS, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_SSBS, 0, "");
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FEAT_BTI, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FEAT_BTI, 0, "");


/* Features from: FPCR */
SYSCTL_INT(_hw_optional_arm, OID_AUTO, FP_SyncExceptions, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARM_FP_SyncExceptions, 0, "");

SYSCTL_INT(_hw_optional, OID_AUTO, ucnormal_mem, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gUCNormalMem, 0, "");

#if DEBUG || DEVELOPMENT
#if __ARM_KERNEL_PROTECT__
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 1;
#else
static SECURITY_READ_ONLY_LATE(int) arm_kernel_protect = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, arm_kernel_protect, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm_kernel_protect, 0, "");
#endif

#if DEBUG || DEVELOPMENT
static int ic_inval_filters = 0;
SYSCTL_INT(_hw_optional, OID_AUTO, ic_inval_filters, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ic_inval_filters, 0, "");
#endif

#if DEBUG || DEVELOPMENT
static SECURITY_READ_ONLY_LATE(int) wkdm_popcount = 0;
SYSCTL_INT(_hw_optional, OID_AUTO, wkdm_popcount, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &wkdm_popcount, 0, "");
#endif

#if DEBUG || DEVELOPMENT
#if __has_feature(ptrauth_calls)
static SECURITY_READ_ONLY_LATE(int) ptrauth = 1;
#else
static SECURITY_READ_ONLY_LATE(int) ptrauth = 0;
#endif
SYSCTL_INT(_hw_optional, OID_AUTO, ptrauth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ptrauth, 0, "");
#endif

/*
 * Without this little ifdef dance, the preprocessor replaces "arm64" with "1",
 * leaving us with a less-than-helpful hw.optional.1 sysctl; a sketch of that
 * failure mode follows this block.
 */
#ifdef arm64
#undef arm64
SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
#define arm64 1
#else
SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
#endif
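
/*
 * Illustrative sketch of the failure mode avoided above (hypothetical
 * preprocessing, not real code in this file): if "arm64" were left defined as
 * "1", the stringified OID name would be the macro's expansion rather than
 * its spelling:
 *
 *     // with "#define arm64 1" still in effect:
 *     SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, "");
 *     // the name argument expands to 1 before the SYSCTL machinery
 *     // stringifies it, so the OID would surface as "hw.optional.1".
 */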
#endif /* ! __arm64__ */


#if defined(__arm64__) && defined(CONFIG_XNUPOST)
/**
 * Test whether the new values for a few hw.optional sysctls match the legacy
 * way of obtaining that information.
 *
 * Specifically, hw.optional.neon_fp16 has been used to indicate both FEAT_FP16
 * and FEAT_FHM. Since we now grab the information directly from the ISA
 * status registers instead of from arm_mvfp_info(), we need to check that
 * the new source won't break any existing use cases of the sysctl, and assert
 * that hw.optional.neon_fp16 returns the same value as it used to on all
 * devices.
 */
kern_return_t
arm_cpu_capabilities_legacy_test(void)
{
	T_SETUPBEGIN;
	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
	T_ASSERT_NOTNULL(mvfp_info, "arm_mvfp_info returned null pointer.");
	T_SETUPEND;

	T_EXPECT_EQ_INT(mvfp_info->neon, gARM_AdvSIMD, "neon value should match legacy");
	T_EXPECT_EQ_INT(mvfp_info->neon_hpfp, gARM_AdvSIMD_HPFPCvt, "neon hpfp cvt value should match legacy");
	T_EXPECT_EQ_INT(mvfp_info->neon_fp16, gARM_FEAT_FP16, "neon fp16 value should match legacy");

	T_LOG("Completed ARM CPU capabilities legacy compliance test.");
	return KERN_SUCCESS;
}
#endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */

/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
#if defined(__i386__) || defined (__x86_64__)
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#elif defined (__arm64__)
	cpu64bit = (cpu_type() & CPU_ARCH_ABI64) == CPU_ARCH_ABI64;
#else
#error Unsupported arch
#endif
#if defined (__i386__) || defined (__x86_64__)
	/* hw.cacheconfig */
	cacheconfig[0] = ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true);
	cacheconfig[1] = ml_cpu_cache_sharing(1, CLUSTER_TYPE_SMP, true);
	cacheconfig[2] = ml_cpu_cache_sharing(2, CLUSTER_TYPE_SMP, true);
	cacheconfig[3] = ml_cpu_cache_sharing(3, CLUSTER_TYPE_SMP, true);
	cacheconfig[4] = 0;

	/* hw.packages */
	packages = (int)(roundup(ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true), cpuid_info()->thread_count)
	    / cpuid_info()->thread_count);
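
	/*
	 * Illustrative arithmetic only (the values below are hypothetical):
	 * if ml_cpu_cache_sharing(0, CLUSTER_TYPE_SMP, true) reported 16 and
	 * cpuid_info()->thread_count were 8, the expression above would
	 * evaluate as roundup(16, 8) / 8 == 16 / 8 == 2, i.e. hw.packages == 2.
	 */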

#elif defined(__arm64__) /* end __i386 */
	watchpoint_flag = arm_debug_info()->num_watchpoint_pairs;
	breakpoint_flag = arm_debug_info()->num_breakpoint_pairs;

	cluster_type_t min_perflevel_cluster_type = cpu_type_for_perflevel(__builtin_popcount(ml_get_cpu_types()) - 1);

	cacheconfig[0] = ml_wait_max_cpus();
	cacheconfig[1] = ml_cpu_cache_sharing(1, min_perflevel_cluster_type, true);
	cacheconfig[2] = ml_cpu_cache_sharing(2, min_perflevel_cluster_type, true);
	cacheconfig[3] = 0;
	cacheconfig[4] = 0;
	cacheconfig[5] = 0;
	cacheconfig[6] = 0;

	packages = 1;
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64 && !__arm64__ */
}

__startup_func
static void
sysctl_mib_startup(void)
{
	cputhreadtype = cpu_threadtype();

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 * that actually directly relate to the functions in
	 * question.
	 */

	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid_early(&sysctl__hw_cputhreadtype);
	}
}
STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_mib_startup);