/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * host.c
 *
 * Non-IPC host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <device/device_port.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/ecc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h> // mach_node_port_changed()

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#include <IOKit/IOBSD.h> // IOTaskHasEntitlement
#include <IOKit/IOKitKeys.h> // DriverKit entitlement strings

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#include <pexpert/pexpert.h>

SCALABLE_COUNTER_DEFINE(vm_statistics_zero_fill_count); /* # of zero fill pages */
SCALABLE_COUNTER_DEFINE(vm_statistics_reactivations); /* # of pages reactivated */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageins); /* # of pageins */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageouts); /* # of pageouts */
SCALABLE_COUNTER_DEFINE(vm_statistics_faults); /* # of faults */
SCALABLE_COUNTER_DEFINE(vm_statistics_cow_faults); /* # of copy-on-writes */
SCALABLE_COUNTER_DEFINE(vm_statistics_lookups); /* object cache lookups */
SCALABLE_COUNTER_DEFINE(vm_statistics_hits); /* object cache hits */
SCALABLE_COUNTER_DEFINE(vm_statistics_purges); /* # of pages purged */
SCALABLE_COUNTER_DEFINE(vm_statistics_decompressions); /* # of pages decompressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_compressions); /* # of pages compressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapins); /* # of pages swapped in (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapouts); /* # of pages swapped out (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_total_uncompressed_pages_in_compressor); /* # of pages (uncompressed) held within the compressor. */
SCALABLE_COUNTER_DEFINE(vm_page_grab_count);

host_data_t realhost;

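/*
 * Snapshot the per-CPU scalable counters defined above into a
 * vm_statistics64 structure. The fields are loaded one at a time, so
 * the snapshot is only approximately consistent.
 */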
static void
get_host_vm_stats(vm_statistics64_t out)
{
    out->zero_fill_count = counter_load(&vm_statistics_zero_fill_count);
    out->reactivations = counter_load(&vm_statistics_reactivations);
    out->pageins = counter_load(&vm_statistics_pageins);
    out->pageouts = counter_load(&vm_statistics_pageouts);
    out->faults = counter_load(&vm_statistics_faults);
    out->cow_faults = counter_load(&vm_statistics_cow_faults);
    out->lookups = counter_load(&vm_statistics_lookups);
    out->hits = counter_load(&vm_statistics_hits);
    out->compressions = counter_load(&vm_statistics_compressions);
    out->decompressions = counter_load(&vm_statistics_decompressions);
    out->swapins = counter_load(&vm_statistics_swapins);
    out->swapouts = counter_load(&vm_statistics_swapouts);
}

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    unsigned int count = processor_count;
    assert(count != 0);

    static_assert(sizeof(mach_port_t) == sizeof(processor_t));

    mach_port_t *ports = kalloc_type(mach_port_t, count, Z_WAITOK);
    if (!ports) {
        return KERN_RESOURCE_SHORTAGE;
    }

    for (unsigned int i = 0; i < count; i++) {
        processor_t processor = processor_array[i];
        assert(processor != PROCESSOR_NULL);

        /* do the conversion that MIG should handle */
        ipc_port_t processor_port = convert_processor_to_port(processor);
        ports[i] = processor_port;
    }

    *countp = count;
    *out_array = (processor_array_t)ports;

    return KERN_SUCCESS;
}

extern int sched_allow_NO_SMT_threads;

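/*
 * host_info:
 *
 * Return mostly-static information about the host, selected by flavor:
 * CPU counts and types, the scheduler quantum, priority bands, and the
 * preferred user-space architecture. Dynamic counters are served by
 * host_statistics() and host_statistics64() below.
 */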
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (flavor) {
    case HOST_BASIC_INFO: {
        host_basic_info_t basic_info;
        int master_id = master_processor->cpu_id;

        /*
         * Basic information about this host.
         */
        if (*count < HOST_BASIC_INFO_OLD_COUNT) {
            return KERN_FAILURE;
        }

        basic_info = (host_basic_info_t)info;

        basic_info->memory_size = machine_info.memory_size;
        basic_info->cpu_type = slot_type(master_id);
        basic_info->cpu_subtype = slot_subtype(master_id);
        basic_info->max_cpus = machine_info.max_cpus;
#if defined(__x86_64__)
        if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) {
            basic_info->avail_cpus = primary_processor_avail_count_user;
        } else {
            basic_info->avail_cpus = processor_avail_count_user;
        }
#else
        basic_info->avail_cpus = processor_avail_count;
#endif

        if (*count >= HOST_BASIC_INFO_COUNT) {
            basic_info->cpu_threadtype = slot_threadtype(master_id);
            basic_info->physical_cpu = machine_info.physical_cpu;
            basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
            basic_info->logical_cpu = basic_info->avail_cpus;
#else
            basic_info->logical_cpu = machine_info.logical_cpu;
#endif
            basic_info->logical_cpu_max = machine_info.logical_cpu_max;
            basic_info->max_mem = machine_info.max_mem;

            *count = HOST_BASIC_INFO_COUNT;
        } else {
            *count = HOST_BASIC_INFO_OLD_COUNT;
        }

        return KERN_SUCCESS;
    }

    case HOST_SCHED_INFO: {
        host_sched_info_t sched_info;
        uint32_t quantum_time;
        uint64_t quantum_ns;

        /*
         * Return scheduler information.
         */
        if (*count < HOST_SCHED_INFO_COUNT) {
            return KERN_FAILURE;
        }

        sched_info = (host_sched_info_t)info;

        quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
        absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

        sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

        *count = HOST_SCHED_INFO_COUNT;

        return KERN_SUCCESS;
    }

    case HOST_RESOURCE_SIZES: {
        /*
         * Return sizes of kernel data structures.
         */
        if (*count < HOST_RESOURCE_SIZES_COUNT) {
            return KERN_FAILURE;
        }

        /* XXX Fail until ledgers are implemented */
        return KERN_INVALID_ARGUMENT;
    }

    case HOST_PRIORITY_INFO: {
        host_priority_info_t priority_info;

        if (*count < HOST_PRIORITY_INFO_COUNT) {
            return KERN_FAILURE;
        }

        priority_info = (host_priority_info_t)info;

        priority_info->kernel_priority = MINPRI_KERNEL;
        priority_info->system_priority = MINPRI_KERNEL;
        priority_info->server_priority = MINPRI_RESERVED;
        priority_info->user_priority = BASEPRI_DEFAULT;
        priority_info->depress_priority = DEPRESSPRI;
        priority_info->idle_priority = IDLEPRI;
        priority_info->minimum_priority = MINPRI_USER;
        priority_info->maximum_priority = MAXPRI_RESERVED;

        *count = HOST_PRIORITY_INFO_COUNT;

        return KERN_SUCCESS;
    }

    /*
     * Gestalt for various trap facilities.
     */
    case HOST_MACH_MSG_TRAP:
    case HOST_SEMAPHORE_TRAPS: {
        *count = 0;
        return KERN_SUCCESS;
    }

    case HOST_CAN_HAS_DEBUGGER: {
        host_can_has_debugger_info_t can_has_debugger_info;

        if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
            return KERN_FAILURE;
        }

        can_has_debugger_info = (host_can_has_debugger_info_t)info;
        can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
        *count = HOST_CAN_HAS_DEBUGGER_COUNT;

        return KERN_SUCCESS;
    }

    case HOST_VM_PURGABLE: {
        if (*count < HOST_VM_PURGABLE_COUNT) {
            return KERN_FAILURE;
        }

        vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

        *count = HOST_VM_PURGABLE_COUNT;
        return KERN_SUCCESS;
    }

    case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
        if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
            return KERN_FAILURE;
        }

        host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
        bzero(debug_info, sizeof(host_debug_info_internal_data_t));
        *count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
        debug_info->config_coalitions = 1;
#endif
        debug_info->config_bank = 1;
#if CONFIG_ATM
        debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
        debug_info->config_csr = 1;
#endif
        return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
        return KERN_NOT_SUPPORTED;
#endif
    }

    case HOST_PREFERRED_USER_ARCH: {
        host_preferred_user_arch_t user_arch_info;

        /*
         * Return the preferred CPU architecture for user-space
         * binaries on this host.
         */
        if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
            return KERN_FAILURE;
        }

        user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
        cpu_type_t preferred_cpu_type;
        cpu_subtype_t preferred_cpu_subtype;
        if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
            preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
        }
        if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
            preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
        }
        user_arch_info->cpu_type = preferred_cpu_type;
        user_arch_info->cpu_subtype = preferred_cpu_subtype;
#else
        int master_id = master_processor->cpu_id;
        user_arch_info->cpu_type = slot_type(master_id);
        user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif

        *count = HOST_PREFERRED_USER_ARCH_COUNT;

        return KERN_SUCCESS;
    }

    default: return KERN_INVALID_ARGUMENT;
    }
}

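/*
 * host_statistics:
 *
 * Return dynamic statistics about the host, selected by flavor: load
 * averages, 32-bit VM counters, aggregate CPU ticks, and accumulated
 * statistics for terminated tasks. Wide counters are truncated with
 * VM_STATISTICS_TRUNCATE_TO_32_BIT(); host_statistics64() serves the
 * 64-bit variants.
 */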
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    switch (flavor) {
    case HOST_LOAD_INFO: {
        host_load_info_t load_info;

        if (*count < HOST_LOAD_INFO_COUNT) {
            return KERN_FAILURE;
        }

        load_info = (host_load_info_t)info;

        bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
        bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

        *count = HOST_LOAD_INFO_COUNT;
        return KERN_SUCCESS;
    }

    case HOST_VM_INFO: {
        vm_statistics64_data_t host_vm_stat;
        vm_statistics_t stat32;
        mach_msg_type_number_t original_count;

        if (*count < HOST_VM_INFO_REV0_COUNT) {
            return KERN_FAILURE;
        }

        get_host_vm_stats(&host_vm_stat);

        stat32 = (vm_statistics_t)info;

        stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
        stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

        if (vm_page_local_q) {
            zpercpu_foreach(lq, vm_page_local_q) {
                stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
            }
        }
        stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if !XNU_TARGET_OS_OSX
        stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else /* !XNU_TARGET_OS_OSX */
        stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif /* !XNU_TARGET_OS_OSX */
        stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
        stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
        stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
        stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
        stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
        stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
        stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
        stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

        /*
         * Fill in extra info added in later revisions of the
         * vm_statistics data structure. Fill in only what can fit
         * in the data structure the caller gave us!
         */
        original_count = *count;
        *count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
        if (original_count >= HOST_VM_INFO_REV1_COUNT) {
            /* rev1 added "purgeable" info */
            stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
            stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
            *count = HOST_VM_INFO_REV1_COUNT;
        }

        if (original_count >= HOST_VM_INFO_REV2_COUNT) {
            /* rev2 added "speculative" info */
            stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
            *count = HOST_VM_INFO_REV2_COUNT;
        }

        /* rev3 changed some of the fields to be 64-bit */

        return KERN_SUCCESS;
    }

    case HOST_CPU_LOAD_INFO: {
        host_cpu_load_info_t cpu_load_info;

        if (*count < HOST_CPU_LOAD_INFO_COUNT) {
            return KERN_FAILURE;
        }

#define GET_TICKS_VALUE(state, ticks) \
    MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
    MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
    MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
    MACRO_END

        cpu_load_info = (host_cpu_load_info_t)info;
        cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

        simple_lock(&processor_list_lock, LCK_GRP_NULL);

        unsigned int pcount = processor_count;

        for (unsigned int i = 0; i < pcount; i++) {
            processor_t processor = processor_array[i];
            assert(processor != PROCESSOR_NULL);
            processor_cpu_load_info(processor, cpu_load_info->cpu_ticks);
        }
        simple_unlock(&processor_list_lock);

        *count = HOST_CPU_LOAD_INFO_COUNT;

        return KERN_SUCCESS;
    }

    case HOST_EXPIRED_TASK_INFO: {
        if (*count < TASK_POWER_INFO_COUNT) {
            return KERN_FAILURE;
        }

        task_power_info_t tinfo1 = (task_power_info_t)info;
        task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

        tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
        tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;
        tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
        tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;
        tinfo1->total_user = dead_task_statistics.total_user_time;
        tinfo1->total_system = dead_task_statistics.total_system_time;
        if (*count < TASK_POWER_INFO_V2_COUNT) {
            *count = TASK_POWER_INFO_COUNT;
        } else {
            tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm64__)
            tinfo2->task_energy = dead_task_statistics.task_energy;
            tinfo2->task_ptime = dead_task_statistics.total_ptime;
            tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
            *count = TASK_POWER_INFO_V2_COUNT;
        }

        return KERN_SUCCESS;
    }
    default: return KERN_INVALID_ARGUMENT;
    }
}

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW 1 /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS 10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS 2 /* minimum number of requests per window */

uint64_t host_statistics_time_window;

static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);

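/*
 * Every (flavor, revision) pair that can be served to user space gets
 * its own slot in the statistics cache below; get_host_info_data_index()
 * maps a request onto one of these indices.
 */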
#define HOST_VM_INFO64_REV0 0
#define HOST_VM_INFO64_REV1 1
#define HOST_EXTMOD_INFO64_REV0 2
#define HOST_LOAD_INFO_REV0 3
#define HOST_VM_INFO_REV0 4
#define HOST_VM_INFO_REV1 5
#define HOST_VM_INFO_REV2 6
#define HOST_CPU_LOAD_INFO_REV0 7
#define HOST_EXPIRED_TASK_INFO_REV0 8
#define HOST_EXPIRED_TASK_INFO_REV1 9
#define NUM_HOST_INFO_DATA_TYPES 10

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

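/* Per-slot rate-limiting state plus a cached copy of the last reply. */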
struct host_stats_cache {
    uint64_t last_access;
    uint64_t current_requests;
    uint64_t max_requests;
    uintptr_t data;
    mach_msg_type_number_t count; // NOTE: count is in units of sizeof(integer_t)
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
    [HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
    [HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
    [HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
    [HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
    [HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
    [HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
    [HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
    [HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
    [HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
    [HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT },
};

void
host_statistics_init(void)
{
    nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

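/*
 * Save a copy of a successful reply so rate-limited callers can be
 * served stale data. Platform binaries are never rate limited, so
 * their results are deliberately not written back to the cache.
 */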
static void
cache_host_statistics(int index, host_info64_t info)
{
    if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
        return;
    }

    if (task_get_platform_binary(current_task())) {
        return;
    }

    memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
}

static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
{
    if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
        *count = 0;
        return;
    }

    *count = g_host_stats_cache[index].count;
    memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
{
    switch (flavor) {
    case HOST_VM_INFO64:
        if (!is_stat64) {
            *ret = KERN_INVALID_ARGUMENT;
            return -1;
        }
        if (*count < HOST_VM_INFO64_REV0_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        if (*count >= HOST_VM_INFO64_REV1_COUNT) {
            return HOST_VM_INFO64_REV1;
        }
        return HOST_VM_INFO64_REV0;

    case HOST_EXTMOD_INFO64:
        if (!is_stat64) {
            *ret = KERN_INVALID_ARGUMENT;
            return -1;
        }
        if (*count < HOST_EXTMOD_INFO64_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        return HOST_EXTMOD_INFO64_REV0;

    case HOST_LOAD_INFO:
        if (*count < HOST_LOAD_INFO_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        return HOST_LOAD_INFO_REV0;

    case HOST_VM_INFO:
        if (*count < HOST_VM_INFO_REV0_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        if (*count >= HOST_VM_INFO_REV2_COUNT) {
            return HOST_VM_INFO_REV2;
        }
        if (*count >= HOST_VM_INFO_REV1_COUNT) {
            return HOST_VM_INFO_REV1;
        }
        return HOST_VM_INFO_REV0;

    case HOST_CPU_LOAD_INFO:
        if (*count < HOST_CPU_LOAD_INFO_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        return HOST_CPU_LOAD_INFO_REV0;

    case HOST_EXPIRED_TASK_INFO:
        if (*count < TASK_POWER_INFO_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        if (*count >= TASK_POWER_INFO_V2_COUNT) {
            return HOST_EXPIRED_TASK_INFO_REV1;
        }
        return HOST_EXPIRED_TASK_INFO_REV0;

    default:
        *ret = KERN_INVALID_ARGUMENT;
        return -1;
    }
}

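/*
 * Decide whether this request must be served from the cache. Third-party
 * tasks get at most max_requests live queries per HOST_STATISTICS_TIME_WINDOW;
 * beyond that the cached copy is returned (TRUE, with *ret and *count set).
 * Platform binaries and requests with invalid parameters are never rate
 * limited.
 */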
static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
    task_t task = current_task();

    assert(task != kernel_task);

    *ret = KERN_SUCCESS;
    *pindex = -1;

    /* Access control only for third-party applications */
    if (task_get_platform_binary(task)) {
        return FALSE;
    }

    /* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
    bool rate_limited = FALSE;
    bool set_last_access = TRUE;

    /* there is a cache for every flavor */
    int index = get_host_info_data_index(is_stat64, flavor, count, ret);
    if (index == -1) {
        goto out;
    }

    *pindex = index;
    lck_mtx_lock(&host_statistics_lck);
    if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
        set_last_access = FALSE;
        if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
            rate_limited = TRUE;
            get_cached_info(index, info, count);
        }
    }
    if (set_last_access) {
        g_host_stats_cache[index].current_requests = 1;
        /*
         * Select a random number of allowed requests (between
         * HOST_STATISTICS_MIN_REQUESTS and HOST_STATISTICS_MAX_REQUESTS)
         * for this window. This way an observer cannot infer, by watching
         * when the cached copy changes, whether host_statistics was called
         * during the previous window.
         */
        g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
        g_host_stats_cache[index].last_access = mach_continuous_time();
    }
    lck_mtx_unlock(&host_statistics_lck);
out:
    return rate_limited;
}

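/*
 * vm_stats:
 *
 * Fill in a vm_statistics64 structure for HOST_VM_INFO64, folding in the
 * per-CPU local page queues and, for rev1-sized callers, the compressor
 * and internal/external page counts.
 */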
kern_return_t
vm_stats(void *info, unsigned int *count)
{
    vm_statistics64_data_t host_vm_stat;
    mach_msg_type_number_t original_count;
    unsigned int local_q_internal_count;
    unsigned int local_q_external_count;

    if (*count < HOST_VM_INFO64_REV0_COUNT) {
        return KERN_FAILURE;
    }
    get_host_vm_stats(&host_vm_stat);

    vm_statistics64_t stat = (vm_statistics64_t)info;

    stat->free_count = vm_page_free_count + vm_page_speculative_count;
    stat->active_count = vm_page_active_count;

    local_q_internal_count = 0;
    local_q_external_count = 0;
    if (vm_page_local_q) {
        zpercpu_foreach(lq, vm_page_local_q) {
            stat->active_count += lq->vpl_count;
            local_q_internal_count += lq->vpl_internal_count;
            local_q_external_count += lq->vpl_external_count;
        }
    }
    stat->inactive_count = vm_page_inactive_count;
#if !XNU_TARGET_OS_OSX
    stat->wire_count = vm_page_wire_count;
#else /* !XNU_TARGET_OS_OSX */
    stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif /* !XNU_TARGET_OS_OSX */
    stat->zero_fill_count = host_vm_stat.zero_fill_count;
    stat->reactivations = host_vm_stat.reactivations;
    stat->pageins = host_vm_stat.pageins;
    stat->pageouts = host_vm_stat.pageouts;
    stat->faults = host_vm_stat.faults;
    stat->cow_faults = host_vm_stat.cow_faults;
    stat->lookups = host_vm_stat.lookups;
    stat->hits = host_vm_stat.hits;

    stat->purgeable_count = vm_page_purgeable_count;
    stat->purges = vm_page_purged_count;

    stat->speculative_count = vm_page_speculative_count;

    /*
     * Fill in extra info added in later revisions of the
     * vm_statistics data structure. Fill in only what can fit
     * in the data structure the caller gave us!
     */
    original_count = *count;
    *count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
    if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
        /* rev1 added "throttled count" */
        stat->throttled_count = vm_page_throttled_count;
        /* rev1 added "compression" info */
        stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
        stat->compressions = host_vm_stat.compressions;
        stat->decompressions = host_vm_stat.decompressions;
        stat->swapins = host_vm_stat.swapins;
        stat->swapouts = host_vm_stat.swapouts;
        /*
         * rev1 added:
         *   "external page count"
         *   "anonymous page count"
         *   "total # of pages (uncompressed) held in the compressor"
         */
        stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
        stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
        stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
        *count = HOST_VM_INFO64_REV1_COUNT;
    }

    return KERN_SUCCESS;
}

kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    switch (flavor) {
    case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
        return vm_stats(info, count);

    case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
    {
        vm_extmod_statistics_t out_extmod_statistics;

        if (*count < HOST_EXTMOD_INFO64_COUNT) {
            return KERN_FAILURE;
        }

        out_extmod_statistics = (vm_extmod_statistics_t)info;
        *out_extmod_statistics = host_extmod_statistics;

        *count = HOST_EXTMOD_INFO64_COUNT;

        return KERN_SUCCESS;
    }

    default: /* If we didn't recognize the flavor, send to host_statistics */
        return host_statistics(host, flavor, (host_info_t)info, count);
    }
}
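
/*
 * For illustration only (a sketch, not part of the kernel build): a
 * user-space caller typically reaches host_statistics64() through the
 * MIG stub of the same name, e.g.
 *
 *     vm_statistics64_data_t vmstat;
 *     mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *     kern_return_t kr = host_statistics64(mach_host_self(),
 *         HOST_VM_INFO64, (host_info64_t)&vmstat, &count);
 *
 * Such a call enters the kernel through host_statistics64_from_user()
 * below, which applies the rate limiting above before landing here.
 */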

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
    kern_return_t ret = KERN_SUCCESS;
    int index;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
        return ret;
    }

    if (ret != KERN_SUCCESS) {
        return ret;
    }

    ret = host_statistics64(host, flavor, info, count);

    if (ret == KERN_SUCCESS) {
        cache_host_statistics(index, info);
    }

    return ret;
}

kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
    kern_return_t ret = KERN_SUCCESS;
    int index;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
        return ret;
    }

    if (ret != KERN_SUCCESS) {
        return ret;
    }

    ret = host_statistics(host, flavor, (host_info_t)info, count);

    if (ret == KERN_SUCCESS) {
        cache_host_statistics(index, info);
    }

    return ret;
}

/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
    return host_statistics((host_t)host_priv, flavor, info, count);
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
    sched_stats_active = active;
    return KERN_SUCCESS;
}

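/*
 * Copy out one _processor_statistics_np record per CPU, followed by a
 * final record (ps_cpuid == -1) carrying the realtime run-queue count.
 * On input *count is the buffer size in bytes; on output it is the
 * number of bytes written.
 */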
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
    uint32_t pos = 0;

    if (!sched_stats_active) {
        return KERN_FAILURE;
    }

    percpu_foreach_base(pcpu_base) {
        struct sched_statistics stats;
        processor_t processor;

        pos += sizeof(struct _processor_statistics_np);
        if (pos > *count) {
            return KERN_FAILURE;
        }

        stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
        processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);

        out->ps_cpuid = processor->cpu_id;
        out->ps_csw_count = stats.csw_count;
        out->ps_preempt_count = stats.preempt_count;
        out->ps_preempted_rt_count = stats.preempted_rt_count;
        out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
        out->ps_rt_sched_count = stats.rt_sched_count;
        out->ps_interrupt_count = stats.interrupt_count;
        out->ps_ipi_count = stats.ipi_count;
        out->ps_timer_pop_count = stats.timer_pop_count;
        out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
        out->ps_idle_transitions = stats.idle_transitions;
        out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;

        out++;
    }

    /* And include RT Queue information */
    pos += sizeof(struct _processor_statistics_np);
    if (pos > *count) {
        return KERN_FAILURE;
    }

    bzero(out, sizeof(*out));
    out->ps_cpuid = (-1);
    out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
    out++;

    *count = pos;

    return KERN_SUCCESS;
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    *out_page_size = PAGE_SIZE;

    return KERN_SUCCESS;
}

/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    (void)strncpy(out_version, version, sizeof(kernel_version_t));

    return KERN_SUCCESS;
}

/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
    mach_port_t *ports;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Allocate memory. Can be pageable because it won't be
     * touched while holding a lock.
     */

    ports = kalloc_type(mach_port_t, 1, Z_WAITOK | Z_ZERO | Z_NOFAIL);

    /* do the conversion that MIG should handle */
    ports[0] = convert_pset_name_to_port(&pset0);

    *pset_list = (processor_set_array_t)ports;
    *count = 1;

    return KERN_SUCCESS;
}

/*
 * host_processor_set_priv:
 *
 * Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
    if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
        *pset = PROCESSOR_SET_NULL;

        return KERN_INVALID_ARGUMENT;
    }

    *pset = pset_name;

    return KERN_SUCCESS;
}

/*
 * host_processor_info
 *
 * Return info about the processors on this host. It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
    kern_return_t result;
    host_t thost;
    processor_info_t info;
    unsigned int icount;
    unsigned int pcount;
    vm_offset_t addr;
    vm_size_t size, needed;
    vm_map_copy_t copy;

    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    result = processor_info_count(flavor, &icount);
    if (result != KERN_SUCCESS) {
        return result;
    }

    pcount = processor_count;
    assert(pcount != 0);

    needed = pcount * icount * sizeof(natural_t);
    size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
    result = kmem_alloc(ipc_kernel_map, &addr, size, KMA_DATA, VM_KERN_MEMORY_IPC);
    if (result != KERN_SUCCESS) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info = (processor_info_t)addr;

    for (unsigned int i = 0; i < pcount; i++) {
        processor_t processor = processor_array[i];
        assert(processor != PROCESSOR_NULL);

        unsigned int tcount = icount;

        result = processor_info(processor, flavor, &thost, info, &tcount);
        if (result != KERN_SUCCESS) {
            kmem_free(ipc_kernel_map, addr, size);
            return result;
        }
        info += icount;
    }

    if (size != needed) {
        bzero((char *)addr + needed, size - needed);
    }

    result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
        vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
    assert(result == KERN_SUCCESS);
    result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
    assert(result == KERN_SUCCESS);

    *out_pcount = pcount;
    *out_array = (processor_info_array_t)copy;
    *out_array_count = pcount * icount;

    return KERN_SUCCESS;
}

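/*
 * A host special port id is valid when it lies within the overall
 * special port range and is either a kernel-owned id (up to
 * HOST_LAST_SPECIAL_KERNEL_PORT) or a user-settable id above
 * HOST_MAX_SPECIAL_KERNEL_PORT.
 */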
static bool
is_valid_host_special_port(int id)
{
    return (id <= HOST_MAX_SPECIAL_PORT) &&
           (id >= HOST_MIN_SPECIAL_PORT) &&
           ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}

extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;

/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
    ipc_port_t old_port;

    if (!is_valid_host_special_port(id)) {
        panic("attempted to set invalid special port %d", id);
    }

#if !MACH_FLIPC
    if (id == HOST_NODE_PORT) {
        return KERN_NOT_SUPPORTED;
    }
#endif

    host_lock(host_priv);
    old_port = host_priv->special[id];
    host_priv->special[id] = port;
    host_unlock(host_priv);

#if MACH_FLIPC
    if (id == HOST_NODE_PORT) {
        mach_node_port_changed();
    }
#endif

    if (IP_VALID(old_port)) {
        ipc_port_release_send(old_port);
    }

    return KERN_SUCCESS;
}

/*
 * Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
    if (!is_valid_host_special_port(id)) {
        panic("attempted to get invalid special port %d", id);
    }

    host_lock(host_priv);
    *portp = host_priv->special[id];
    host_unlock(host_priv);
    return KERN_SUCCESS;
}

/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
{
    if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
        return KERN_INVALID_ARGUMENT;
    }

    if (task_is_driver(current_task())) {
        return KERN_NO_ACCESS;
    }

    if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
        return KERN_INVALID_RIGHT;
    }

    return host_set_special_port(host_priv, id, port);
}

kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
    if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
        return KERN_INVALID_ARGUMENT;
    }

    if (current_task() != kernel_task && get_bsdtask_info(current_task()) != initproc) {
        bool allowed = (id == HOST_TELEMETRY_PORT &&
            IOTaskHasEntitlement(current_task(), "com.apple.private.xpc.launchd.event-monitor"));
#if CONFIG_CSR
        if (!allowed) {
            allowed = (csr_check(CSR_ALLOW_TASK_FOR_PID) == 0);
        }
#endif
        if (!allowed) {
            return KERN_NO_ACCESS;
        }
    }

#if CONFIG_MACF
    if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
        return KERN_NO_ACCESS;
    }
#endif

    return kernel_set_special_port(host_priv, id, port);
}

/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */

kern_return_t
host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
    if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
        return KERN_INVALID_ARGUMENT;
    }

    task_t task = current_task();
    if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
        /* allow HID drivers to get the sysdiagnose port for keychord handling */
        if (id == HOST_SYSDIAGNOSE_PORT &&
            IOCurrentTaskHasEntitlement(kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
            goto get_special_port;
        }
        return KERN_NO_ACCESS;
    }
get_special_port:
    return host_get_special_port(host_priv, node, id, portp);
}

kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
    ipc_port_t port;

    if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
        return KERN_INVALID_ARGUMENT;
    }

    host_lock(host_priv);
    port = realhost.special[id];
    switch (id) {
    case HOST_PORT:
        *portp = ipc_kobject_copy_send(port, &realhost, IKOT_HOST);
        break;
    case HOST_PRIV_PORT:
        *portp = ipc_kobject_copy_send(port, &realhost, IKOT_HOST_PRIV);
        break;
    case HOST_IO_MAIN_PORT:
        *portp = ipc_port_copy_send_any(main_device_port);
        break;
    default:
        *portp = ipc_port_copy_send_mqueue(port);
        break;
    }
    host_unlock(host_priv);

    return KERN_SUCCESS;
}

/*
 * host_get_io_main
 *
 * Return the IO main access port for this host.
 */
kern_return_t
host_get_io_main(host_t host, io_main_t * io_mainp)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return host_get_io_main_port(host_priv_self(), io_mainp);
}

host_t
host_self(void)
{
    return &realhost;
}

host_priv_t
host_priv_self(void)
{
    return &realhost;
}

kern_return_t
host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (!IOCurrentTaskHasEntitlement("com.apple.private.set-atm-diagnostic-flag")) {
        return KERN_NO_ACCESS;
    }

#if CONFIG_ATM
    return atm_set_diagnostic_config(diagnostic_flag);
#else
    (void)diagnostic_flag;
    return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if !defined(XNU_TARGET_OS_OSX)
    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * The multiuser bits are used extensively for shared iPad mode.
     * The caller selects shared iPad or another multiuser mode, so
     * overriding the flags while updating the commpage is no longer
     * appropriate.
     */
    commpage_update_multiuser_config(multiuser_config);
    return KERN_SUCCESS;
#else
    (void)host_priv;
    (void)multiuser_config;
    return KERN_NOT_SUPPORTED;
#endif
}