/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	host.c
 *
 *	Non-ipc host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h>	// mach_node_port_changed()

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pexpert/pexpert.h>

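/*
 * The single host object for this kernel instance, and the global
 * external-modification statistics exported via HOST_EXTMOD_INFO64.
 */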
host_data_t realhost;

vm_extmod_statistics_data_t host_extmod_statistics;

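/*
 * host_processors:
 *
 * Return an array of send rights, one for each processor on the
 * host.  Requires the host privilege port.
 */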
kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	processor_t processor, *tp;
	void * addr;
	unsigned int count, i;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	count = processor_count;
	assert(count != 0);

	addr = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	tp = (processor_t *)addr;
	*tp++ = processor = processor_list;

	if (count > 1) {
		simple_lock(&processor_list_lock);

		for (i = 1; i < count; i++)
			*tp++ = processor = processor->processor_list;

		simple_unlock(&processor_list_lock);
	}

	*countp = count;
	*out_array = (processor_array_t)addr;

	/* do the conversion that Mig should handle */
	tp = (processor_t *)addr;
	for (i = 0; i < count; i++)
		((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);

	return (KERN_SUCCESS);
}

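/*
 * host_info:
 *
 * Return non-statistical information about the host, selected by
 * flavor (basic configuration, scheduler quantum, priority bands, ...).
 */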
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT)
			return (KERN_FAILURE);

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->max_cpus = machine_info.max_cpus;
		basic_info->avail_cpus = processor_avail_count;
		master_id = master_processor->cpu_id;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
			basic_info->logical_cpu = machine_info.logical_cpu;
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT)
			return (KERN_FAILURE);

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT)
			return (KERN_FAILURE);

		/* XXX Fail until ledgers are implemented */
		return (KERN_INVALID_ARGUMENT);
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT)
			return (KERN_FAILURE);

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return (KERN_SUCCESS);
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT)
			return (KERN_FAILURE);

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT)
			return (KERN_FAILURE);

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT)
			return (KERN_FAILURE);

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return (KERN_SUCCESS);
#else /* DEVELOPMENT || DEBUG */
		return (KERN_NOT_SUPPORTED);
#endif
	}

	case HOST_PREFERRED_USER_ARCH: {
		host_preferred_user_arch_t user_arch_info;

		/*
		 * Preferred user-space cpu type and subtype.
		 */
		if (*count < HOST_PREFERRED_USER_ARCH_COUNT)
			return (KERN_FAILURE);

		user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
		user_arch_info->cpu_type = PREFERRED_USER_CPU_TYPE;
		user_arch_info->cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
#else
		int master_id = master_processor->cpu_id;
		user_arch_info->cpu_type = slot_type(master_id);
		user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif

		*count = HOST_PREFERRED_USER_ARCH_COUNT;

		return (KERN_SUCCESS);
	}

	default: return (KERN_INVALID_ARGUMENT);
	}
}

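/*
 * host_statistics:
 *
 * Return statistical information about the host: load averages,
 * VM statistics, cumulative CPU load, and expired-task power data.
 */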
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_VM_INFO: {
		processor_t processor;
		vm_statistics64_t stat;
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl * lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return (KERN_SUCCESS);
	}

	case HOST_CPU_LOAD_INFO: {
		processor_t processor;
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (PROCESSOR_DATA(processor, current_state) != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			           (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale.  Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging.
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;
		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;
		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;

		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return (KERN_SUCCESS);
	}
	default: return (KERN_INVALID_ARGUMENT);
	}
}

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW	1	/* seconds */
#define HOST_STATISTICS_MAX_REQUESTS	10	/* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS	2	/* minimum number of requests per window */
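
/*
 * host_statistics() and host_statistics64() results for callers outside
 * the platform binary set (TF_PLATFORM) are rate limited: each flavor may
 * be queried a randomized number of times (between the MIN and MAX above)
 * per time window; further queries within the window are answered from a
 * cached copy of the last fresh result.
 */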

uint64_t host_statistics_time_window;

static lck_mtx_t host_statistics_lck;
static lck_grp_t* host_statistics_lck_grp;

#define HOST_VM_INFO64_REV0		0
#define HOST_VM_INFO64_REV1		1
#define HOST_EXTMOD_INFO64_REV0		2
#define HOST_LOAD_INFO_REV0		3
#define HOST_VM_INFO_REV0		4
#define HOST_VM_INFO_REV1		5
#define HOST_VM_INFO_REV2		6
#define HOST_CPU_LOAD_INFO_REV0		7
#define HOST_EXPIRED_TASK_INFO_REV0	8
#define HOST_EXPIRED_TASK_INFO_REV1	9
#define NUM_HOST_INFO_DATA_TYPES	10

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

struct host_stats_cache {
	uint64_t last_access;
	uint64_t current_requests;
	uint64_t max_requests;
	uintptr_t data;
	mach_msg_type_number_t count;	/* NOTE: count is in units of sizeof(integer_t) */
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT },
};

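/*
 * host_statistics_init:
 *
 * Set up the lock that protects the statistics caches and convert the
 * rate-limit window into absolute-time units.
 */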
void
host_statistics_init(void)
{
	host_statistics_lck_grp = lck_grp_alloc_init("host_statistics", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&host_statistics_lck, host_statistics_lck_grp, LCK_ATTR_NULL);
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

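/*
 * Save a fresh result for 'index' so it can be replayed to rate-limited
 * callers.  Results produced for platform-binary tasks are not cached,
 * since those tasks are never rate limited.
 */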
static void
cache_host_statistics(int index, host_info64_t info)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES)
		return;

	task_t task = current_task();
	if (task->t_flags & TF_PLATFORM)
		return;

	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
	return;
}

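/* Copy the cached result for 'index' into the caller's buffer. */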
static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		*count = 0;
		return;
	}

	*count = g_host_stats_cache[index].count;
	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

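/*
 * Map (flavor, *count) to an index into g_host_stats_cache, validating
 * the caller-supplied count against the smallest revision of the flavor.
 * Returns -1 and sets *ret on failure.
 */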
static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
{
	switch (flavor) {
	case HOST_VM_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_VM_INFO64_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
			return HOST_VM_INFO64_REV1;
		}
		return HOST_VM_INFO64_REV0;

	case HOST_EXTMOD_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_EXTMOD_INFO64_REV0;

	case HOST_LOAD_INFO:
		if (*count < HOST_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_LOAD_INFO_REV0;

	case HOST_VM_INFO:
		if (*count < HOST_VM_INFO_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO_REV2_COUNT) {
			return HOST_VM_INFO_REV2;
		}
		if (*count >= HOST_VM_INFO_REV1_COUNT) {
			return HOST_VM_INFO_REV1;
		}
		return HOST_VM_INFO_REV0;

	case HOST_CPU_LOAD_INFO:
		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_CPU_LOAD_INFO_REV0;

	case HOST_EXPIRED_TASK_INFO:
		if (*count < TASK_POWER_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= TASK_POWER_INFO_V2_COUNT) {
			return HOST_EXPIRED_TASK_INFO_REV1;
		}
		return HOST_EXPIRED_TASK_INFO_REV0;

	default:
		*ret = KERN_INVALID_ARGUMENT;
		return -1;
	}
}

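/*
 * Returns TRUE if the caller was rate limited; in that case the reply
 * has already been filled from the cache.  Returns FALSE (with *ret set
 * to KERN_SUCCESS) if the caller should compute a fresh result, or FALSE
 * with *ret set to an error for an invalid flavor or count.
 */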
static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;

	/* Access control only for third party applications */
	if (task->t_flags & TF_PLATFORM) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1)
		goto out;

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * Select a random number of allowed requests (between
		 * HOST_STATISTICS_MIN_REQUESTS and HOST_STATISTICS_MAX_REQUESTS)
		 * for this window.  That way it is not possible to infer, by
		 * watching when the cached copy changes, whether host_statistics
		 * was called during the previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}


kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
	{
		processor_t processor;
		vm_statistics64_t stat;
		vm_statistics64_data_t host_vm_stat;
		mach_msg_type_number_t original_count;
		unsigned int local_q_internal_count;
		unsigned int local_q_external_count;

		if (*count < HOST_VM_INFO64_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
				host_vm_stat.compressions += stat->compressions;
				host_vm_stat.decompressions += stat->decompressions;
				host_vm_stat.swapins += stat->swapins;
				host_vm_stat.swapouts += stat->swapouts;
			}

			simple_unlock(&processor_list_lock);
		}

		stat = (vm_statistics64_t)info;

		stat->free_count = vm_page_free_count + vm_page_speculative_count;
		stat->active_count = vm_page_active_count;

		local_q_internal_count = 0;
		local_q_external_count = 0;
		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl * lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat->active_count += lq->vpl_count;
				local_q_internal_count += lq->vpl_internal_count;
				local_q_external_count += lq->vpl_external_count;
			}
		}
		stat->inactive_count = vm_page_inactive_count;
#if CONFIG_EMBEDDED
		stat->wire_count = vm_page_wire_count;
#else
		stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
		stat->zero_fill_count = host_vm_stat.zero_fill_count;
		stat->reactivations = host_vm_stat.reactivations;
		stat->pageins = host_vm_stat.pageins;
		stat->pageouts = host_vm_stat.pageouts;
		stat->faults = host_vm_stat.faults;
		stat->cow_faults = host_vm_stat.cow_faults;
		stat->lookups = host_vm_stat.lookups;
		stat->hits = host_vm_stat.hits;

		stat->purgeable_count = vm_page_purgeable_count;
		stat->purges = vm_page_purged_count;

		stat->speculative_count = vm_page_speculative_count;

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
			/* rev1 added "throttled count" */
			stat->throttled_count = vm_page_throttled_count;
			/* rev1 added "compression" info */
			stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
			stat->compressions = host_vm_stat.compressions;
			stat->decompressions = host_vm_stat.decompressions;
			stat->swapins = host_vm_stat.swapins;
			stat->swapouts = host_vm_stat.swapouts;
			/*
			 * rev1 added:
			 *   "external page count"
			 *   "anonymous page count"
			 *   "total # of pages (uncompressed) held in the compressor"
			 */
			stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
			stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
			stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
			*count = HOST_VM_INFO64_REV1_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT)
			return (KERN_FAILURE);

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return (KERN_SUCCESS);
	}

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return (host_statistics(host, flavor, (host_info_t)info, count));
	}
}

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index))
		return ret;

	if (ret != KERN_SUCCESS)
		return ret;

	ret = host_statistics64(host, flavor, info, count);

	if (ret == KERN_SUCCESS)
		cache_host_statistics(index, info);

	return ret;
}

kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index))
		return ret;

	if (ret != KERN_SUCCESS)
		return ret;

	ret = host_statistics(host, flavor, info, count);

	if (ret == KERN_SUCCESS)
		cache_host_statistics(index, info);

	return ret;
}
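
/*
 * For reference, a minimal user-space sketch of the path above: the
 * MiG-generated host_statistics64() stub ends up invoking
 * host_statistics64_from_user() in the kernel.
 *
 *	vm_statistics64_data_t vmstat;
 *	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *	kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *	    (host_info64_t)&vmstat, &count);
 */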

/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return (host_statistics((host_t)host_priv, flavor, info, count));
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return (KERN_SUCCESS);
}

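/* Sum the per-processor page grab counts under the processor list lock. */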
uint64_t
get_pages_grabbed_count(void)
{
	processor_t processor;
	uint64_t pages_grabbed_count = 0;

	simple_lock(&processor_list_lock);

	processor = processor_list;

	while (processor) {
		pages_grabbed_count += PROCESSOR_DATA(processor, page_grab_count);
		processor = processor->processor_list;
	}
	simple_unlock(&processor_list_lock);

	return (pages_grabbed_count);
}

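/*
 * Copy per-processor scheduler statistics into 'out'.  The buffer must
 * be large enough for processor_count + 1 entries; the final entry
 * (cpuid -1) reports the realtime run queue.
 */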
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return (KERN_FAILURE);
	}

	simple_lock(&processor_list_lock);

	if (*count < (processor_count + 1) * sizeof(struct _processor_statistics_np)) { /* One for RT */
		simple_unlock(&processor_list_lock);
		return (KERN_FAILURE);
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics * stats = &processor->processor_data.sched_stats;

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats->csw_count;
		out->ps_preempt_count = stats->preempt_count;
		out->ps_preempted_rt_count = stats->preempted_rt_count;
		out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
		out->ps_rt_sched_count = stats->rt_sched_count;
		out->ps_interrupt_count = stats->interrupt_count;
		out->ps_ipi_count = stats->ipi_count;
		out->ps_timer_pop_count = stats->timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats->idle_transitions;
		out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t)(processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* And include RT Queue information */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	return (KERN_SUCCESS);
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	*out_page_size = PAGE_SIZE;

	return (KERN_SUCCESS);
}

/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return (KERN_SUCCESS);
}

/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	void * addr;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	/*
	 * Allocate memory.  Can be pageable because it won't be
	 * touched while holding a lock.
	 */

	addr = kalloc((vm_size_t)sizeof(mach_port_t));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	/* do the conversion that Mig should handle */
	*((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return (KERN_SUCCESS);
}

/*
 * host_processor_set_priv:
 *
 * Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return (KERN_INVALID_ARGUMENT);
	}

	*pset = pset_name;

	return (KERN_SUCCESS);
}

/*
 * host_processor_info
 *
 * Return info about the processors on this host.  It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
                    processor_flavor_t flavor,
                    natural_t * out_pcount,
                    processor_info_array_t * out_array,
                    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	processor_t processor;
	host_t thost;
	processor_info_t info;
	unsigned int icount, tcount;
	unsigned int pcount, i;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS)
		return (result);

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS)
		return (KERN_RESOURCE_SHORTAGE);

	info = (processor_info_t)addr;
	processor = processor_list;
	tcount = icount;

	result = processor_info(processor, flavor, &thost, info, &tcount);
	if (result != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map, addr, size);
		return (result);
	}

	if (pcount > 1) {
		for (i = 1; i < pcount; i++) {
			simple_lock(&processor_list_lock);
			processor = processor->processor_list;
			simple_unlock(&processor_list_lock);

			info += icount;
			tcount = icount;
			result = processor_info(processor, flavor, &thost, info, &tcount);
			if (result != KERN_SUCCESS) {
				kmem_free(ipc_kernel_map, addr, size);
				return (result);
			}
		}
	}

	if (size != needed)
		bzero((char *)addr + needed, size - needed);

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	                       vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return (KERN_SUCCESS);
}

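/*
 * A special port id is valid if it lies within [HOST_MIN_SPECIAL_PORT,
 * HOST_MAX_SPECIAL_PORT] and does not fall in the unused gap between the
 * last defined kernel special port and HOST_MAX_SPECIAL_KERNEL_PORT.
 */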
static bool
is_valid_host_special_port(int id)
{
	return (id <= HOST_MAX_SPECIAL_PORT) &&
	       (id >= HOST_MIN_SPECIAL_PORT) &&
	       ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}

/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	if (!is_valid_host_special_port(id))
		panic("attempted to set invalid special port %d", id);

#if !MACH_FLIPC
	if (id == HOST_NODE_PORT)
		return (KERN_NOT_SUPPORTED);
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT)
		mach_node_port_changed();
#endif

	if (IP_VALID(old_port))
		ipc_port_release_send(old_port);
	return (KERN_SUCCESS);
}

/*
 * Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
	if (!is_valid_host_special_port(id))
		panic("attempted to get invalid special port %d", id);

	host_lock(host_priv);
	*portp = host_priv->special[id];
	host_unlock(host_priv);
	return (KERN_SUCCESS);
}

/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT)
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0)
		return (KERN_NO_ACCESS);
#endif

	return (kernel_set_special_port(host_priv, id, port));
}

/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */
kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT)
		return (KERN_INVALID_ARGUMENT);

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return (KERN_SUCCESS);
}

/*
 * host_get_io_master
 *
 * Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (host_get_io_master_port(host_priv_self(), io_masterp));
}

host_t
host_self(void)
{
	return (&realhost);
}

host_priv_t
host_priv_self(void)
{
	return (&realhost);
}

host_security_t
host_security_self(void)
{
	return (&realhost);
}

kern_return_t
host_set_atm_diagnostic_flag(host_priv_t host_priv, uint32_t diagnostic_flag)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

#if CONFIG_ATM
	return (atm_set_diagnostic_config(diagnostic_flag));
#else
	(void)diagnostic_flag;
	return (KERN_NOT_SUPPORTED);
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if CONFIG_EMBEDDED
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	/*
	 * Always enforce that the multiuser bit is set
	 * if a value is written to the commpage word.
	 */
	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
	return (KERN_SUCCESS);
#else
	(void)host_priv;
	(void)multiuser_config;
	return (KERN_NOT_SUPPORTED);
#endif
}