/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File: task.h
 * Author: Avadis Tevanian, Jr.
 *
 * This file contains the structure definitions for tasks.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#ifndef _KERN_TASK_H_
#define _KERN_TASK_H_

#include <kern/kern_types.h>
#include <kern/task_ref.h>
#include <mach/mach_types.h>
#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <kern/btlog.h>
#include <kern/kern_cdata.h>
#include <mach/sfi_class.h>
#include <kern/counter.h>
#include <kern/cs_blobs.h>
#include <kern/queue.h>
#include <kern/recount.h>
#include <sys/kern_sysctl.h>
#if CONFIG_EXCLAVES
#include <mach/exclaves.h>
#endif /* CONFIG_EXCLAVES */
#endif /* XNU_KERNEL_PRIVATE */

#ifdef MACH_KERNEL_PRIVATE
#include <mach/boolean.h>
#include <mach/port.h>
#include <mach/time_value.h>
#include <mach/message.h>
#include <mach/mach_param.h>
#include <mach/task_info.h>
#include <mach/exception_types.h>
#include <mach/vm_statistics.h>
#include <machine/task.h>

#include <kern/cpu_data.h>
#include <kern/queue.h>
#include <kern/exception.h>
#include <kern/locks.h>
#include <security/_label.h>
#include <ipc/ipc_port.h>

#include <kern/thread.h>
#include <mach/coalition.h>
#include <stdatomic.h>
#include <os/refcnt.h>

#if CONFIG_DEFERRED_RECLAIM
typedef struct vm_deferred_reclamation_metadata_s *vm_deferred_reclamation_metadata_t;
#endif /* CONFIG_DEFERRED_RECLAIM */

struct _cpu_time_qos_stats {
    uint64_t cpu_time_qos_default;
    uint64_t cpu_time_qos_maintenance;
    uint64_t cpu_time_qos_background;
    uint64_t cpu_time_qos_utility;
    uint64_t cpu_time_qos_legacy;
    uint64_t cpu_time_qos_user_initiated;
    uint64_t cpu_time_qos_user_interactive;
};

struct task_writes_counters {
    uint64_t task_immediate_writes;
    uint64_t task_deferred_writes;
    uint64_t task_invalidated_writes;
    uint64_t task_metadata_writes;
};

struct task_watchports;
#include <bank/bank_internal.h>

#ifdef MACH_BSD
struct proc;
struct proc_ro;
#endif

struct task {
    /* Synchronization/destruction information */
    decl_lck_mtx_data(, lock); /* Task's lock */
    os_refcnt_t ref_count; /* Number of references to me */

#if DEVELOPMENT || DEBUG
    struct os_refgrp *ref_group;
    lck_spin_t ref_group_lock;
#endif /* DEVELOPMENT || DEBUG */

    bool active; /* Task has not been terminated */
    bool ipc_active; /* IPC with the task ports is allowed */
    bool halting; /* Task is being halted */
    bool message_app_suspended; /* Let iokit know when pidsuspended */

    /* Virtual timers */
    uint32_t vtimers;
    uint32_t loadTag; /* dext ID used for logging identity */

    /* Globally unique id to identify tasks and corpses */
    uint64_t task_uniqueid;

    /* Miscellaneous */
    vm_map_t XNU_PTRAUTH_SIGNED_PTR("task.map") map; /* Address space description */
    queue_chain_t tasks; /* global list of tasks */
    struct task_watchports *watchports; /* watchports passed in spawn */
    turnstile_inheritor_t returnwait_inheritor; /* inheritor for task_wait */

#if defined(CONFIG_SCHED_MULTIQ)
    sched_group_t sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

    /* Threads in this task */
    queue_head_t threads;
    struct restartable_ranges *t_rr_ranges;

    processor_set_t pset_hint;
    struct affinity_space *affinity_space;

    int thread_count;
    uint32_t active_thread_count;
    int suspend_count; /* Internal scheduling only */
#ifdef CONFIG_TASK_SUSPEND_STATS
    struct task_suspend_stats_s t_suspend_stats; /* suspension statistics for this task */
    task_suspend_source_array_t t_suspend_sources; /* array of suspender debug info for this task */
#endif /* CONFIG_TASK_SUSPEND_STATS */

    /* User-visible scheduling information */
    integer_t user_stop_count; /* outstanding stops */
    integer_t legacy_stop_count; /* outstanding legacy stops */

    int16_t priority; /* base priority for threads */
    int16_t max_priority; /* maximum priority for threads */

    integer_t importance; /* priority offset (BSD 'nice' value) */

#define task_is_immovable(task) \
    !!(task_get_control_port_options(task) & TASK_CONTROL_PORT_IMMOVABLE)
#define task_is_pinned(task) \
    !!(task_get_control_port_options(task) & TASK_CONTROL_PORT_PINNED)

    /* Statistics */
    uint64_t total_runnable_time;

    struct recount_task tk_recount;

    /* IPC structures */
    decl_lck_mtx_data(, itk_lock_data);
    /*
     * Different flavors of task port.
     * These flavors TASK_FLAVOR_* are defined in mach_types.h
     */
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_ports") itk_task_ports[TASK_SELF_PORT_COUNT];
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_settable_self") itk_settable_self; /* a send right */
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_self") itk_self; /* immovable/pinned task port, does not hold right */
    struct exception_action exc_actions[EXC_TYPES_COUNT];
    /* a send right each valid element */
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_host") itk_host; /* a send right */
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_bootstrap") itk_bootstrap; /* a send right */
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_debug_control") itk_debug_control; /* send right for debugmode communications */
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_access") itk_task_access; /* and another send right */
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resume") itk_resume; /* a receive right to resume this task */
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_registered") itk_registered[TASK_PORT_REGISTER_MAX];
    /* all send rights */
    ipc_port_t * XNU_PTRAUTH_SIGNED_PTR("task.itk_dyld_notify") itk_dyld_notify; /* lazy send rights array of size DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT */
#if CONFIG_PROC_RESOURCE_LIMITS
    struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resource_notify") itk_resource_notify; /* a send right to the resource notify port */
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
    struct ipc_space * XNU_PTRAUTH_SIGNED_PTR("task.itk_space") itk_space;

    ledger_t ledger;
    /* Synchronizer ownership information */
    queue_head_t semaphore_list; /* list of owned semaphores */
    int semaphores_owned; /* number of semaphores owned */

    unsigned int priv_flags; /* privilege resource flags */
#define VM_BACKING_STORE_PRIV 0x1

    MACHINE_TASK

    counter_t faults; /* faults counter */
    counter_t pageins; /* pageins counter */
    counter_t cow_faults; /* copy on write fault counter */
    counter_t messages_sent; /* messages sent counter */
    counter_t messages_received; /* messages received counter */
    uint32_t decompressions; /* decompression counter */
    uint32_t syscalls_mach; /* mach system call counter */
    uint32_t syscalls_unix; /* unix system call counter */
    uint32_t c_switch; /* total context switches */
    uint32_t p_switch; /* total processor switches */
    uint32_t ps_switch; /* total pset switches */

#ifdef MACH_BSD
    struct proc_ro * bsd_info_ro;
#endif
    kcdata_descriptor_t corpse_info;
    uint64_t crashed_thread_id;
    queue_chain_t corpse_tasks;
#ifdef CONFIG_MACF
    struct label * crash_label;
#endif
    volatile uint32_t t_flags; /* general-purpose task flags protected by task_lock (TL) */
#define TF_NONE 0
#define TF_64B_ADDR 0x00000001 /* task has 64-bit addressing */
#define TF_64B_DATA 0x00000002 /* task has 64-bit data registers */
#define TF_CPUMON_WARNING 0x00000004 /* task has at least one thread in CPU usage warning zone */
#define TF_WAKEMON_WARNING 0x00000008 /* task is in wakeups monitor warning zone */
#define TF_TELEMETRY (TF_CPUMON_WARNING | TF_WAKEMON_WARNING) /* task is a telemetry participant */
#define TF_GPU_DENIED 0x00000010 /* task is not allowed to access the GPU */
#define TF_PENDING_CORPSE 0x00000040 /* task corpse has not been reported yet */
#define TF_CORPSE_FORK 0x00000080 /* task is a forked corpse */
#define TF_CA_CLIENT_WI 0x00000800 /* task has CA_CLIENT work interval */
#define TF_DARKWAKE_MODE 0x00001000 /* task is in darkwake mode */
#define TF_NO_SMT 0x00002000 /* task threads must not be paired with SMT threads */
#define TF_SYS_VERSION_COMPAT 0x00008000 /* shim task accesses to OS version data (macOS - app compatibility) */
#define TF_TECS 0x00020000 /* task threads must enable CPU security */
#if defined(__x86_64__)
#define TF_INSN_COPY_OPTOUT 0x00040000 /* task threads opt out of unhandled-fault instruction stream collection */
#endif
#define TF_COALITION_MEMBER 0x00080000 /* task is a member of a coalition */
#define TF_NO_CORPSE_FORKING 0x00100000 /* do not fork a corpse for this task */
#define TF_USE_PSET_HINT_CLUSTER_TYPE 0x00200000 /* bind task to task->pset_hint->pset_cluster_type */
#define TF_DYLD_ALL_IMAGE_FINAL 0x00400000 /* all_image_info_addr can no longer be changed */
#define TF_HASPROC 0x00800000 /* task points to a proc */
#define TF_HAS_REPLY_PORT_TELEMETRY 0x10000000 /* Rate limit telemetry for reply port security semantics violations rdar://100244531 */
#define TF_HAS_EXCEPTION_TELEMETRY 0x20000000 /* Rate limit telemetry for exception identity violations rdar://100729339 */
#define TF_GAME_MODE 0x40000000 /* Set the game mode bit for CLPC */

/*
 * WARNING: These TF_ and TFRO_ flags are NOT automatically inherited by a child of fork().
 * If you believe a flag should be inherited, you must manually inherit it in `task_create_internal`.
 */

/*
 * RO-protected flags:
 */
#define TFRO_CORPSE 0x00000020 /* task is a corpse */
#define TFRO_HARDENED 0x00000100 /* task is a hardened runtime binary */
#if XNU_TARGET_OS_OSX
#define TFRO_MACH_HARDENING_OPT_OUT 0x00000200 /* task might load third party plugins on macOS and should be opted out of mach hardening */
#endif /* XNU_TARGET_OS_OSX */
#define TFRO_PLATFORM 0x00000400 /* task is a platform binary */
#define TFRO_FILTER_MSG 0x00004000 /* task calls into message filter callback before sending a message */
#define TFRO_PAC_EXC_FATAL 0x00010000 /* task is marked a corpse if a PAC exception occurs */
#define TFRO_JIT_EXC_FATAL 0x00020000 /* kill the task on access violations from privileged JIT code */
#define TFRO_PAC_ENFORCE_USER_STATE 0x01000000 /* Enforce user and kernel signed thread state */
#if CONFIG_EXCLAVES
#define TFRO_HAS_KD_ACCESS 0x02000000 /* Access to the kernel exclave resource domain */
#endif /* CONFIG_EXCLAVES */

/*
 * Task is running within a 64-bit address space.
 */
#define task_has_64Bit_addr(task) \
    (((task)->t_flags & TF_64B_ADDR) != 0)
#define task_set_64Bit_addr(task) \
    ((task)->t_flags |= TF_64B_ADDR)
#define task_clear_64Bit_addr(task) \
    ((task)->t_flags &= ~TF_64B_ADDR)

/*
 * Task is using 64-bit machine state.
 */
#define task_has_64Bit_data(task) \
    (((task)->t_flags & TF_64B_DATA) != 0)
#define task_set_64Bit_data(task) \
    ((task)->t_flags |= TF_64B_DATA)
#define task_clear_64Bit_data(task) \
    ((task)->t_flags &= ~TF_64B_DATA)
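
/*
 * Illustrative sketch (editor addition, not from the original header):
 * callers normally consult these wrappers rather than testing t_flags
 * directly, e.g.:
 *
 *     if (task_has_64Bit_addr(task)) {
 *         // task runs with a 64-bit user address space
 *     }
 *     if (task_has_64Bit_data(task)) {
 *         // task uses 64-bit register state
 *     }
 */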

#define task_corpse_pending_report(task) \
    (((task)->t_flags & TF_PENDING_CORPSE) != 0)

#define task_set_corpse_pending_report(task) \
    ((task)->t_flags |= TF_PENDING_CORPSE)

#define task_clear_corpse_pending_report(task) \
    ((task)->t_flags &= ~TF_PENDING_CORPSE)

#define task_is_a_corpse_fork(task) \
    (((task)->t_flags & TF_CORPSE_FORK) != 0)

#define task_set_coalition_member(task) \
    ((task)->t_flags |= TF_COALITION_MEMBER)

#define task_clear_coalition_member(task) \
    ((task)->t_flags &= ~TF_COALITION_MEMBER)

#define task_is_coalition_member(task) \
    (((task)->t_flags & TF_COALITION_MEMBER) != 0)

#define task_has_proc(task) \
    (((task)->t_flags & TF_HASPROC) != 0)

#define task_set_has_proc(task) \
    ((task)->t_flags |= TF_HASPROC)

#define task_clear_has_proc(task) \
    ((task)->t_flags &= ~TF_HASPROC)

#define task_has_reply_port_telemetry(task) \
    (((task)->t_flags & TF_HAS_REPLY_PORT_TELEMETRY) != 0)

#define task_set_reply_port_telemetry(task) \
    ((task)->t_flags |= TF_HAS_REPLY_PORT_TELEMETRY)

#define task_has_exception_telemetry(task) \
    (((task)->t_flags & TF_HAS_EXCEPTION_TELEMETRY) != 0)

#define task_set_exception_telemetry(task) \
    ((task)->t_flags |= TF_HAS_EXCEPTION_TELEMETRY)

    uint32_t t_procflags; /* general-purpose task flags protected by proc_lock (PL) */
#define TPF_NONE 0
#define TPF_DID_EXEC 0x00000001 /* task has been execed to a new task */
#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */

#define task_did_exec_internal(task) \
    (((task)->t_procflags & TPF_DID_EXEC) != 0)

#define task_is_exec_copy_internal(task) \
    (((task)->t_procflags & TPF_EXEC_COPY) != 0)

    mach_vm_address_t all_image_info_addr; /* dyld __all_image_info */
    mach_vm_size_t all_image_info_size; /* section location and size */

#if CONFIG_CPU_COUNTERS
#define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_kpc" signifying this task forced all counters */
    uint32_t t_kpc; /* kpc flags */
#endif /* CONFIG_CPU_COUNTERS */

    bool pidsuspended; /* pid_suspend called; no threads can execute */
    bool frozen; /* frozen; private resident pages committed to swap */
    bool changing_freeze_state; /* in the process of freezing or thawing */
    bool is_large_corpse;
    uint16_t policy_ru_cpu :4,
        policy_ru_cpu_ext :4,
        applied_ru_cpu :4,
        applied_ru_cpu_ext :4;
    uint8_t rusage_cpu_flags;
    uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */
    uint8_t rusage_cpu_perthr_percentage; /* Per-thread CPU limit percentage */
#if MACH_ASSERT
    int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */
#endif
    uint8_t t_returnwaitflags;
#define TWF_NONE 0
#define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */
#define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */
#define TRW_LEXEC_COMPLETE 0x04 /* thread should call exec complete */

#if CONFIG_EXCLAVES
    uint8_t t_exclave_state;
#define TES_NONE 0
#define TES_CONCLAVE_TAINTED 0x01 /* Task has talked to conclave, xnu has tainted the process */
#define TES_CONCLAVE_UNTAINTABLE 0x02 /* Task can not be tainted by xnu when it talks to conclave */
#endif /* CONFIG_EXCLAVES */

#if __has_feature(ptrauth_calls)
    bool shared_region_auth_remapped; /* authenticated sections ready for use */
    char *shared_region_id; /* determines which ptr auth key to use */
#endif /* __has_feature(ptrauth_calls) */
    struct vm_shared_region *shared_region;

    uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */
    uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */
    uint64_t rusage_cpu_deadline;
    thread_call_t rusage_cpu_callt;
#if CONFIG_TASKWATCH
    queue_head_t task_watchers; /* app state watcher threads */
    int num_taskwatchers;
    int watchapplying;
#endif /* CONFIG_TASKWATCH */

    struct bank_task *bank_context; /* pointer to per task bank structure */

#if IMPORTANCE_INHERITANCE
    struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */
#endif /* IMPORTANCE_INHERITANCE */

    vm_extmod_statistics_data_t extmod_statistics;

    struct task_requested_policy requested_policy;
    struct task_effective_policy effective_policy;

    /*
     * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away.
     */
    uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */
        low_mem_notified_critical :1, /* critical low memory notification is sent to the task */
        purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */
        purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */
        low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */
        mem_notify_reserved :27; /* reserved for future use */

    uint32_t memlimit_is_active :1, /* if set, use active attributes, otherwise use inactive attributes */
        memlimit_is_fatal :1, /* if set, exceeding current memlimit will prove fatal to the task */
        memlimit_active_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds active memory limit */
        memlimit_inactive_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds inactive memory limit */
        memlimit_attrs_reserved :28; /* reserved for future use */

    io_stat_info_t task_io_stats;

    struct task_writes_counters task_writes_counters_internal;
    struct task_writes_counters task_writes_counters_external;

    /*
     * The cpu_time_qos_stats fields are protected by the task lock
     */
    struct _cpu_time_qos_stats cpu_time_eqos_stats;
    struct _cpu_time_qos_stats cpu_time_rqos_stats;

    /* Statistics accumulated for terminated threads from this task */
    uint32_t task_timer_wakeups_bin_1;
    uint32_t task_timer_wakeups_bin_2;
    uint64_t task_gpu_ns;

    uint8_t task_can_transfer_memory_ownership;
#if DEVELOPMENT || DEBUG
    uint8_t task_no_footprint_for_debug;
#endif
    uint8_t task_objects_disowning;
    uint8_t task_objects_disowned;
    /* # of purgeable volatile VM objects owned by this task: */
    int task_volatile_objects;
    /* # of purgeable but not volatile VM objects owned by this task: */
    int task_nonvolatile_objects;
    int task_owned_objects;
    queue_head_t task_objq;
    decl_lck_mtx_data(, task_objq_lock); /* protects "task_objq" */

    unsigned int task_thread_limit:16;
#if __arm64__
    unsigned int task_legacy_footprint:1;
    unsigned int task_extra_footprint_limit:1;
    unsigned int task_ios13extended_footprint_limit:1;
#endif /* __arm64__ */
    unsigned int task_region_footprint:1;
    unsigned int task_has_crossed_thread_limit:1;
    unsigned int task_rr_in_flight:1; /* a t_rr_synchronize() is in flight */
    /*
     * A task's coalition set is "adopted" in task_create_internal
     * and unset in task_deallocate_internal, so each array member
     * can be referenced without the task lock.
     * Note: these fields are protected by coalition->lock,
     * not the task lock.
     */
    coalition_t coalition[COALITION_NUM_TYPES];
    queue_chain_t task_coalition[COALITION_NUM_TYPES];
    uint64_t dispatchqueue_offset;

#if DEVELOPMENT || DEBUG
    boolean_t task_unnested;
    int task_disconnected_count;
#endif

#if HYPERVISOR
    void * XNU_PTRAUTH_SIGNED_PTR("task.hv_task_target") hv_task_target; /* hypervisor virtual machine object associated with this task */
#endif /* HYPERVISOR */

#if CONFIG_SECLUDED_MEMORY
    uint8_t task_can_use_secluded_mem;
    uint8_t task_could_use_secluded_mem;
    uint8_t task_could_also_use_secluded_mem;
    uint8_t task_suppressed_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

    task_exc_guard_behavior_t task_exc_guard;
    mach_vm_address_t mach_header_vm_address;

    queue_head_t io_user_clients;

#if CONFIG_FREEZE
    queue_head_t task_frozen_cseg_q; /* queue of csegs frozen to NAND */
#endif /* CONFIG_FREEZE */
    boolean_t donates_own_pages; /* pages land on the special Q (only swappable pages on iPadOS, early swap on macOS) */
    uint32_t task_shared_region_slide; /* cached here to avoid locking during telemetry */
#if CONFIG_PHYS_WRITE_ACCT
    uint64_t task_fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
    uuid_t task_shared_region_uuid;
#if CONFIG_MEMORYSTATUS
    uint64_t memstat_dirty_start; /* last abstime transition into the dirty band or last call to task_ledger_settle_dirty_time while dirty */
#endif /* CONFIG_MEMORYSTATUS */
    vmobject_list_output_t corpse_vmobject_list;
    uint64_t corpse_vmobject_list_size;
#if CONFIG_DEFERRED_RECLAIM
    vm_deferred_reclamation_metadata_t deferred_reclamation_metadata; /* Protected by the task lock */
#endif /* CONFIG_DEFERRED_RECLAIM */

#if CONFIG_EXCLAVES
    void * XNU_PTRAUTH_SIGNED_PTR("task.conclave") conclave;
    void * XNU_PTRAUTH_SIGNED_PTR("task.exclave_crash_info") exclave_crash_info;
    uint32_t exclave_crash_info_length;
#endif /* CONFIG_EXCLAVES */
};

ZONE_DECLARE_ID(ZONE_ID_PROC_TASK, void *);
extern zone_t proc_task_zone;

extern task_control_port_options_t task_get_control_port_options(task_t task);
extern void task_set_control_port_options(task_t task, task_control_port_options_t opts);

/*
 * EXC_GUARD default delivery behavior for optional Mach port and VM guards.
 * Applied to new tasks at creation time.
 */
extern task_exc_guard_behavior_t task_exc_guard_default;
extern size_t proc_and_task_size;
extern void *get_bsdtask_info(task_t t);
extern void *task_get_proc_raw(task_t task);
static inline void
task_require(struct task *task)
{
    zone_id_require(ZONE_ID_PROC_TASK, proc_and_task_size, task_get_proc_raw(task));
}
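
/*
 * Illustrative sketch (editor addition, assumptions noted): task_require()
 * validates that a task pointer really is an element of the proc_task zone
 * before it is trusted, panicking otherwise. A hypothetical caller might do:
 *
 *     task_t task = ...;        // e.g. obtained from a lookup elsewhere
 *     task_require(task);       // zone check; panics on a forged/freed pointer
 *     // task can now be dereferenced
 */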

/*
 * task_lock() and task_unlock() need to be callable from the `bsd/` tree of
 * XNU and are therefore promoted to full functions instead of macros so that
 * they can be linked against.
 *
 * We provide `extern` declarations here for consumers of `task.h` in `osfmk/`,
 * then separately provide `inline` definitions in `task.c`. Together with the
 * `BUILD_LTO=1` build argument, this guarantees these functions are always
 * inlined regardless of whether called from the `osfmk/` tree or `bsd/` tree.
 */
extern void task_lock(task_t);
extern void task_unlock(task_t);

#define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED)
#define task_lock_try(task) lck_mtx_try_lock(&(task)->lock)
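
/*
 * Illustrative sketch (editor addition, assumption about typical usage):
 * fields documented above as protected by the task lock (TL), such as
 * t_flags, are read and written with the lock held, e.g.:
 *
 *     task_lock(task);
 *     if (task->active) {
 *         task->t_flags |= TF_NO_SMT;   // hypothetical flag update under the lock
 *     }
 *     task_unlock(task);
 */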

#define task_objq_lock_init(task) lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define task_objq_lock_destroy(task) lck_mtx_destroy(&(task)->task_objq_lock, &vm_object_lck_grp)
#define task_objq_lock(task) lck_mtx_lock(&(task)->task_objq_lock)
#define task_objq_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED)
#define task_objq_lock_try(task) lck_mtx_try_lock(&(task)->task_objq_lock)
#define task_objq_unlock(task) lck_mtx_unlock(&(task)->task_objq_lock)

#define itk_lock_init(task) lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr)
#define itk_lock_destroy(task) lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp)
#define itk_lock(task) lck_mtx_lock(&(task)->itk_lock_data)
#define itk_unlock(task) lck_mtx_unlock(&(task)->itk_lock_data)

/* task clear return wait flags */
#define TCRW_CLEAR_INITIAL_WAIT 0x1
#define TCRW_CLEAR_FINAL_WAIT 0x2
#define TCRW_CLEAR_EXEC_COMPLETE 0x4
#define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)

/* Initialize task module */
extern void task_init(void);

/* coalition_init() calls this to initialize ledgers before task_init() */
extern void init_task_ledgers(void);

extern task_t current_task(void) __pure2;

extern bool task_is_driver(task_t task);
extern uint32_t task_ro_flags_get(task_t task);
extern void task_ro_flags_set(task_t task, uint32_t flags);
extern void task_ro_flags_clear(task_t task, uint32_t flags);

extern lck_attr_t task_lck_attr;
extern lck_grp_t task_lck_grp;

struct task_watchport_elem {
    task_t twe_task;
    ipc_port_t twe_port; /* (Space lock) */
    ipc_port_t XNU_PTRAUTH_SIGNED_PTR("twe_pdrequest") twe_pdrequest;
};

struct task_watchports {
    os_refcnt_t tw_refcount; /* (Space lock) */
    task_t tw_task; /* (Space lock) & tw_refcount == 0 */
    thread_t tw_thread; /* (Space lock) & tw_refcount == 0 */
    uint32_t tw_elem_array_count; /* (Space lock) */
    struct task_watchport_elem tw_elem[]; /* (Space lock) & (Portlock) & (mq lock) */
};

#define task_watchports_retain(x) (os_ref_retain(&(x)->tw_refcount))
#define task_watchports_release(x) (os_ref_release(&(x)->tw_refcount))

#define task_watchport_elem_init(elem, task, port) \
do { \
    (elem)->twe_task = (task); \
    (elem)->twe_port = (port); \
    (elem)->twe_pdrequest = IP_NULL; \
} while(0)

#define task_watchport_elem_clear(elem) task_watchport_elem_init((elem), NULL, NULL)
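
/*
 * Illustrative sketch (editor addition): a watchport element is stamped with
 * its owning task and watched port before being attached, and reset on
 * teardown; the macros above only assign fields and take no port references:
 *
 *     struct task_watchport_elem elem;
 *     task_watchport_elem_init(&elem, task, port);
 *     ...
 *     task_watchport_elem_clear(&elem);   // same as init(&elem, NULL, NULL)
 */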

extern void
task_add_turnstile_watchports(
    task_t task,
    thread_t thread,
    ipc_port_t *portwatch_ports,
    uint32_t portwatch_count);

extern void
task_watchport_elem_deallocate(
    struct task_watchport_elem *watchport_elem);

extern boolean_t
task_has_watchports(task_t task);

void
task_dyld_process_info_update_helper(
    task_t task,
    size_t active_count,
    vm_map_address_t magic_addr,
    ipc_port_t *release_ports,
    size_t release_count);

extern kern_return_t
task_suspend2_mig(
    task_t task,
    task_suspension_token_t *suspend_token);

extern kern_return_t
task_suspend2_external(
    task_t task,
    task_suspension_token_t *suspend_token);

extern kern_return_t
task_resume2_mig(
    task_suspension_token_t suspend_token);

extern kern_return_t
task_resume2_external(
    task_suspension_token_t suspend_token);

extern void
task_suspension_token_deallocate_grp(
    task_suspension_token_t suspend_token,
    task_grp_t grp);

extern ipc_port_t
convert_task_to_port_with_flavor(
    task_t task,
    mach_task_flavor_t flavor,
    task_grp_t grp);

extern task_t current_task_early(void) __pure2;

#else /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

extern task_t current_task(void) __pure2;

extern bool task_is_driver(task_t task);

#define TF_NONE 0

#define TWF_NONE 0
#define TRW_LRETURNWAIT 0x01 /* task is waiting for fork/posix_spawn/exec to complete */
#define TRW_LRETURNWAITER 0x02 /* task is waiting for TRW_LRETURNWAIT to get cleared */
#define TRW_LEXEC_COMPLETE 0x04 /* thread should call exec complete */

/* task clear return wait flags */
#define TCRW_CLEAR_INITIAL_WAIT 0x1
#define TCRW_CLEAR_FINAL_WAIT 0x2
#define TCRW_CLEAR_EXEC_COMPLETE 0x4
#define TCRW_CLEAR_ALL_WAIT (TCRW_CLEAR_INITIAL_WAIT | TCRW_CLEAR_FINAL_WAIT)


#define TPF_NONE 0
#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */


__END_DECLS

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef KERNEL_PRIVATE
extern boolean_t task_is_app_suspended(task_t task);
extern bool task_is_exotic(task_t task);
extern bool task_is_alien(task_t task);
#endif /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

/* Hold all threads in a task and wait for them to stop running (just to get off CPU) */
extern kern_return_t task_hold_and_wait(
    task_t task);

/* Release hold on all threads in a task */
extern kern_return_t task_release(
    task_t task);

/* Suspend/resume a task where the kernel owns the suspend count */
extern kern_return_t task_suspend_internal_locked(task_t task);
extern kern_return_t task_suspend_internal(task_t task);
extern kern_return_t task_resume_internal_locked(task_t task);
extern kern_return_t task_resume_internal(task_t task);

/* Suspends a task by placing a hold on its threads */
extern kern_return_t task_pidsuspend(
    task_t task);

/* Resumes a previously paused task */
extern kern_return_t task_pidresume(
    task_t task);

extern kern_return_t task_send_trace_memory(
    task_t task,
    uint32_t pid,
    uint64_t uniqueid);

extern void task_remove_turnstile_watchports(
    task_t task);

extern void task_transfer_turnstile_watchports(
    task_t old_task,
    task_t new_task,
    thread_t new_thread);

extern kern_return_t
task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *, bool);

#if DEVELOPMENT || DEBUG

extern kern_return_t task_disconnect_page_mappings(
    task_t task);
#endif /* DEVELOPMENT || DEBUG */

extern void tasks_system_suspend(boolean_t suspend);

#if CONFIG_FREEZE

/* Freeze a task's resident pages */
extern kern_return_t task_freeze(
    task_t task,
    uint32_t *purgeable_count,
    uint32_t *wired_count,
    uint32_t *clean_count,
    uint32_t *dirty_count,
    uint32_t dirty_budget,
    uint32_t *shared_count,
    int *freezer_error_code,
    boolean_t eval_only);

/* Thaw a currently frozen task */
extern kern_return_t task_thaw(
    task_t task);

typedef enum {
    CREDIT_TO_SWAP = 1,
    DEBIT_FROM_SWAP = 2
} freezer_acct_op_t;

extern void task_update_frozen_to_swap_acct(
    task_t task,
    int64_t amount,
    freezer_acct_op_t op);

#endif /* CONFIG_FREEZE */

/* Halt all other threads in the current task */
extern kern_return_t task_start_halt(
    task_t task);

/* Wait for other threads to halt and free halting task resources */
extern void task_complete_halt(
    task_t task);

extern kern_return_t task_terminate_internal(
    task_t task);

struct proc_ro;
typedef struct proc_ro *proc_ro_t;

extern kern_return_t task_create_internal(
    task_t parent_task,
    proc_ro_t proc_ro,
    coalition_t *parent_coalitions,
    boolean_t inherit_memory,
    boolean_t is_64bit,
    boolean_t is_64bit_data,
    uint32_t t_flags,
    uint32_t t_flags_ro,
    uint32_t procflags,
    uint8_t t_returnwaitflags,
    task_t child_task);

extern kern_return_t task_set_special_port_internal(
    task_t task,
    int which,
    ipc_port_t port);

extern kern_return_t task_set_security_tokens(
    task_t task,
    security_token_t sec_token,
    audit_token_t audit_token,
    host_priv_t host_priv);

extern kern_return_t task_info(
    task_t task,
    task_flavor_t flavor,
    task_info_t task_info_out,
    mach_msg_type_number_t *task_info_count);

/*
 * Additional fields that aren't exposed through `task_power_info` but needed
 * by clients of `task_power_info_locked`.
 */
struct task_power_info_extra {
    uint64_t cycles;
    uint64_t instructions;
    uint64_t pcycles;
    uint64_t pinstructions;
    uint64_t user_ptime;
    uint64_t system_ptime;
    uint64_t runnable_time;
    uint64_t energy;
    uint64_t penergy;
    uint64_t secure_time;
    uint64_t secure_ptime;
};

void task_power_info_locked(
    task_t task,
    task_power_info_t info,
    gpu_energy_data_t gpu_energy,
    task_power_info_v2_t infov2,
    struct task_power_info_extra *extra_info);
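
/*
 * Illustrative sketch (editor addition; the NULL-able parameters and locking
 * requirement are assumptions, not guarantees from this header): a caller
 * wanting the classic power info plus the extra counters might do:
 *
 *     struct task_power_info info = { 0 };
 *     struct task_power_info_v2 info_v2 = { 0 };
 *     struct task_power_info_extra extra = { 0 };
 *
 *     task_lock(task);
 *     task_power_info_locked(task, &info, NULL, &info_v2, &extra);
 *     task_unlock(task);
 */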

extern uint64_t task_gpu_utilisation(
    task_t task);

extern void task_update_cpu_time_qos_stats(
    task_t task,
    uint64_t *eqos_stats,
    uint64_t *rqos_stats);

extern void task_vtimer_set(
    task_t task,
    integer_t which);

extern void task_vtimer_clear(
    task_t task,
    integer_t which);

extern void task_vtimer_update(
    task_t task,
    integer_t which,
    uint32_t *microsecs);

#define TASK_VTIMER_USER 0x01
#define TASK_VTIMER_PROF 0x02
#define TASK_VTIMER_RLIM 0x04
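
/*
 * Illustrative sketch (editor addition, assumed usage): a caller arms a
 * per-task virtual timer, threads periodically fold elapsed microseconds in
 * via task_vtimer_update(), and the timer is disarmed when no longer needed:
 *
 *     task_vtimer_set(task, TASK_VTIMER_USER);
 *     ...
 *     uint32_t usecs = 0;
 *     task_vtimer_update(task, TASK_VTIMER_USER, &usecs);
 *     ...
 *     task_vtimer_clear(task, TASK_VTIMER_USER);
 */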

extern void task_set_64bit(
    task_t task,
    boolean_t is_64bit,
    boolean_t is_64bit_data);

extern bool task_get_64bit_addr(
    task_t task);

extern bool task_get_64bit_data(
    task_t task);

extern void task_set_platform_binary(
    task_t task,
    boolean_t is_platform);

#if XNU_TARGET_OS_OSX
#if DEVELOPMENT || DEBUG
/* Disables task identity security hardening (*_set_exception_ports policy)
 * for all tasks if amfi_get_out_of_my_way is set. */
extern bool AMFI_bootarg_disable_mach_hardening;
#endif /* DEVELOPMENT || DEBUG */
extern void task_disable_mach_hardening(
    task_t task);

extern bool task_opted_out_mach_hardening(
    task_t task);
#endif /* XNU_TARGET_OS_OSX */

extern boolean_t task_get_platform_binary(
    task_t task);

extern void
task_set_hardened_runtime(
    task_t task,
    bool is_hardened);

extern boolean_t
task_is_hardened_binary(
    task_t task);

extern boolean_t task_is_a_corpse(
    task_t task);

extern boolean_t task_is_ipc_active(
    task_t task);

extern void task_set_corpse(
    task_t task);

extern void task_set_exc_guard_ctrl_port_default(
    task_t task,
    thread_t main_thread,
    const char *name,
    unsigned int namelen,
    boolean_t is_simulated,
    uint32_t platform,
    uint32_t sdk);

extern void task_set_immovable_pinned(task_t task);

extern bool task_set_ca_client_wi(
    task_t task,
    boolean_t ca_client_wi);

extern kern_return_t task_set_dyld_info(
    task_t task,
    mach_vm_address_t addr,
    mach_vm_size_t size);

extern void task_set_mach_header_address(
    task_t task,
    mach_vm_address_t addr);

extern void task_set_uniqueid(task_t task);

/* Get number of activations in a task */
extern int get_task_numacts(
    task_t task);

extern bool task_donates_own_pages(
    task_t task);

struct label;
extern kern_return_t task_collect_crash_info(
    task_t task,
#if CONFIG_MACF
    struct label *crash_label,
#endif
    int is_corpse_fork);
void task_wait_till_threads_terminate_locked(task_t task);

/* JMM - should just be temporary (implementation in bsd_kern still) */
extern void set_bsdtask_info(task_t, void *);
extern uint32_t set_task_loadTag(task_t task, uint32_t loadTag);
extern vm_map_t get_task_map_reference(task_t);
extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t);
extern pmap_t get_task_pmap(task_t);
extern uint64_t get_task_resident_size(task_t);
extern uint64_t get_task_compressed(task_t);
extern uint64_t get_task_resident_max(task_t);
extern uint64_t get_task_phys_footprint(task_t);
#if CONFIG_LEDGER_INTERVAL_MAX
extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset);
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
extern uint64_t get_task_phys_footprint_lifetime_max(task_t);
extern uint64_t get_task_phys_footprint_limit(task_t);
extern uint64_t get_task_purgeable_size(task_t);
extern uint64_t get_task_cpu_time(task_t);
extern uint64_t get_task_dispatchqueue_offset(task_t);
extern uint64_t get_task_dispatchqueue_serialno_offset(task_t);
extern uint64_t get_task_dispatchqueue_label_offset(task_t);
extern uint64_t get_task_uniqueid(task_t task);
extern int get_task_version(task_t task);

extern uint64_t get_task_internal(task_t);
extern uint64_t get_task_internal_compressed(task_t);
extern uint64_t get_task_purgeable_nonvolatile(task_t);
extern uint64_t get_task_purgeable_nonvolatile_compressed(task_t);
extern uint64_t get_task_iokit_mapped(task_t);
extern uint64_t get_task_alternate_accounting(task_t);
extern uint64_t get_task_alternate_accounting_compressed(task_t);
extern uint64_t get_task_memory_region_count(task_t);
extern uint64_t get_task_page_table(task_t);
#if CONFIG_FREEZE
extern uint64_t get_task_frozen_to_swap(task_t);
#endif
extern uint64_t get_task_network_nonvolatile(task_t);
extern uint64_t get_task_network_nonvolatile_compressed(task_t);
extern uint64_t get_task_wired_mem(task_t);
extern uint32_t get_task_loadTag(task_t task);

extern uint64_t get_task_tagged_footprint(task_t task);
extern uint64_t get_task_tagged_footprint_compressed(task_t task);
extern uint64_t get_task_media_footprint(task_t task);
extern uint64_t get_task_media_footprint_compressed(task_t task);
extern uint64_t get_task_graphics_footprint(task_t task);
extern uint64_t get_task_graphics_footprint_compressed(task_t task);
extern uint64_t get_task_neural_footprint(task_t task);
extern uint64_t get_task_neural_footprint_compressed(task_t task);

extern kern_return_t task_convert_phys_footprint_limit(int, int *);
extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t);
extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb);
#if DEBUG || DEVELOPMENT
#if CONFIG_MEMORYSTATUS
extern kern_return_t task_set_diag_footprint_limit_internal(task_t, uint64_t, uint64_t *);
extern kern_return_t task_get_diag_footprint_limit_internal(task_t, uint64_t *, bool *);
extern kern_return_t task_set_diag_footprint_limit(task_t task, uint64_t new_limit_mb, uint64_t *old_limit_mb);
#endif /* CONFIG_MEMORYSTATUS */
#endif /* DEBUG || DEVELOPMENT */

extern security_token_t *task_get_sec_token(task_t task);
extern void task_set_sec_token(task_t task, security_token_t *token);
extern audit_token_t *task_get_audit_token(task_t task);
extern void task_set_audit_token(task_t task, audit_token_t *token);
extern void task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token);
extern boolean_t task_is_privileged(task_t task);
extern uint8_t *task_get_mach_trap_filter_mask(task_t task);
extern void task_set_mach_trap_filter_mask(task_t task, uint8_t *mask);
extern uint8_t *task_get_mach_kobj_filter_mask(task_t task);
extern void task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask);
extern mach_vm_address_t task_get_all_image_info_addr(task_t task);

/* Jetsam memlimit attributes */
extern boolean_t task_get_memlimit_is_active(task_t task);
extern boolean_t task_get_memlimit_is_fatal(task_t task);
extern void task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active);
extern void task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal);
extern boolean_t task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);
extern void task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);

extern uint64_t task_get_dirty_start(task_t task);
extern void task_set_dirty_start(task_t task, uint64_t start);

extern void task_set_thread_limit(task_t task, uint16_t thread_limit);
#if CONFIG_PROC_RESOURCE_LIMITS
extern kern_return_t task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit);
#endif /* CONFIG_PROC_RESOURCE_LIMITS */
extern void task_port_space_ast(task_t task);

#if XNU_TARGET_OS_OSX
extern boolean_t task_has_system_version_compat_enabled(task_t task);
extern void task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat);
#endif

extern boolean_t is_kerneltask(task_t task);
extern boolean_t is_corpsefork(task_t task);

extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);

extern kern_return_t machine_task_get_state(
    task_t task,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t *state_count);

extern kern_return_t machine_task_set_state(
    task_t task,
    int flavor,
    thread_state_t state,
    mach_msg_type_number_t state_count);

extern void machine_task_terminate(task_t task);

extern kern_return_t machine_task_process_signature(task_t task, uint32_t platform, uint32_t sdk, char const **error_msg);

struct _task_ledger_indices {
    int cpu_time;
    int tkm_private;
    int tkm_shared;
    int phys_mem;
    int wired_mem;
    int conclave_mem;
    int internal;
    int iokit_mapped;
    int external;
    int reusable;
    int alternate_accounting;
    int alternate_accounting_compressed;
    int page_table;
    int phys_footprint;
    int internal_compressed;
    int purgeable_volatile;
    int purgeable_nonvolatile;
    int purgeable_volatile_compressed;
    int purgeable_nonvolatile_compressed;
    int tagged_nofootprint;
    int tagged_footprint;
    int tagged_nofootprint_compressed;
    int tagged_footprint_compressed;
    int network_volatile;
    int network_nonvolatile;
    int network_volatile_compressed;
    int network_nonvolatile_compressed;
    int media_nofootprint;
    int media_footprint;
    int media_nofootprint_compressed;
    int media_footprint_compressed;
    int graphics_nofootprint;
    int graphics_footprint;
    int graphics_nofootprint_compressed;
    int graphics_footprint_compressed;
    int neural_nofootprint;
    int neural_footprint;
    int neural_nofootprint_compressed;
    int neural_footprint_compressed;
    int platform_idle_wakeups;
    int interrupt_wakeups;
#if CONFIG_SCHED_SFI
    int sfi_wait_times[MAX_SFI_CLASS_ID];
#endif /* CONFIG_SCHED_SFI */
    int cpu_time_billed_to_me;
    int cpu_time_billed_to_others;
    int physical_writes;
    int logical_writes;
    int logical_writes_to_external;
    int energy_billed_to_me;
    int energy_billed_to_others;
#if CONFIG_MEMORYSTATUS
    int memorystatus_dirty_time;
#endif /* CONFIG_MEMORYSTATUS */
#if DEBUG || DEVELOPMENT
    int pages_grabbed;
    int pages_grabbed_kern;
    int pages_grabbed_iopl;
    int pages_grabbed_upl;
#endif
#if CONFIG_FREEZE
    int frozen_to_swap;
#endif /* CONFIG_FREEZE */
#if CONFIG_PHYS_WRITE_ACCT
    int fs_metadata_writes;
#endif /* CONFIG_PHYS_WRITE_ACCT */
    int swapins;
};

/*
 * Many of the task ledger entries use a reduced feature set
 * (specifically they just use LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE)
 * and are stored in a smaller entry structure.
 * That structure is an implementation detail of the ledger.
 * But on PPL systems, the task ledger's memory is managed by the PPL
 * and it has to determine the size of the task ledger at compile time.
 * This define specifies the number of small entries so the PPL can
 * properly determine the ledger's size.
 *
 * If you add a new entry with only the
 * LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_INACTIVE
 * flags, you need to increment this count.
 * Otherwise, PPL systems will panic at boot.
 */
#if DEVELOPMENT || DEBUG
#define TASK_LEDGER_NUM_SMALL_INDICES 33
#else
#define TASK_LEDGER_NUM_SMALL_INDICES 29
#endif /* DEVELOPMENT || DEBUG */
extern struct _task_ledger_indices task_ledgers;

/* requires task to be unlocked, returns a referenced thread */
thread_t task_findtid(task_t task, uint64_t tid);
int pid_from_task(task_t task);

extern kern_return_t task_wakeups_monitor_ctl(task_t task, uint32_t *rate_hz, int32_t *flags);
extern kern_return_t task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags);
extern void task_rollup_accounting_info(task_t new_task, task_t parent_task);
extern kern_return_t task_io_monitor_ctl(task_t task, uint32_t *flags);
extern void task_set_did_exec_flag(task_t task);
extern void task_clear_exec_copy_flag(task_t task);
extern boolean_t task_is_exec_copy(task_t);
extern boolean_t task_did_exec(task_t task);
extern boolean_t task_is_active(task_t task);
extern boolean_t task_is_halting(task_t task);
extern void task_clear_return_wait(task_t task, uint32_t flags);
extern void task_wait_to_return(void) __attribute__((noreturn));
extern event_t task_get_return_wait_event(task_t task);

extern void task_bank_reset(task_t task);
extern void task_bank_init(task_t task);

#if CONFIG_MEMORYSTATUS
extern void task_ledger_settle_dirty_time(task_t t);
#endif /* CONFIG_MEMORYSTATUS */

#if CONFIG_ARCADE
extern void task_prep_arcade(task_t task, thread_t thread);
#endif /* CONFIG_ARCADE */

extern int task_pid(task_t task);

#if __has_feature(ptrauth_calls)
char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *);
void task_set_shared_region_id(task_t task, char *id);
#endif /* __has_feature(ptrauth_calls) */

extern boolean_t task_has_assertions(task_t task);
/* End task_policy */

extern void task_set_gpu_denied(task_t task, boolean_t denied);
extern boolean_t task_is_gpu_denied(task_t task);

extern void task_set_game_mode(task_t task, bool enabled);
/* returns true if update must be pushed to coalition (Automatically handled by task_set_game_mode) */
extern bool task_set_game_mode_locked(task_t task, bool enabled);
extern bool task_get_game_mode(task_t task);

extern queue_head_t * task_io_user_clients(task_t task);
extern void task_set_message_app_suspended(task_t task, boolean_t enable);

extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task);

extern void task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num);
extern void task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries);
extern void task_store_owned_vmobject_info(task_t to_task, task_t from_task);

extern void task_set_filter_msg_flag(task_t task, boolean_t flag);
extern boolean_t task_get_filter_msg_flag(task_t task);

#if __has_feature(ptrauth_calls)
extern bool task_is_pac_exception_fatal(task_t task);
extern void task_set_pac_exception_fatal_flag(task_t task);
#endif /* __has_feature(ptrauth_calls) */

extern bool task_is_jit_exception_fatal(task_t task);
extern void task_set_jit_exception_fatal_flag(task_t task);

extern bool task_needs_user_signed_thread_state(task_t task);
extern void task_set_tecs(task_t task);
extern void task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size);

extern boolean_t task_corpse_forking_disabled(task_t task);

void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task,
    uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit);

extern int get_task_cdhash(task_t task, char cdhash[CS_CDHASH_LEN]);

extern boolean_t kdp_task_is_locked(task_t task);

/* Kernel side prototypes for MIG routines */
extern kern_return_t task_get_exception_ports(
    task_t task,
    exception_mask_t exception_mask,
    exception_mask_array_t masks,
    mach_msg_type_number_t *CountCnt,
    exception_port_array_t ports,
    exception_behavior_array_t behaviors,
    thread_state_flavor_array_t flavors);

#if CONFIG_EXCLAVES
int task_add_conclave(task_t task, void *, int64_t, const char *task_conclave_id);
kern_return_t task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off);
kern_return_t task_launch_conclave(mach_port_name_t port);
void task_clear_conclave(task_t task);
void task_stop_conclave(task_t task, bool gather_crash_bt);
kern_return_t task_stop_conclave_upcall(void);
kern_return_t task_stop_conclave_upcall_complete(void);
kern_return_t task_suspend_conclave_upcall(uint64_t *, size_t);
struct xnuupcalls_conclavesharedbuffer_s;
kern_return_t task_crash_info_conclave_upcall(task_t task,
    const struct xnuupcalls_conclavesharedbuffer_s *shared_buf, uint32_t length);
typedef struct exclaves_resource exclaves_resource_t;
exclaves_resource_t *task_get_conclave(task_t task);
void task_set_conclave_untaintable(task_t task);
void task_add_conclave_crash_info(task_t task, void *crash_info_ptr);
/* Changing this would also warrant a change in ConclaveSharedBuffer */
#define CONCLAVE_CRASH_BUFFER_PAGECOUNT 2

#endif /* CONFIG_EXCLAVES */

#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE

extern void *get_bsdtask_info(task_t);
extern void *get_bsdthreadtask_info(thread_t);
extern void task_bsdtask_kill(task_t);
extern vm_map_t get_task_map(task_t);
extern ledger_t get_task_ledger(task_t);

extern boolean_t get_task_pidsuspended(task_t);
extern boolean_t get_task_suspended(task_t);
extern boolean_t get_task_frozen(task_t);

/*
 * Flavors of convert_task_to_port. XNU callers get convert_task_to_port_kernel,
 * external callers get convert_task_to_port_external.
 */
extern ipc_port_t convert_task_to_port(task_t);
extern ipc_port_t convert_task_to_port_kernel(task_t);
extern ipc_port_t convert_task_to_port_external(task_t);
extern ipc_port_t convert_task_to_port_pinned(task_t);

extern ipc_port_t convert_task_read_to_port(task_t);
extern ipc_port_t convert_task_read_to_port_kernel(task_read_t);
extern ipc_port_t convert_task_read_to_port_external(task_t);

extern ipc_port_t convert_task_inspect_to_port(task_inspect_t);
extern ipc_port_t convert_task_name_to_port(task_name_t);

extern ipc_port_t convert_corpse_to_port_and_nsrequest(task_t task);

extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task);
/* Convert from a port (in this case, an SO right to a task's resume port) to a task. */
extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t port);

extern void task_suspension_send_once(ipc_port_t port);

#define TASK_WRITE_IMMEDIATE 0x1
#define TASK_WRITE_DEFERRED 0x2
#define TASK_WRITE_INVALIDATED 0x4
#define TASK_WRITE_METADATA 0x8
extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp);
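
/*
 * Illustrative sketch (editor addition; the caller and classification are
 * assumptions): a filesystem-side path could charge a synchronously issued
 * write of io_size bytes against vnode vp to the current task with:
 *
 *     task_update_logical_writes(current_task(), (uint32_t)io_size,
 *         TASK_WRITE_IMMEDIATE, vp);
 */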

__enum_decl(task_balance_flags_t, uint8_t, {
    TASK_BALANCE_CREDIT = 0x1,
    TASK_BALANCE_DEBIT = 0x2,
});

__enum_decl(task_physical_write_flavor_t, uint8_t, {
    TASK_PHYSICAL_WRITE_METADATA = 0x1,
});
extern void task_update_physical_writes(task_t task, task_physical_write_flavor_t flavor,
    uint64_t io_size, task_balance_flags_t flags);

#if CONFIG_SECLUDED_MEMORY
extern void task_set_can_use_secluded_mem(
    task_t task,
    boolean_t can_use_secluded_mem);
extern void task_set_could_use_secluded_mem(
    task_t task,
    boolean_t could_use_secluded_mem);
extern void task_set_could_also_use_secluded_mem(
    task_t task,
    boolean_t could_also_use_secluded_mem);
extern boolean_t task_can_use_secluded_mem(
    task_t task,
    boolean_t is_allocate);
extern boolean_t task_could_use_secluded_mem(task_t task);
extern boolean_t task_could_also_use_secluded_mem(task_t task);
#endif /* CONFIG_SECLUDED_MEMORY */

extern void task_set_darkwake_mode(task_t, boolean_t);
extern boolean_t task_get_darkwake_mode(task_t);

#if __arm64__
extern void task_set_legacy_footprint(task_t task);
extern void task_set_extra_footprint_limit(task_t task);
extern void task_set_ios13extended_footprint_limit(task_t task);
#endif /* __arm64__ */

#if CONFIG_MACF
extern struct label *get_task_crash_label(task_t task);
extern void set_task_crash_label(task_t task, struct label *label);
#endif /* CONFIG_MACF */

#endif /* KERNEL_PRIVATE */

extern task_t kernel_task;

extern void task_name_deallocate_mig(
    task_name_t task_name);

extern void task_policy_set_deallocate_mig(
    task_policy_set_t task_policy_set);

extern void task_policy_get_deallocate_mig(
    task_policy_get_t task_policy_get);

extern void task_inspect_deallocate_mig(
    task_inspect_t task_inspect);

extern void task_read_deallocate_mig(
    task_read_t task_read);

extern void task_suspension_token_deallocate(
    task_suspension_token_t token);

extern boolean_t task_self_region_footprint(void);
extern void task_self_region_footprint_set(boolean_t newval);
extern void task_ledgers_footprint(ledger_t ledger,
    ledger_amount_t *ledger_resident,
    ledger_amount_t *ledger_compressed);
extern void task_set_memory_ownership_transfer(
    task_t task,
    boolean_t value);

#if DEVELOPMENT || DEBUG
extern void task_set_no_footprint_for_debug(
    task_t task,
    boolean_t value);
extern int task_get_no_footprint_for_debug(
    task_t task);
#endif /* DEVELOPMENT || DEBUG */

#ifdef KERNEL_PRIVATE
extern kern_return_t task_get_suspend_stats(task_t task, task_suspend_stats_t stats);
extern kern_return_t task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats);
#endif /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t task_get_suspend_sources(task_t task, task_suspend_source_array_t sources);
extern kern_return_t task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources);
#endif /* XNU_KERNEL_PRIVATE */

#if CONFIG_ROSETTA
extern bool task_is_translated(task_t task);
#endif

#ifdef MACH_KERNEL_PRIVATE
void task_procname(task_t task, char *buf, int size);
void task_best_name(task_t task, char *buf, size_t size);
#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_TASK_H_ */