1/*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: thread.h
60 * Author: Avadis Tevanian, Jr.
61 *
62 * This file contains the structure definitions for threads.
63 *
64 */
65/*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83
84#ifndef _KERN_THREAD_H_
85#define _KERN_THREAD_H_
86
87#include <mach/kern_return.h>
88#include <mach/mach_types.h>
89#include <mach/mach_param.h>
90#include <mach/message.h>
91#include <mach/boolean.h>
92#include <mach/vm_param.h>
93#include <mach/thread_info.h>
94#include <mach/thread_status.h>
95#include <mach/exception_types.h>
96
97#include <kern/kern_types.h>
98#include <vm/vm_kern.h>
99#include <sys/cdefs.h>
100#include <sys/_types/_size_t.h>
101
102#ifdef MACH_KERNEL_PRIVATE
103#include <mach_assert.h>
104#include <mach_ldebug.h>
105
106#include <ipc/ipc_types.h>
107
108#include <mach/port.h>
109#include <kern/cpu_number.h>
110#include <kern/smp.h>
111#include <kern/queue.h>
112
113#include <kern/timer.h>
114#include <kern/simple_lock.h>
115#include <kern/locks.h>
116#include <kern/sched.h>
117#include <kern/sched_prim.h>
118#include <mach/sfi_class.h>
119#include <kern/thread_call.h>
120#include <kern/thread_group.h>
121#include <kern/timer_call.h>
122#include <kern/task.h>
123#include <kern/exception.h>
124#include <kern/affinity.h>
125#include <kern/debug.h>
126#include <kern/block_hint.h>
127#include <kern/recount.h>
128#include <kern/turnstile.h>
129#include <kern/mpsc_queue.h>
130
131#include <kern/waitq.h>
132#include <san/kasan.h>
133#include <san/kcov_data.h>
134#include <os/refcnt.h>
135
136#include <ipc/ipc_kmsg.h>
137
138#include <machine/atomic.h>
139#include <machine/cpu_data.h>
140#include <machine/thread.h>
141
142#endif /* MACH_KERNEL_PRIVATE */
143#ifdef XNU_KERNEL_PRIVATE
144/* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
145#include <kern/priority_queue.h>
146#endif /* XNU_KERNEL_PRIVATE */
147
148__BEGIN_DECLS
149
150#ifdef XNU_KERNEL_PRIVATE
151#if CONFIG_TASKWATCH
152/* Taskwatch related. TODO: find this a better home */
153typedef struct task_watcher task_watch_t;
154#endif /* CONFIG_TASKWATCH */
155
156/* Thread tags; for easy identification. */
157__options_closed_decl(thread_tag_t, uint16_t, {
158 THREAD_TAG_MAINTHREAD = 0x01,
159 THREAD_TAG_CALLOUT = 0x02,
160 THREAD_TAG_IOWORKLOOP = 0x04,
161 THREAD_TAG_PTHREAD = 0x10,
162 THREAD_TAG_WORKQUEUE = 0x20,
163 THREAD_TAG_USER_JOIN = 0x40,
164});
165
166typedef struct thread_ro *thread_ro_t;
167
168/*!
169 * @struct thread_ro
170 *
171 * @brief
172 * A structure allocated in a read only zone that safely
173 * represents the linkages of a thread to its cred, proc, task, ...
174 *
175 * @discussion
176 * The lifetime of a @c thread_ro structure is 1:1 with that
177 * of a @c thread_t or a @c uthread_t and holding a thread reference
178 * always allows to dereference this structure safely.
179 */
180struct thread_ro {
181 struct thread *tro_owner;
182#if MACH_BSD
183 __xnu_struct_group(thread_ro_creds, tro_creds, {
184 /*
185 * @c tro_cred holds the current thread credentials.
186 *
187 * For most threads, this is a cache of the proc's
188 * credentials that has been updated at the last
189 * syscall boundary via current_cached_proc_cred_update().
190 *
191 * If the thread assumed a different identity using settid(),
192 * then the proc cached credential lives in @c tro_realcred
193 * instead.
194 */
195 struct ucred *tro_cred;
196 struct ucred *tro_realcred;
197 });
198 struct proc *tro_proc;
199 struct proc_ro *tro_proc_ro;
200#endif
201 struct task *tro_task;
202
203 struct ipc_port *tro_self_port;
204 struct ipc_port *tro_settable_self_port; /* send right */
205 struct ipc_port *tro_ports[THREAD_SELF_PORT_COUNT]; /* no right */
206
207 struct exception_action *tro_exc_actions;
208};
209
210/*
211 * Flags for `thread set status`.
212 */
213__options_decl(thread_set_status_flags_t, uint32_t, {
214 TSSF_FLAGS_NONE = 0,
215
216 /* Translate the state to user. */
217 TSSF_TRANSLATE_TO_USER = 0x01,
218
219 /* Translate the state to user. Preserve flags */
220 TSSF_PRESERVE_FLAGS = 0x02,
221
222 /* Check kernel signed flag */
223 TSSF_CHECK_USER_FLAGS = 0x04,
224
225 /* Allow only user state PTRS */
226 TSSF_ALLOW_ONLY_USER_PTRS = 0x08,
227
228 /* Generate random diversifier and stash it */
229 TSSF_RANDOM_USER_DIV = 0x10,
230
231 /* Stash sigreturn token */
232 TSSF_STASH_SIGRETURN_TOKEN = 0x20,
233
234 /* Check sigreturn token */
235 TSSF_CHECK_SIGRETURN_TOKEN = 0x40,
236
237 /* Allow only matching sigreturn token */
238 TSSF_ALLOW_ONLY_MATCHING_TOKEN = 0x80,
239
240 /* Stash diversifier from thread */
241 TSSF_THREAD_USER_DIV = 0x100,
242
243 /* Check for entitlement */
244 TSSF_CHECK_ENTITLEMENT = 0x200,
245});
246
247/*
248 * Size in bits of compact thread id (ctid).
249 */
250#define CTID_SIZE_BIT 20
251typedef uint32_t ctid_t;
252
253#endif /* XNU_KERNEL_PRIVATE */
254#ifdef MACH_KERNEL_PRIVATE
255
256extern zone_t thread_ro_zone;
257
258__options_decl(thread_work_interval_flags_t, uint32_t, {
259 TH_WORK_INTERVAL_FLAGS_NONE = 0x0,
260#if CONFIG_SCHED_AUTO_JOIN
261 /* Flags to indicate status about work interval thread is currently part of */
262 TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK = 0x1,
263#endif /* CONFIG_SCHED_AUTO_JOIN */
264 TH_WORK_INTERVAL_FLAGS_HAS_WORKLOAD_ID = 0x2,
265 TH_WORK_INTERVAL_FLAGS_RT_ALLOWED = 0x4,
266});
267
268#if CONFIG_EXCLAVES
269/* Thread exclaves interrupt-safe state bits (ORd) */
270__options_decl(thread_exclaves_intstate_flags_t, uint32_t, {
271 /* Thread is currently executing in secure kernel or exclaves userspace
272 * or was interrupted/preempted while doing so. */
273 TH_EXCLAVES_EXECUTION = 0x1,
274});
275
276__options_decl(thread_exclaves_state_flags_t, uint16_t, {
277/* Thread exclaves state bits (ORd) */
278 /* Thread is handling RPC from a client in xnu or Darwin userspace (but
279 * may have returned to xnu due to an exclaves scheduler request or having
280 * upcalled). Must not re-enter exclaves via RPC or return to Darwin
281 * userspace. */
282 TH_EXCLAVES_RPC = 0x1,
283 /* Thread has made an upcall RPC request back into xnu while handling RPC
284 * into exclaves from a client in xnu or Darwin userspace. Must not
285 * re-enter exclaves via RPC or return to Darwin userspace. */
286 TH_EXCLAVES_UPCALL = 0x2,
287 /* Thread has made an exclaves scheduler request (such as a wait or wake)
288 * from the xnu scheduler while handling RPC into exclaves from a client in
289 * xnu or Darwin userspace. Must not re-enter exclaves via RPC or return to
290 * Darwin userspace. */
291 TH_EXCLAVES_SCHEDULER_REQUEST = 0x4,
292 /* Thread is calling into xnu proxy server directly (but may have
293 * returned to xnu due to an exclaves scheduler request or having
294 * upcalled). Must not re-enter exclaves or return to Darwin userspace.
295 */
296 TH_EXCLAVES_XNUPROXY = 0x8,
297 /* Thread is calling into the exclaves scheduler directly.
298 * Must not re-enter exclaves or return to Darwin userspace.
299 */
300 TH_EXCLAVES_SCHEDULER_CALL = 0x10,
301 /* Thread has called the stop upcall and once the thread returns from
302 * downcall, exit_with_reason needs to be called on the task.
303 */
304 TH_EXCLAVES_STOP_UPCALL_PENDING = 0x20,
305});
306#define TH_EXCLAVES_STATE_ANY ( \
307 TH_EXCLAVES_RPC | \
308 TH_EXCLAVES_UPCALL | \
309 TH_EXCLAVES_SCHEDULER_REQUEST | \
310 TH_EXCLAVES_XNUPROXY | \
311 TH_EXCLAVES_SCHEDULER_CALL)
312
313__options_decl(thread_exclaves_inspection_flags_t, uint16_t, {
314 /* Thread is on Stackshot's inspection queue */
315 TH_EXCLAVES_INSPECTION_STACKSHOT = 0x1,
316 /* Thread is on Kperf's inspection queue */
317 TH_EXCLAVES_INSPECTION_KPERF = 0x2,
318 /* Thread must not be inspected (may deadlock, etc.) - set by collector thread*/
319 TH_EXCLAVES_INSPECTION_NOINSPECT = 0x8000,
320});
321
322#endif /* CONFIG_EXCLAVES */
323
324typedef union thread_rr_state {
325 uint32_t trr_value;
326 struct {
327#define TRR_FAULT_NONE 0
328#define TRR_FAULT_PENDING 1
329#define TRR_FAULT_OBSERVED 2
330 /*
331 * Set to TRR_FAULT_PENDING with interrupts disabled
332 * by the thread when it is entering a user fault codepath.
333 *
334 * Moved to TRR_FAULT_OBSERVED from TRR_FAULT_PENDING:
335 * - by the thread if at IPI time,
336 * - or by task_restartable_ranges_synchronize() if the thread
337 * is interrupted (under the thread lock)
338 *
339 * Cleared by the thread when returning from a user fault
340 * codepath.
341 */
342 uint8_t trr_fault_state;
343
344 /*
345 * Set by task_restartable_ranges_synchronize()
346 * if trr_fault_state is TRR_FAULT_OBSERVED
347 * and a rendez vous at the AST is required.
348 *
349 * Set atomically if trr_fault_state == TRR_FAULT_OBSERVED,
350 * and trr_ipi_ack_pending == 0
351 */
352 uint8_t trr_sync_waiting;
353
354 /*
355 * Updated under the thread_lock(),
356 * set by task_restartable_ranges_synchronize()
357 * when the thread was IPIed and the caller is waiting
358 * for an ACK.
359 */
360 uint16_t trr_ipi_ack_pending;
361 };
362} thread_rr_state_t;
363
364struct thread {
365#if MACH_ASSERT
366#define THREAD_MAGIC 0x1234ABCDDCBA4321ULL
367 /* Ensure nothing uses &thread as a queue entry */
368 uint64_t thread_magic;
369#endif /* MACH_ASSERT */
370
371 /*
372 * NOTE: The runq field in the thread structure has an unusual
373 * locking protocol. If its value is PROCESSOR_NULL, then it is
374 * locked by the thread_lock, but if its value is something else
375 * then it is locked by the associated run queue lock. It is
376 * set to PROCESSOR_NULL without holding the thread lock, but the
377 * transition from PROCESSOR_NULL to non-null must be done
378 * under the thread lock and the run queue lock. To enforce the
379 * protocol, runq should only be accessed using the
380 * thread_get/set/clear_runq functions and locked variants below.
381 *
382 * New waitq APIs allow the 'links' and '__runq' fields to be
383 * anywhere in the thread structure.
384 */
385 union {
386 queue_chain_t runq_links; /* run queue links */
387 queue_chain_t wait_links; /* wait queue links */
388 struct mpsc_queue_chain mpsc_links; /* thread daemon mpsc links */
389 struct priority_queue_entry_sched wait_prioq_links; /* priority ordered waitq links */
390 };
391
392 event64_t wait_event; /* wait queue event */
393 struct { processor_t runq; } __runq; /* internally managed run queue assignment, see above comment */
394 waitq_t waitq; /* wait queue this thread is enqueued on */
395 struct turnstile *turnstile; /* thread's turnstile, protected by primitives interlock */
396 void *inheritor; /* inheritor of the primitive the thread will block on */
397 struct priority_queue_sched_max sched_inheritor_queue; /* Inheritor queue for kernel promotion */
398 struct priority_queue_sched_max base_inheritor_queue; /* Inheritor queue for user promotion */
399
400#if CONFIG_SCHED_EDGE
401 bool th_bound_cluster_enqueued;
402 bool th_shared_rsrc_enqueued[CLUSTER_SHARED_RSRC_TYPE_COUNT];
403 bool th_shared_rsrc_heavy_user[CLUSTER_SHARED_RSRC_TYPE_COUNT];
404 bool th_shared_rsrc_heavy_perf_control[CLUSTER_SHARED_RSRC_TYPE_COUNT];
405#endif /* CONFIG_SCHED_EDGE */
406
407#if CONFIG_SCHED_CLUTCH
408 /*
409 * In the clutch scheduler, the threads are maintained in runqs at the clutch_bucket
410 * level (clutch_bucket defines a unique thread group and scheduling bucket pair). The
411 * thread is linked via a couple of linkages in the clutch bucket:
412 *
413 * - A stable priority queue linkage which is the main runqueue (based on sched_pri) for the clutch bucket
414 * - A regular priority queue linkage which is based on thread's base/promoted pri (used for clutch bucket priority calculation)
415 * - A queue linkage used for timesharing operations of threads at the scheduler tick
416 */
417 struct priority_queue_entry_stable th_clutch_runq_link;
418 struct priority_queue_entry_sched th_clutch_pri_link;
419 queue_chain_t th_clutch_timeshare_link;
420#endif /* CONFIG_SCHED_CLUTCH */
421
422 /* Data updated during assert_wait/thread_wakeup */
423 decl_simple_lock_data(, sched_lock); /* scheduling lock (thread_lock()) */
424 decl_simple_lock_data(, wake_lock); /* for thread stop / wait (wake_lock()) */
425 uint16_t options; /* options set by thread itself */
426#define TH_OPT_INTMASK 0x0003 /* interrupt / abort level */
427#define TH_OPT_VMPRIV 0x0004 /* may allocate reserved memory */
428#define TH_OPT_SYSTEM_CRITICAL 0x0010 /* Thread must always be allowed to run - even under heavy load */
429#define TH_OPT_PROC_CPULIMIT 0x0020 /* Thread has a task-wide CPU limit applied to it */
430#define TH_OPT_PRVT_CPULIMIT 0x0040 /* Thread has a thread-private CPU limit applied to it */
431#define TH_OPT_IDLE_THREAD 0x0080 /* Thread is a per-processor idle thread */
432#define TH_OPT_GLOBAL_FORCED_IDLE 0x0100 /* Thread performs forced idle for thermal control */
433#define TH_OPT_SCHED_VM_GROUP 0x0200 /* Thread belongs to special scheduler VM group */
434#define TH_OPT_HONOR_QLIMIT 0x0400 /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */
435#define TH_OPT_SEND_IMPORTANCE 0x0800 /* Thread will allow importance donation from kernel rpc */
436#define TH_OPT_ZONE_PRIV 0x1000 /* Thread may use the zone replenish reserve */
437#define TH_OPT_IPC_TG_BLOCKED 0x2000 /* Thread blocked in sync IPC and has made the thread group blocked callout */
438#define TH_OPT_FORCED_LEDGER 0x4000 /* Thread has a forced CPU limit */
439#define TH_IN_MACH_EXCEPTION 0x8000 /* Thread is currently handling a mach exception */
440
441 bool wake_active; /* wake event on stop */
442 bool at_safe_point; /* thread_abort_safely allowed */
443 uint8_t sched_saved_run_weight;
444#if DEVELOPMENT || DEBUG
445 bool pmap_footprint_suspended;
446#endif /* DEVELOPMENT || DEBUG */
447
448
449 ast_t reason; /* why we blocked */
450 uint32_t quantum_remaining;
451 wait_result_t wait_result; /* outcome of wait -
452 * may be examined by this thread
453 * WITHOUT locking */
454 thread_rr_state_t t_rr_state; /* state for restartable ranges */
455 thread_continue_t continuation; /* continue here next dispatch */
456 void *parameter; /* continuation parameter */
457
458 /* Data updated/used in thread_invoke */
459 vm_offset_t kernel_stack; /* current kernel stack */
460 vm_offset_t reserved_stack; /* reserved kernel stack */
461
462 /*** Machine-dependent state ***/
463 struct machine_thread machine;
464
465#if KASAN
466 struct kasan_thread_data kasan_data;
467#endif
468#if CONFIG_KCOV
469 kcov_thread_data_t kcov_data;
470#endif
471
472 /* Thread state: */
473 int state;
474/*
475 * Thread states [bits or'ed]
476 * All but TH_WAIT_REPORT are encoded in SS_TH_FLAGS
477 * All are encoded in kcdata.py ('ths_state')
478 */
479#define TH_WAIT 0x01 /* queued for waiting */
480#define TH_SUSP 0x02 /* stopped or requested to stop */
481#define TH_RUN 0x04 /* running or on runq */
482#define TH_UNINT 0x08 /* waiting uninteruptibly */
483#define TH_TERMINATE 0x10 /* halted at termination */
484#define TH_TERMINATE2 0x20 /* added to termination queue */
485#define TH_WAIT_REPORT 0x40 /* the wait is using the sched_call,
486 * only set if TH_WAIT is also set */
487#define TH_IDLE 0x80 /* idling processor */
488#define TH_WAKING 0x100 /* between waitq remove and thread_go */
489
490 /* Scheduling information */
491 sched_mode_t sched_mode; /* scheduling mode */
492 sched_mode_t saved_mode; /* saved mode during forced mode demotion */
493
494 /* This thread's contribution to global sched counters */
495 sched_bucket_t th_sched_bucket;
496
497 sfi_class_id_t sfi_class; /* SFI class (XXX Updated on CSW/QE/AST) */
498 sfi_class_id_t sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */
499
500 uint32_t sched_flags; /* current flag bits */
501#define TH_SFLAG_NO_SMT 0x0001 /* On an SMT CPU, this thread must be scheduled alone */
502#define TH_SFLAG_FAILSAFE 0x0002 /* fail-safe has tripped */
503#define TH_SFLAG_THROTTLED 0x0004 /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */
504
505#define TH_SFLAG_PROMOTED 0x0008 /* sched pri has been promoted by kernel mutex priority promotion */
506#define TH_SFLAG_ABORT 0x0010 /* abort interruptible waits */
507#define TH_SFLAG_ABORTSAFELY 0x0020 /* ... but only those at safe point */
508#define TH_SFLAG_ABORTED_MASK (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
509#define TH_SFLAG_DEPRESS 0x0040 /* normal depress yield */
510#define TH_SFLAG_POLLDEPRESS 0x0080 /* polled depress yield */
511#define TH_SFLAG_DEPRESSED_MASK (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
512/* unused TH_SFLAG_PRI_UPDATE 0x0100 */
513#define TH_SFLAG_EAGERPREEMPT 0x0200 /* Any preemption of this thread should be treated as if AST_URGENT applied */
514#define TH_SFLAG_RW_PROMOTED 0x0400 /* promote reason: blocking with RW lock held */
515#define TH_SFLAG_BASE_PRI_FROZEN 0x0800 /* (effective) base_pri is frozen */
516#define TH_SFLAG_WAITQ_PROMOTED 0x1000 /* promote reason: waitq wakeup (generally for IPC receive) */
517
518#if __AMP__
519#define TH_SFLAG_ECORE_ONLY 0x2000 /* (unused) Bind thread to E core processor set */
520#define TH_SFLAG_PCORE_ONLY 0x4000 /* (unused) Bind thread to P core processor set */
521#endif
522
523#define TH_SFLAG_EXEC_PROMOTED 0x8000 /* promote reason: thread is in an exec */
524
525#define TH_SFLAG_THREAD_GROUP_AUTO_JOIN 0x10000 /* thread has been auto-joined to thread group */
526#if __AMP__
527#define TH_SFLAG_BOUND_SOFT 0x20000 /* thread is soft bound to a cluster; can run anywhere if bound cluster unavailable */
528#endif /* __AMP__ */
529
530#if CONFIG_PREADOPT_TG
531#define TH_SFLAG_REEVALUTE_TG_HIERARCHY_LATER 0x40000 /* thread needs to reevaluate its TG hierarchy */
532#endif
533
534#define TH_SFLAG_FLOOR_PROMOTED 0x80000 /* promote reason: boost requested */
535
536/* 'promote reasons' that request a priority floor only, not a custom priority */
537#define TH_SFLAG_PROMOTE_REASON_MASK (TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED | TH_SFLAG_FLOOR_PROMOTED)
538
539#define TH_SFLAG_RT_DISALLOWED 0x100000 /* thread wants RT but may not have joined a work interval that allows it */
540#define TH_SFLAG_DEMOTED_MASK (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE | TH_SFLAG_RT_DISALLOWED) /* saved_mode contains previous sched_mode */
541#define TH_SFLAG_RT_CPULIMIT 0x200000 /* thread should have a CPU limit applied. */
542
543 int16_t sched_pri; /* scheduled (current) priority */
544 int16_t base_pri; /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
545 int16_t req_base_pri; /* requested base priority */
546 int16_t max_priority; /* copy of max base priority */
547 int16_t task_priority; /* copy of task base priority */
548 int16_t promotion_priority; /* priority thread is currently promoted to */
549 uint16_t priority_floor_count; /* number of push to boost the floor priority */
550 int16_t suspend_count; /* Kernel holds on this thread */
551
552 int iotier_override; /* atomic operations to set, cleared on ret to user */
553 os_ref_atomic_t ref_count; /* number of references to me */
554
555 uint32_t rwlock_count; /* Number of lck_rw_t locks held by thread */
556 struct smrq_slist_head smr_stack;
557#ifdef DEBUG_RW
558 rw_lock_debug_t rw_lock_held; /* rw_locks currently held by the thread */
559#endif /* DEBUG_RW */
560
561 integer_t importance; /* task-relative importance */
562
563 /* Priority depression expiration */
564 integer_t depress_timer_active;
565 timer_call_t depress_timer;
566
567 /* real-time parameters */
568 struct { /* see mach/thread_policy.h */
569 uint32_t period;
570 uint32_t computation;
571 uint32_t constraint;
572 bool preemptible;
573 uint8_t priority_offset; /* base_pri = BASEPRI_RTQUEUES + priority_offset */
574 uint64_t deadline;
575 } realtime;
576
577 uint64_t last_run_time; /* time when thread was switched away from */
578 uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */
579 uint64_t last_basepri_change_time; /* time when thread was last changed in basepri while runnable */
580 uint64_t same_pri_latency;
581 /*
582 * workq_quantum_deadline is the workq thread's next runtime deadline. This
583 * value is set to 0 if the thread has no such deadline applicable to it.
584 *
585 * The synchronization for this field is due to how this field is modified
586 * 1) This field is always modified on the thread by itself or on the thread
587 * when it is not running/runnable
588 * 2) Change of this field is immediately followed by a
589 * corresponding change to the AST_KEVENT to either set or clear the
590 * AST_KEVENT_WORKQ_QUANTUM_EXPIRED bit
591 *
592 * workq_quantum_deadline can be modified by the thread on itself during
593 * interrupt context. However, due to (2) and due to the fact that the
594 * change to the AST_KEVENT is volatile, this forces the compiler to
595 * guarantee the order between the write to workq_quantum_deadline and the
596 * kevent field and therefore guarantees the correct synchronization.
597 */
598 uint64_t workq_quantum_deadline;
599
600#if WORKQ_QUANTUM_HISTORY_DEBUG
601
602#define WORKQ_QUANTUM_HISTORY_COUNT 16
603 struct workq_quantum_history {
604 uint64_t time;
605 uint64_t deadline;
606 bool arm;
607 } workq_quantum_history[WORKQ_QUANTUM_HISTORY_COUNT];
608 uint64_t workq_quantum_history_index;
609
610#define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...) ({\
611 thread_t __th = (thread); \
612 uint64_t __index = os_atomic_inc_orig(&thread->workq_quantum_history_index, relaxed); \
613 struct workq_quantum_history _wq_quantum_history = { mach_approximate_time(), __VA_ARGS__}; \
614 __th->workq_quantum_history[__index % WORKQ_QUANTUM_HISTORY_COUNT] = \
615 (struct workq_quantum_history) _wq_quantum_history; \
616 })
617#else /* WORKQ_QUANTUM_HISTORY_DEBUG */
618#define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...)
619#endif /* WORKQ_QUANTUM_HISTORY_DEBUG */
620
621#define THREAD_NOT_RUNNABLE (~0ULL)
622
623#if CONFIG_THREAD_GROUPS
624 struct thread_group *thread_group;
625#endif
626
627#if defined(CONFIG_SCHED_MULTIQ)
628 sched_group_t sched_group;
629#endif /* defined(CONFIG_SCHED_MULTIQ) */
630
631 /* Data used during setrun/dispatch */
632 processor_t bound_processor; /* bound to a processor? */
633 processor_t last_processor; /* processor last dispatched on */
634 processor_t chosen_processor; /* Where we want to run this thread */
635
636 /* Fail-safe computation since last unblock or qualifying yield */
637 uint64_t computation_metered;
638 uint64_t computation_epoch;
639 uint64_t computation_interrupt_epoch;
640 uint64_t safe_release; /* when to release fail-safe */
641
642 /* Call out from scheduler */
643 void (*sched_call)(int type, thread_t thread);
644
645#if defined(CONFIG_SCHED_PROTO)
646 uint32_t runqueue_generation; /* last time runqueue was drained */
647#endif
648
649 /* Statistics and timesharing calculations */
650#if defined(CONFIG_SCHED_TIMESHARE_CORE)
651 natural_t sched_stamp; /* last scheduler tick */
652 natural_t sched_usage; /* timesharing cpu usage [sched] */
653 natural_t pri_shift; /* usage -> priority from pset */
654 natural_t cpu_usage; /* instrumented cpu usage [%cpu] */
655 natural_t cpu_delta; /* accumulated cpu_usage delta */
656#endif /* CONFIG_SCHED_TIMESHARE_CORE */
657
658 uint32_t c_switch; /* total context switches */
659 uint32_t p_switch; /* total processor switches */
660 uint32_t ps_switch; /* total pset switches */
661
662 /* Timing data structures */
663 uint64_t sched_time_save; /* saved time for scheduler tick */
664 uint64_t vtimer_user_save; /* saved values for vtimers */
665 uint64_t vtimer_prof_save;
666 uint64_t vtimer_rlim_save;
667 uint64_t vtimer_qos_save;
668
669 timer_data_t runnable_timer; /* time the thread is runnable (including running) */
670
671 struct recount_thread th_recount; /* resource accounting */
672
673#if CONFIG_SCHED_SFI
674 /* Timing for wait state */
675 uint64_t wait_sfi_begin_time; /* start time for thread waiting in SFI */
676#endif
677
678 /*
679 * Processor/cache affinity
680 * - affinity_threads links task threads with the same affinity set
681 */
682 queue_chain_t affinity_threads;
683 affinity_set_t affinity_set;
684
685#if CONFIG_TASKWATCH
686 task_watch_t *taskwatch; /* task watch */
687#endif /* CONFIG_TASKWATCH */
688
689 /* Various bits of state to stash across a continuation, exclusive to the current thread block point */
690 union {
691 struct {
692 mach_msg_return_t state; /* receive state */
693 mach_port_seqno_t seqno; /* seqno of recvd message */
694 ipc_object_t object; /* object received on */
695 mach_vm_address_t msg_addr; /* receive msg buffer pointer */
696 mach_vm_address_t aux_addr; /* receive aux buffer pointer */
697 mach_msg_size_t max_msize; /* max rcv size for msg */
698 mach_msg_size_t max_asize; /* max rcv size for aux data */
699 mach_msg_size_t msize; /* actual size for the msg */
700 mach_msg_size_t asize; /* actual size for aux data */
701 mach_msg_option64_t option; /* 64 bits options for receive */
702 mach_port_name_t receiver_name; /* the receive port name */
703 union {
704 struct ipc_kmsg *XNU_PTRAUTH_SIGNED_PTR("thread.ith_kmsg") kmsg; /* received message */
705#if MACH_FLIPC
706 struct ipc_mqueue *XNU_PTRAUTH_SIGNED_PTR("thread.ith_peekq") peekq; /* mqueue to peek at */
707#endif /* MACH_FLIPC */
708 };
709 } receive;
710 struct {
711 struct semaphore *waitsemaphore; /* semaphore ref */
712 struct semaphore *signalsemaphore; /* semaphore ref */
713 int options; /* semaphore options */
714 kern_return_t result; /* primary result */
715 mach_msg_continue_t continuation;
716 } sema;
717 struct {
718#define THREAD_SAVE_IOKIT_TLS_COUNT 8
719 void *tls[THREAD_SAVE_IOKIT_TLS_COUNT];
720 } iokit;
721 } saved;
722
723 /* Only user threads can cause guard exceptions, only kernel threads can be thread call threads */
724 union {
725 /* Thread call thread's state structure, stored on its stack */
726 struct thread_call_thread_state *thc_state;
727
728 /* Structure to save information about guard exception */
729 struct {
730 mach_exception_code_t code;
731 mach_exception_subcode_t subcode;
732 } guard_exc_info;
733 };
734
735 /* User level suspensions */
736 int32_t user_stop_count;
737
738 /* IPC data structures */
739#if IMPORTANCE_INHERITANCE
740 natural_t ith_assertions; /* assertions pending drop */
741#endif
742 circle_queue_head_t ith_messages; /* messages to reap */
743 mach_port_t ith_kernel_reply_port; /* reply port for kernel RPCs */
744
745 /* Ast/Halt data structures */
746 vm_offset_t recover; /* page fault recover(copyin/out) */
747
748 queue_chain_t threads; /* global list of all threads */
749
750 /* Activation */
751 queue_chain_t task_threads;
752
753 /* Task membership */
754#if __x86_64__ || __arm__
755 struct task *t_task;
756#endif
757 struct thread_ro *t_tro;
758 vm_map_t map;
759 thread_t handoff_thread;
760
761 /* Timed wait expiration */
762 timer_call_t wait_timer;
763 uint16_t wait_timer_active; /* is the call running */
764 bool wait_timer_armed; /* should the wait be cleared */
765
766 /* Miscellaneous bits guarded by mutex */
767 uint32_t
768 active:1, /* Thread is active and has not been terminated */
769 ipc_active:1, /* IPC with the thread ports is allowed */
770 started:1, /* Thread has been started after creation */
771 static_param:1, /* Disallow policy parameter changes */
772 inspection:1, /* TRUE when task is being inspected by crash reporter */
773 policy_reset:1, /* Disallow policy parameter changes on terminating threads */
774 suspend_parked:1, /* thread parked in thread_suspended */
775 corpse_dup:1, /* TRUE when thread is an inactive duplicate in a corpse */
776 :0;
777
778 /* Pending thread ast(s) */
779 os_atomic(ast_t) ast;
780
781 decl_lck_mtx_data(, mutex);
782
783 struct ipc_port *ith_special_reply_port; /* ref to special reply port */
784
785#if CONFIG_DTRACE
786 uint16_t t_dtrace_flags; /* DTrace thread states */
787#define TH_DTRACE_EXECSUCCESS 0x01
788 uint16_t t_dtrace_inprobe; /* Executing under dtrace_probe */
789 uint32_t t_dtrace_predcache; /* DTrace per thread predicate value hint */
790 int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */
791 int64_t t_dtrace_vtime;
792#endif
793
794 clock_sec_t t_page_creation_time;
795 uint32_t t_page_creation_count;
796 uint32_t t_page_creation_throttled;
797#if (DEVELOPMENT || DEBUG)
798 uint64_t t_page_creation_throttled_hard;
799 uint64_t t_page_creation_throttled_soft;
800#endif /* DEVELOPMENT || DEBUG */
801 int t_pagein_error; /* for vm_fault(), holds error from vnop_pagein() */
802
803 mach_port_name_t ith_voucher_name;
804 ipc_voucher_t ith_voucher;
805
806#ifdef KPERF
807/* The high 8 bits are the number of frames to sample of a user callstack. */
808#define T_KPERF_CALLSTACK_DEPTH_OFFSET (24)
809#define T_KPERF_SET_CALLSTACK_DEPTH(DEPTH) (((uint32_t)(DEPTH)) << T_KPERF_CALLSTACK_DEPTH_OFFSET)
810#define T_KPERF_GET_CALLSTACK_DEPTH(FLAGS) ((FLAGS) >> T_KPERF_CALLSTACK_DEPTH_OFFSET)
811#define T_KPERF_ACTIONID_OFFSET (18)
812#define T_KPERF_SET_ACTIONID(AID) (((uint32_t)(AID)) << T_KPERF_ACTIONID_OFFSET)
813#define T_KPERF_GET_ACTIONID(FLAGS) ((FLAGS) >> T_KPERF_ACTIONID_OFFSET)
814#endif
815
816#define T_KPERF_AST_CALLSTACK 0x1 /* dump a callstack on thread's next AST */
817#define T_KPERF_AST_DISPATCH 0x2 /* dump a name on thread's next AST */
818#define T_KPC_ALLOC 0x4 /* thread needs a kpc_buf allocated */
819
820#define T_KPERF_AST_ALL \
821 (T_KPERF_AST_CALLSTACK | T_KPERF_AST_DISPATCH | T_KPC_ALLOC)
822/* only go up to T_KPERF_ACTIONID_OFFSET - 1 */
823
824#ifdef KPERF
825 uint32_t kperf_ast;
826 uint32_t kperf_pet_gen; /* last generation of PET that sampled this thread*/
827 uint32_t kperf_c_switch; /* last dispatch detection */
828 uint32_t kperf_pet_cnt; /* how many times a thread has been sampled by PET */
829#if CONFIG_EXCLAVES
830 uint32_t kperf_exclaves_ast;
831#endif
832#endif
833
834#ifdef CONFIG_CPU_COUNTERS
835 /* accumulated performance counters for this thread */
836 uint64_t *kpc_buf;
837#endif /* CONFIG_CPU_COUNTERS */
838
839#if HYPERVISOR
840 /* hypervisor virtual CPU object associated with this thread */
841 void *hv_thread_target;
842#endif /* HYPERVISOR */
843
844 /* Statistics accumulated per-thread and aggregated per-task */
845 uint32_t syscalls_unix;
846 uint32_t syscalls_mach;
847 ledger_t t_ledger;
848 ledger_t t_threadledger; /* per thread ledger */
849 ledger_t t_bankledger; /* ledger to charge someone */
850 uint64_t t_deduct_bank_ledger_time; /* cpu time to be deducted from bank ledger */
851 uint64_t t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */
852
853 uint64_t thread_id; /* system wide unique thread-id */
854 uint32_t ctid; /* system wide compact thread-id */
855 uint32_t ctsid; /* this thread ts ID */
856
857 /* policy is protected by the thread mutex */
858 struct thread_requested_policy requested_policy;
859 struct thread_effective_policy effective_policy;
860
861 /* usynch override is protected by the task lock, eventually will be thread mutex */
862 struct thread_qos_override {
863 struct thread_qos_override *override_next;
864 uint32_t override_contended_resource_count;
865 int16_t override_qos;
866 int16_t override_resource_type;
867 user_addr_t override_resource;
868 } *overrides;
869
870 uint32_t kevent_overrides;
871 uint8_t user_promotion_basepri;
872 uint8_t kern_promotion_schedpri;
873 _Atomic uint16_t kevent_ast_bits;
874
875 io_stat_info_t thread_io_stats; /* per-thread I/O statistics */
876
877 uint32_t thread_callout_interrupt_wakeups;
878 uint32_t thread_callout_platform_idle_wakeups;
879 uint32_t thread_timer_wakeups_bin_1;
880 uint32_t thread_timer_wakeups_bin_2;
881 thread_tag_t thread_tag;
882
883 /*
884 * callout_* fields are only set for thread call threads whereas guard_exc_fatal is set
885 * by user threads on themselves while taking a guard exception. So it's okay for them to
886 * share this bitfield.
887 */
888 uint16_t
889 callout_woken_from_icontext:1,
890 callout_woken_from_platform_idle:1,
891 callout_woke_thread:1,
892 guard_exc_fatal:1,
893 thread_bitfield_unused:12;
894
895#define THREAD_BOUND_CLUSTER_NONE (UINT32_MAX)
896 uint32_t th_bound_cluster_id;
897
898#if CONFIG_THREAD_GROUPS
899#if CONFIG_PREADOPT_TG
900 /* The preadopt thread group is set on the thread
901 *
902 * a) By another thread when it is a creator and it is scheduled with the
903 * thread group on the TR
904 * b) On itself when it binds a thread request and becomes a
905 * servicer or when it rebinds to the thread request
906 * c) On itself when it processes knotes and finds the first
907 * EVFILT_MACHPORT event to deliver to userspace
908 *
909 * Note that this is a full reference owned by the thread_t and not a
910 * borrowed reference.
911 *
912 * This reference is cleared from the thread_t by the thread itself at the
913 * following times:
914 * a) When it explicitly adopts a work interval or a bank voucher
915 * b) If it still exists on the thread, after it has unbound and is about
916 * to park
917 * c) During thread termination if one still exists
918 * d) When a different preadoption thread group is set on the thread
919 *
920 * It is modified under the thread lock.
921 */
922 struct thread_group *preadopt_thread_group;
923
924 /* This field here is present in order to make sure that the t->thread_group
925 * is always pointing to a valid thread group and isn't a dangling pointer.
926 *
927 * Consider the following scenario:
928 * a) t->thread_group points to the preadoption thread group
929 * b) The preadoption thread group is modified on the thread but we are
930 * unable to resolve the hierarchy immediately due to the current state of
931 * the thread
932 *
933 * In order to make sure that t->thread_group points to a valid thread
934 * group until we can resolve the hierarchy again, we save the existing
935 * thread_group it points to in old_preadopt_thread_group. The next time a
936 * hierarchy resolution is done, we know that t->thread_group will not point
937 * to this field anymore so we can clear it.
938 *
939 * This field is always going to take the reference that was previously in
940 * preadopt_thread_group so it will have a full +1
941 */
942 struct thread_group *old_preadopt_thread_group;
943#endif /* CONFIG_PREADOPT_TG */
944
945 /* This is a borrowed reference to the TG from the ith_voucher and is saved
946 * here since we may not always be in the right context to able to do the
947 * lookups.
948 *
949 * It is set always set on self under the thread lock */
950 struct thread_group *bank_thread_group;
951
952 /* Whether this is the autojoin thread group or the work interval thread
953 * group depends on whether the thread's sched_flags has the
954 * TH_SFLAG_THREAD_GROUP_AUTO_JOIN bit set */
955 union {
956 /* This is a borrowed reference to the auto join thread group from the
957 * work_interval. It is set with the thread lock held */
958 struct thread_group *auto_join_thread_group;
959 /* This is a borrowed reference to the explicit work_interval thread group
960 * and is always set on self */
961 struct thread_group *work_interval_thread_group;
962 };
963#endif /* CONFIG_THREAD_GROUPS */
964
965 /* work interval (if any) associated with the thread. Only modified by
966 * current thread on itself or when another thread when the thread is held
967 * off of runq */
968 struct work_interval *th_work_interval;
969 thread_work_interval_flags_t th_work_interval_flags;
970
971#if SCHED_TRACE_THREAD_WAKEUPS
972 uintptr_t thread_wakeup_bt[64];
973#endif
974 turnstile_update_flags_t inheritor_flags; /* inheritor flags for inheritor field */
975 block_hint_t pending_block_hint;
976 block_hint_t block_hint; /* What type of primitive last caused us to block. */
977 uint32_t decompressions; /* Per-thread decompressions counter to be added to per-task decompressions counter */
978 int thread_region_page_shift; /* Page shift that this thread would like to use when */
979 /* introspecting a task. This is currently being used */
980 /* by footprint which uses a thread for each task being inspected. */
981#if CONFIG_SCHED_RT_ALLOW
982 /* Used when a thread is requested to set/clear its own CPU limit */
983 uint32_t
984 t_ledger_req_action:2,
985 t_ledger_req_percentage:7,
986 t_ledger_req_interval_ms:16,
987 :0;
988#endif /* CONFIG_SCHED_RT_ALLOW */
989
990#if CONFIG_IOSCHED
991 void *decmp_upl;
992#endif /* CONFIG_IOSCHED */
993 struct knote *ith_knote; /* knote fired for rcv */
994
995#if CONFIG_SPTM
996 /* TXM thread stack associated with this thread */
997 uintptr_t txm_thread_stack;
998#endif
999
1000#if CONFIG_EXCLAVES
1001 /* Per-thread IPC buffer for exclaves communication. Only modified by the
1002 * current thread on itself. */
1003 void *th_exclaves_ipc_buffer;
1004 /* Exclaves scheduling context ID corresponding to IPC buffer, communicated
1005 * to the exclaves scheduler component. Only modified by the current
1006 * thread on itself. */
1007 uint64_t th_exclaves_scheduling_context_id;
1008 /* Thread exclaves interrupt-safe state. Only mutated by the current thread
1009 * on itself with interrupts disabled, and only ever read by the current
1010 * thread (with no locking), including from interrupt context, or during
1011 * debug/stackshot. */
1012 thread_exclaves_intstate_flags_t th_exclaves_intstate;
1013 /* Thread exclaves state. Only mutated by the current thread on itself, and
1014 * only ever read by the current thread (with no locking). Unsafe to read
1015 * from interrupt context. */
1016 thread_exclaves_state_flags_t th_exclaves_state;
1017 /* Thread stackshot state. Prevents returning to Exclave world until after
1018 * an external agent has triggered inspection (likely via Exclave stackshot),
1019 * and woken this thread. */
1020 thread_exclaves_inspection_flags_t _Atomic th_exclaves_inspection_state;
1021 /* Task for which conclave teardown is being called by this thread. Used
1022 * for context by conclave crash info upcall to find the task for appending
1023 * the conclave crash info. */
1024 task_t conclave_stop_task;
1025 /* Queue of threads being inspected by Stackshot.
1026 * Modified under exclaves_collect_mtx. */
1027 queue_chain_t th_exclaves_inspection_queue_stackshot;
1028 /* Queue of threads being inspected by kperf.
1029 * Modified under exclaves_collect_mtx. */
1030 queue_chain_t th_exclaves_inspection_queue_kperf;
1031#endif /* CONFIG_EXCLAVES */
1032};
1033
1034#define ith_state saved.receive.state
1035#define ith_seqno saved.receive.seqno
1036#define ith_object saved.receive.object
1037#define ith_msg_addr saved.receive.msg_addr
1038#define ith_aux_addr saved.receive.aux_addr
1039#define ith_max_msize saved.receive.max_msize
1040#define ith_max_asize saved.receive.max_asize
1041#define ith_msize saved.receive.msize
1042#define ith_asize saved.receive.asize
1043#define ith_option saved.receive.option
1044#define ith_receiver_name saved.receive.receiver_name
1045#define ith_kmsg saved.receive.kmsg
1046#if MACH_FLIPC
1047#define ith_peekq saved.receive.peekq
1048#endif /* MACH_FLIPC */
1049
1050#define sth_waitsemaphore saved.sema.waitsemaphore
1051#define sth_signalsemaphore saved.sema.signalsemaphore
1052#define sth_options saved.sema.options
1053#define sth_result saved.sema.result
1054#define sth_continuation saved.sema.continuation
1055
1056#define ITH_KNOTE_NULL ((void *)NULL)
1057#define ITH_KNOTE_PSEUDO ((void *)0xdeadbeef)
1058/*
1059 * The ith_knote is used during message delivery, and can safely be interpreted
1060 * only when used for one of these codepaths, which the test for the msgt_name
1061 * being RECEIVE or SEND_ONCE is about.
1062 */
1063#define ITH_KNOTE_VALID(kn, msgt_name) \
1064 (((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO) && \
1065 ((msgt_name) == MACH_MSG_TYPE_PORT_RECEIVE || \
1066 (msgt_name) == MACH_MSG_TYPE_PORT_SEND_ONCE))
1067
1068#if MACH_ASSERT
1069#define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \
1070 "bad thread magic 0x%llx for thread %p, expected 0x%llx", \
1071 (thread)->thread_magic, (thread), THREAD_MAGIC)
1072#else
1073#define assert_thread_magic(thread) do { (void)(thread); } while (0)
1074#endif
1075
1076extern thread_t thread_bootstrap(void);
1077
1078extern void thread_machine_init_template(void);
1079
1080extern void thread_init(void);
1081
1082extern void thread_daemon_init(void);
1083
1084extern void thread_reference(
1085 thread_t thread);
1086
1087extern void thread_deallocate(
1088 thread_t thread);
1089
1090extern void thread_inspect_deallocate(
1091 thread_inspect_t thread);
1092
1093extern void thread_read_deallocate(
1094 thread_read_t thread);
1095
1096extern void thread_terminate_self(void);
1097
1098extern kern_return_t thread_terminate_internal(
1099 thread_t thread);
1100
1101extern void thread_start(
1102 thread_t thread) __attribute__ ((noinline));
1103
1104extern void thread_start_in_assert_wait(
1105 thread_t thread,
1106 struct waitq *waitq,
1107 event64_t event,
1108 wait_interrupt_t interruptible) __attribute__ ((noinline));
1109
1110extern void thread_terminate_enqueue(
1111 thread_t thread);
1112
1113extern void thread_exception_enqueue(
1114 task_t task,
1115 thread_t thread,
1116 exception_type_t etype);
1117
1118extern void thread_backtrace_enqueue(
1119 kcdata_object_t obj,
1120 exception_port_t ports[static BT_EXC_PORTS_COUNT],
1121 exception_type_t etype);
1122
1123extern void thread_copy_resource_info(
1124 thread_t dst_thread,
1125 thread_t src_thread);
1126
1127extern void thread_terminate_crashed_threads(void);
1128
1129extern void thread_stack_enqueue(
1130 thread_t thread);
1131
1132extern void thread_hold(
1133 thread_t thread);
1134
1135extern void thread_release(
1136 thread_t thread);
1137
1138extern void thread_corpse_continue(void) __dead2;
1139
1140extern boolean_t thread_is_active(thread_t thread);
1141
1142extern lck_grp_t thread_lck_grp;
1143
1144/* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */
1145#define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, 0)
1146#define thread_lock(th) simple_lock(&(th)->sched_lock, &thread_lck_grp)
1147#define thread_unlock(th) simple_unlock(&(th)->sched_lock)
1148#define thread_lock_assert(th, x) simple_lock_assert(&(th)->sched_lock, (x))
1149
1150#define wake_lock_init(th) simple_lock_init(&(th)->wake_lock, 0)
1151#define wake_lock(th) simple_lock(&(th)->wake_lock, &thread_lck_grp)
1152#define wake_unlock(th) simple_unlock(&(th)->wake_lock)
1153
1154#define thread_should_halt_fast(thread) (!(thread)->active)
1155
1156extern void stack_alloc(
1157 thread_t thread);
1158
1159extern void stack_handoff(
1160 thread_t from,
1161 thread_t to);
1162
1163extern void stack_free(
1164 thread_t thread);
1165
1166extern void stack_free_reserved(
1167 thread_t thread);
1168
1169extern boolean_t stack_alloc_try(
1170 thread_t thread);
1171
1172extern void stack_collect(void);
1173
1174extern kern_return_t thread_info_internal(
1175 thread_t thread,
1176 thread_flavor_t flavor,
1177 thread_info_t thread_info_out,
1178 mach_msg_type_number_t *thread_info_count);
1179
1180extern kern_return_t kernel_thread_create(
1181 thread_continue_t continuation,
1182 void *parameter,
1183 integer_t priority,
1184 thread_t *new_thread);
1185
1186extern kern_return_t kernel_thread_start_priority(
1187 thread_continue_t continuation,
1188 void *parameter,
1189 integer_t priority,
1190 thread_t *new_thread);
1191
1192extern void machine_stack_attach(
1193 thread_t thread,
1194 vm_offset_t stack);
1195
1196extern vm_offset_t machine_stack_detach(
1197 thread_t thread);
1198
1199extern void machine_stack_handoff(
1200 thread_t old,
1201 thread_t new);
1202
1203extern thread_t machine_switch_context(
1204 thread_t old_thread,
1205 thread_continue_t continuation,
1206 thread_t new_thread);
1207
1208extern void machine_load_context(
1209 thread_t thread) __attribute__((noreturn));
1210
1211extern void machine_thread_state_initialize(
1212 thread_t thread);
1213
1214extern kern_return_t machine_thread_set_state(
1215 thread_t thread,
1216 thread_flavor_t flavor,
1217 thread_state_t state,
1218 mach_msg_type_number_t count);
1219
1220extern mach_vm_address_t machine_thread_pc(
1221 thread_t thread);
1222
1223extern void machine_thread_reset_pc(
1224 thread_t thread,
1225 mach_vm_address_t pc);
1226
1227extern boolean_t machine_thread_on_core(
1228 thread_t thread);
1229
1230extern boolean_t machine_thread_on_core_allow_invalid(
1231 thread_t thread);
1232
1233extern kern_return_t machine_thread_get_state(
1234 thread_t thread,
1235 thread_flavor_t flavor,
1236 thread_state_t state,
1237 mach_msg_type_number_t *count);
1238
1239extern kern_return_t machine_thread_state_convert_from_user(
1240 thread_t thread,
1241 thread_flavor_t flavor,
1242 thread_state_t tstate,
1243 mach_msg_type_number_t count,
1244 thread_state_t old_tstate,
1245 mach_msg_type_number_t old_count,
1246 thread_set_status_flags_t tssf_flags);
1247
1248extern kern_return_t machine_thread_state_convert_to_user(
1249 thread_t thread,
1250 thread_flavor_t flavor,
1251 thread_state_t tstate,
1252 mach_msg_type_number_t *count,
1253 thread_set_status_flags_t tssf_flags);
1254
1255extern kern_return_t machine_thread_dup(
1256 thread_t self,
1257 thread_t target,
1258 boolean_t is_corpse);
1259
1260extern void machine_thread_init(void);
1261
1262extern void machine_thread_template_init(thread_t thr_template);
1263
1264#if __has_feature(ptrauth_calls)
1265extern bool machine_thread_state_is_debug_flavor(int flavor);
1266#endif /* __has_feature(ptrauth_calls) */
1267
1268
1269extern void machine_thread_create(
1270 thread_t thread,
1271 task_t task,
1272 bool first_thread);
1273
1274extern kern_return_t machine_thread_process_signature(
1275 thread_t thread,
1276 task_t task);
1277
1278extern void machine_thread_switch_addrmode(
1279 thread_t thread);
1280
1281extern void machine_thread_destroy(
1282 thread_t thread);
1283
1284extern void machine_set_current_thread(
1285 thread_t thread);
1286
1287extern kern_return_t machine_thread_get_kern_state(
1288 thread_t thread,
1289 thread_flavor_t flavor,
1290 thread_state_t tstate,
1291 mach_msg_type_number_t *count);
1292
1293extern kern_return_t machine_thread_inherit_taskwide(
1294 thread_t thread,
1295 task_t parent_task);
1296
1297extern kern_return_t machine_thread_set_tsd_base(
1298 thread_t thread,
1299 mach_vm_offset_t tsd_base);
1300
1301#define thread_mtx_try(thread) lck_mtx_try_lock(&(thread)->mutex)
1302#define thread_mtx_held(thread) lck_mtx_assert(&(thread)->mutex, LCK_MTX_ASSERT_OWNED)
1303
1304extern void thread_apc_ast(thread_t thread);
1305
1306extern void thread_update_qos_cpu_time(thread_t thread);
1307
1308void act_machine_sv_free(thread_t, int);
1309
1310vm_offset_t min_valid_stack_address(void);
1311vm_offset_t max_valid_stack_address(void);
1312
1313extern bool thread_no_smt(thread_t thread);
1314extern bool processor_active_thread_no_smt(processor_t processor);
1315
1316extern void thread_set_options(uint32_t thopt);
1317
1318#if CONFIG_THREAD_GROUPS
1319struct thread_group *thread_get_current_voucher_thread_group(thread_t thread);
1320#endif /* CONFIG_THREAD_GROUPS */
1321
1322#if CONFIG_COALITIONS
1323uint64_t thread_get_current_voucher_resource_coalition_id(thread_t thread);
1324#endif /* CONFIG_COALITIONS */
1325
1326#endif /* MACH_KERNEL_PRIVATE */
1327#if BSD_KERNEL_PRIVATE
1328
1329/* Duplicated from osfmk/kern/ipc_tt.h */
1330__options_decl(port_intrans_options_t, uint32_t, {
1331 PORT_INTRANS_OPTIONS_NONE = 0x0000,
1332 PORT_INTRANS_THREAD_IN_CURRENT_TASK = 0x0001,
1333 PORT_INTRANS_THREAD_NOT_CURRENT_THREAD = 0x0002,
1334
1335 PORT_INTRANS_SKIP_TASK_EVAL = 0x0004,
1336 PORT_INTRANS_ALLOW_CORPSE_TASK = 0x0008,
1337});
1338
1339extern thread_t port_name_to_thread(
1340 mach_port_name_t port_name,
1341 port_intrans_options_t options);
1342
1343#endif /* BSD_KERNEL_PRIVATE */
1344#ifdef XNU_KERNEL_PRIVATE
1345
1346extern void thread_require(
1347 thread_t thread);
1348
1349extern void thread_deallocate_safe(
1350 thread_t thread);
1351
1352extern uint64_t thread_rettokern_addr(
1353 thread_t thread);
1354
1355extern uint64_t thread_wqquantum_addr(
1356 thread_t thread);
1357
1358extern integer_t thread_kern_get_pri(thread_t thr) __pure2;
1359
1360extern void thread_kern_set_pri(thread_t thr, integer_t pri);
1361
1362extern integer_t thread_kern_get_kernel_maxpri(void) __pure2;
1363
1364uint16_t thread_set_tag(thread_t thread, uint16_t tag);
1365uint16_t thread_get_tag(thread_t thread);
1366
1367__options_decl(shared_rsrc_policy_agent_t, uint32_t, {
1368 SHARED_RSRC_POLICY_AGENT_DISPATCH = 0,
1369 SHARED_RSRC_POLICY_AGENT_SYSCTL = 1,
1370 SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW = 2,
1371 SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM = 3,
1372});
1373
1374boolean_t thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type);
1375kern_return_t thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);
1376kern_return_t thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);
1377
1378#ifdef MACH_KERNEL_PRIVATE
1379static inline thread_tag_t
1380thread_set_tag_internal(thread_t thread, thread_tag_t tag)
1381{
1382 return os_atomic_or_orig(&thread->thread_tag, tag, relaxed);
1383}
1384
1385static inline thread_tag_t
1386thread_get_tag_internal(thread_t thread)
1387{
1388 return thread->thread_tag;
1389}
1390#endif /* MACH_KERNEL_PRIVATE */
1391
1392uint64_t thread_last_run_time(thread_t thread);
1393
1394extern kern_return_t thread_state_initialize(
1395 thread_t thread);
1396
1397extern kern_return_t thread_setstatus(
1398 thread_t thread,
1399 int flavor,
1400 thread_state_t tstate,
1401 mach_msg_type_number_t count);
1402
1403extern kern_return_t thread_setstatus_from_user(
1404 thread_t thread,
1405 int flavor,
1406 thread_state_t tstate,
1407 mach_msg_type_number_t count,
1408 thread_state_t old_tstate,
1409 mach_msg_type_number_t old_count,
1410 thread_set_status_flags_t flags);
1411
1412extern kern_return_t thread_getstatus(
1413 thread_t thread,
1414 int flavor,
1415 thread_state_t tstate,
1416 mach_msg_type_number_t *count);
1417
1418extern void main_thread_set_immovable_pinned(thread_t thread);
1419
1420extern kern_return_t thread_getstatus_to_user(
1421 thread_t thread,
1422 int flavor,
1423 thread_state_t tstate,
1424 mach_msg_type_number_t *count,
1425 thread_set_status_flags_t flags);
1426
1427extern kern_return_t thread_create_with_continuation(
1428 task_t task,
1429 thread_t *new_thread,
1430 thread_continue_t continuation);
1431
1432extern kern_return_t main_thread_create_waiting(task_t task,
1433 thread_continue_t continuation,
1434 event_t event,
1435 thread_t *new_thread);
1436
1437extern kern_return_t thread_create_workq_waiting(
1438 task_t task,
1439 thread_continue_t thread_return,
1440 thread_t *new_thread);
1441
1442extern void thread_yield_internal(
1443 mach_msg_timeout_t interval);
1444
1445extern void thread_yield_to_preemption(void);
1446
1447extern void thread_depress_timer_setup(thread_t self);
1448
1449/*
1450 * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
1451 *
1452 * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
1453 * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
1454 * 3) Disable. Remove any existing CPU limit.
1455 */
1456#define THREAD_CPULIMIT_BLOCK 0x1
1457#define THREAD_CPULIMIT_EXCEPTION 0x2
1458#define THREAD_CPULIMIT_DISABLE 0x3

struct _thread_ledger_indices {
	int cpu_time;
};

extern struct _thread_ledger_indices thread_ledgers;

extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);

extern uint64_t thread_cpulimit_remaining(uint64_t now);
extern bool thread_cpulimit_interval_has_expired(uint64_t now);
extern void thread_cpulimit_restart(uint64_t now);

extern void thread_read_times(
	thread_t thread,
	time_value_t *user_time,
	time_value_t *system_time,
	time_value_t *runnable_time);

extern void thread_read_times_unsafe(
	thread_t thread,
	time_value_t *user_time,
	time_value_t *system_time,
	time_value_t *runnable_time);

extern uint64_t thread_get_runtime_self(void);

extern void thread_setuserstack(
	thread_t thread,
	mach_vm_offset_t user_stack);

extern user_addr_t thread_adjuserstack(
	thread_t thread,
	int adjust);


extern void thread_setentrypoint(
	thread_t thread,
	mach_vm_offset_t entry);

extern kern_return_t thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base);

extern kern_return_t thread_setsinglestep(
	thread_t thread,
	int on);

extern kern_return_t thread_userstack(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *,
	int *,
	boolean_t);

extern kern_return_t thread_entrypoint(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *);

extern kern_return_t thread_userstackdefault(
	mach_vm_offset_t *,
	boolean_t);

extern kern_return_t thread_wire_internal(
	host_priv_t host_priv,
	thread_t thread,
	boolean_t wired,
	boolean_t *prev_state);


extern kern_return_t thread_dup(thread_t);

extern kern_return_t thread_dup2(thread_t, thread_t);

#if !defined(_SCHED_CALL_T_DEFINED)
#define _SCHED_CALL_T_DEFINED
typedef void (*sched_call_t)(
	int type,
	thread_t thread);
#endif

#define SCHED_CALL_BLOCK        0x1
#define SCHED_CALL_UNBLOCK      0x2

extern void thread_sched_call(
	thread_t thread,
	sched_call_t call);
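
/*
 * Usage sketch (illustrative only): a sched_call_t callback that reacts to
 * block/unblock transitions, installed with thread_sched_call().  The
 * counter, the call sites, and clearing the callback by passing NULL are
 * assumptions made for the example.
 *
 *    static unsigned long example_block_events;   // synchronization elided
 *
 *    static void
 *    example_sched_call(int type, thread_t thread)
 *    {
 *        (void)thread;
 *        if (type == SCHED_CALL_BLOCK || type == SCHED_CALL_UNBLOCK) {
 *            example_block_events++;
 *        }
 *    }
 *
 *    static void
 *    example_watch_thread(thread_t thread)
 *    {
 *        thread_sched_call(thread, example_sched_call);   // install
 *        // ... later ...
 *        thread_sched_call(thread, NULL);                 // remove
 *    }
 */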

extern boolean_t thread_is_static_param(
	thread_t thread);

extern task_t get_threadtask(thread_t) __pure2;

extern task_t get_threadtask_early(thread_t) __pure2;

/*
 * Thread is running within a 64-bit address space.
 */
#define thread_is_64bit_addr(thd) \
	task_has_64Bit_addr(get_threadtask(thd))

/*
 * Thread is using 64-bit machine state.
 */
#define thread_is_64bit_data(thd) \
	task_has_64Bit_data(get_threadtask(thd))
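
/*
 * Usage sketch (illustrative only): pick a user-visible word width from the
 * macros above; the surrounding helper is hypothetical.
 *
 *    static size_t
 *    example_user_word_size(thread_t thread)
 *    {
 *        return thread_is_64bit_data(thread) ? sizeof(uint64_t) : sizeof(uint32_t);
 *    }
 */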

struct uthread;

#if defined(__x86_64__)
extern int thread_task_has_ldt(thread_t);
#endif
extern void set_thread_pagein_error(thread_t, int);
extern event_t workq_thread_init_and_wq_lock(task_t, thread_t); // bsd/pthread/

struct proc;
struct uthread;
struct image_params;
extern const size_t uthread_size;
extern thread_ro_t get_thread_ro_unchecked(thread_t) __pure2;
extern thread_ro_t get_thread_ro(thread_t) __pure2;
extern thread_ro_t current_thread_ro_unchecked(void) __pure2;
extern thread_ro_t current_thread_ro(void) __pure2;
extern void clear_thread_ro_proc(thread_t);
extern struct uthread *get_bsdthread_info(thread_t) __pure2;
extern thread_t get_machthread(struct uthread *) __pure2;
extern uint64_t uthread_tid(struct uthread *) __pure2;
extern user_addr_t thread_get_sigreturn_token(thread_t thread);
extern uint32_t thread_get_sigreturn_diversifier(thread_t thread);
extern void uthread_init(task_t, struct uthread *, thread_ro_t, int);
extern void uthread_cleanup_name(struct uthread *uthread);
extern void uthread_cleanup(struct uthread *, thread_ro_t);
extern void uthread_cred_ref(struct ucred *);
extern void uthread_cred_free(struct ucred *);
extern void uthread_destroy(struct uthread *);
extern void uthread_reset_proc_refcount(struct uthread *);

extern void uthread_set_exec_data(struct uthread *uth, struct image_params *imgp);
extern bool uthread_is64bit(struct uthread *uth) __pure2;
#if PROC_REF_DEBUG
extern void uthread_init_proc_refcount(struct uthread *);
extern void uthread_destroy_proc_refcount(struct uthread *);
extern void uthread_assert_zero_proc_refcount(struct uthread *);
#else
#define uthread_init_proc_refcount(uth) ((void)(uth))
#define uthread_destroy_proc_refcount(uth) ((void)(uth))
#define uthread_assert_zero_proc_refcount(uth) ((void)(uth))
#endif
#if CONFIG_DEBUG_SYSCALL_REJECTION
extern uint64_t uthread_get_syscall_rejection_flags(void *);
extern uint64_t *uthread_get_syscall_rejection_mask(void *);
extern uint64_t *uthread_get_syscall_rejection_once_mask(void *);
extern bool uthread_syscall_rejection_is_enabled(void *);
#endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
extern mach_port_name_t uthread_joiner_port(struct uthread *);
extern user_addr_t uthread_joiner_address(struct uthread *);
extern void uthread_joiner_wake(task_t task, struct uthread *);

extern boolean_t thread_should_halt(
	thread_t thread);

extern boolean_t thread_should_abort(
	thread_t);

extern int is_64signalregset(void);

extern void act_set_kperf(thread_t);
extern void act_set_astledger(thread_t thread);
extern void act_set_astledger_async(thread_t thread);
extern void act_set_io_telemetry_ast(thread_t);
extern void act_set_macf_telemetry_ast(thread_t);
extern void act_set_astproc_resource(thread_t);

extern vm_offset_t thread_get_kernel_stack(thread_t);

extern kern_return_t thread_process_signature(thread_t thread, task_t task);

extern uint32_t dtrace_get_thread_predcache(thread_t);
extern int64_t dtrace_get_thread_vtime(thread_t);
extern int64_t dtrace_get_thread_tracing(thread_t);
extern uint16_t dtrace_get_thread_inprobe(thread_t);
extern int dtrace_get_thread_last_cpu_id(thread_t);
extern vm_offset_t dtrace_get_kernel_stack(thread_t);
#define dtrace_get_kernel_stack thread_get_kernel_stack
extern void dtrace_set_thread_predcache(thread_t, uint32_t);
extern void dtrace_set_thread_vtime(thread_t, int64_t);
extern void dtrace_set_thread_tracing(thread_t, int64_t);
extern void dtrace_set_thread_inprobe(thread_t, uint16_t);
extern void dtrace_thread_bootstrap(void);
extern void dtrace_thread_didexec(thread_t);

extern int64_t dtrace_calc_thread_recent_vtime(thread_t);


extern kern_return_t thread_set_wq_state32(
	thread_t thread,
	thread_state_t tstate);

extern kern_return_t thread_set_wq_state64(
	thread_t thread,
	thread_state_t tstate);

extern vm_offset_t kernel_stack_mask;
extern vm_offset_t kernel_stack_size;
extern vm_offset_t kernel_stack_depth_max;

extern void guard_ast(thread_t);
extern void fd_guard_ast(thread_t,
	mach_exception_code_t, mach_exception_subcode_t);
#if CONFIG_VNGUARD
extern void vn_guard_ast(thread_t,
	mach_exception_code_t, mach_exception_subcode_t);
#endif
extern void mach_port_guard_ast(thread_t,
	mach_exception_code_t, mach_exception_subcode_t);
extern void virt_memory_guard_ast(thread_t,
	mach_exception_code_t, mach_exception_subcode_t);
extern void thread_guard_violation(thread_t,
	mach_exception_code_t, mach_exception_subcode_t, boolean_t);
extern void thread_update_io_stats(thread_t, int size, int io_flags);

extern kern_return_t thread_set_voucher_name(mach_port_name_t name);
extern kern_return_t thread_get_voucher_origin_pid(thread_t thread, int32_t *pid);
extern kern_return_t thread_get_voucher_origin_proximate_pid(thread_t thread,
	int32_t *origin_pid, int32_t *proximate_pid);
extern kern_return_t thread_get_current_voucher_origin_pid(int32_t *pid);

extern void thread_enable_send_importance(thread_t thread, boolean_t enable);

/*
 * Translate signal context data pointer to userspace representation
 */

extern kern_return_t machine_thread_siguctx_pointer_convert_to_user(
	thread_t thread,
	user_addr_t *uctxp);

extern void machine_tecs(thread_t thr);

typedef enum cpuvn {
	CPUVN_CI = 1
} cpuvn_e;

extern int machine_csv(cpuvn_e cve);
#if defined(__x86_64__)
extern void machine_thread_set_insn_copy_optout(thread_t thr);
#endif

/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

extern kern_return_t machine_thread_function_pointers_convert_from_user(
	thread_t thread,
	user_addr_t *fptrs,
	uint32_t count);

/*
 * Get the duration of the given thread's last wait.
 */
uint64_t thread_get_last_wait_duration(thread_t thread);

extern bool thread_get_no_smt(void);
#if defined(__x86_64__)
extern bool curtask_get_insn_copy_optout(void);
extern void curtask_set_insn_copy_optout(void);
#endif /* defined(__x86_64__) */

/*! @function ctid_get_thread
 * @abstract Translates a ctid_t to a thread_t.
 * @discussion ctids are system-wide compact thread IDs, associated with a
 * thread_t at thread creation and recycled at thread termination. If a ctid
 * is referenced past the corresponding thread's termination, it is
 * considered stale and the behavior is undefined.
 * Note that this call does not acquire a reference on the thread, so as soon
 * as the matching thread terminates the ctid becomes stale and may be
 * re-used and associated with another thread. You must externally guarantee
 * that the thread will not exit while you are using its ctid.
 * @result The thread_t corresponding to ctid.
 */
extern thread_t ctid_get_thread(ctid_t ctid);

/*! @function ctid_get_thread_unsafe
 * @abstract Translates a ctid_t to a thread_t.
 * @discussion Unsafe variant of ctid_get_thread(), to be used when the
 * caller cannot guarantee the liveness of this ctid_t. May return NULL or a
 * freed thread_t.
 */
extern thread_t ctid_get_thread_unsafe(ctid_t ctid);

/*!
 * @function thread_get_ctid
 * @abstract Returns the ctid of a thread.
 * @param thread The thread whose ctid is requested.
 * @discussion The ctid returned becomes stale after the matching thread
 * terminates.
 * @result The thread's ctid.
 */
extern ctid_t thread_get_ctid(thread_t thread);
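
/*
 * Usage sketch (illustrative only): store a compact thread ID instead of a
 * thread pointer and resolve it later.  The record structure is hypothetical,
 * and the caller must externally guarantee the thread is still alive when
 * ctid_get_thread() is used.
 *
 *    struct example_record {
 *        ctid_t owner_ctid;
 *    };
 *
 *    static void
 *    example_record_owner(struct example_record *rec, thread_t owner)
 *    {
 *        rec->owner_ctid = thread_get_ctid(owner);
 *    }
 *
 *    static thread_t
 *    example_resolve_owner(const struct example_record *rec)
 *    {
 *        // Valid only while the owner is known (externally) not to have exited.
 *        return ctid_get_thread(rec->owner_ctid);
 *    }
 */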

#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE

typedef struct thread_pri_floor {
	thread_t thread;
} thread_pri_floor_t;

#ifdef MACH_KERNEL_PRIVATE
extern void thread_floor_boost_ast(thread_t thread);
extern void thread_floor_boost_set_promotion_locked(thread_t thread);
#endif /* MACH_KERNEL_PRIVATE */

/*! @function thread_priority_floor_start
 * @abstract Boosts the current thread's priority to the floor.
 * @discussion Raises the priority of the current thread to at least MINPRI_FLOOR.
 * The boost is maintained until a corresponding thread_priority_floor_end()
 * is called. Every call to thread_priority_floor_start() must be paired with
 * a call to thread_priority_floor_end() from the same thread.
 * No thread can return to userspace before calling thread_priority_floor_end().
 *
 * NOTE: avoid using this function; prefer gate_t or sleep_with_inheritor()
 * instead.
 * @result A token to be passed to the corresponding thread_priority_floor_end().
 */
extern thread_pri_floor_t thread_priority_floor_start(void);
/*! @function thread_priority_floor_end
 * @abstract Ends the floor boost.
 * @param token The token obtained from thread_priority_floor_start().
 * @discussion Ends the priority floor boost started with thread_priority_floor_start().
 */
extern void thread_priority_floor_end(thread_pri_floor_t *token);
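
/*
 * Usage sketch (illustrative only): hold a priority floor across a short
 * critical section.  As noted above, prefer gate_t or sleep_with_inheritor();
 * the body of the critical section is hypothetical.
 *
 *    static void
 *    example_with_priority_floor(void)
 *    {
 *        thread_pri_floor_t token = thread_priority_floor_start();
 *
 *        // ... short section that must not run below MINPRI_FLOOR ...
 *
 *        thread_priority_floor_end(&token);
 *    }
 */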

extern void thread_set_no_smt(bool set);

extern void thread_mtx_lock(thread_t thread);

extern void thread_mtx_unlock(thread_t thread);

extern uint64_t thread_dispatchqaddr(
	thread_t thread);

bool thread_is_eager_preempt(thread_t thread);
void thread_set_eager_preempt(thread_t thread);
void thread_clear_eager_preempt(thread_t thread);
void thread_set_honor_qlimit(thread_t thread);
void thread_clear_honor_qlimit(thread_t thread);
extern ipc_port_t convert_thread_to_port(thread_t);
extern ipc_port_t convert_thread_to_port_pinned(thread_t);
extern ipc_port_t convert_thread_inspect_to_port(thread_inspect_t);
extern ipc_port_t convert_thread_read_to_port(thread_read_t);
extern boolean_t is_external_pageout_thread(void);
extern boolean_t is_vm_privileged(void);
extern boolean_t set_vm_privilege(boolean_t);
extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name);
extern void *thread_iokit_tls_get(uint32_t index);
extern void thread_iokit_tls_set(uint32_t index, void * data);
extern int thread_self_region_page_shift(void);
extern void thread_self_region_page_shift_set(int pgshift);
extern kern_return_t thread_create_immovable(task_t task, thread_t *new_thread);
extern kern_return_t thread_terminate_pinned(thread_t thread);

struct thread_attr_for_ipc_propagation;
extern kern_return_t thread_get_ipc_propagate_attr(thread_t thread, struct thread_attr_for_ipc_propagation *attr);
extern size_t thread_get_current_exec_path(char *path, size_t size);
#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

extern void
thread_get_thread_name(thread_t th, char* name);

/* Read the runq assignment, under the thread lock. */
extern processor_t thread_get_runq(thread_t thread);

/*
 * Read the runq assignment, under both the thread lock and
 * the pset lock corresponding to the last non-null assignment.
 */
extern processor_t thread_get_runq_locked(thread_t thread);

/*
 * Set the runq assignment to a non-null value, under both the
 * thread lock and the pset lock corresponding to the new
 * assignment.
 */
extern void thread_set_runq_locked(thread_t thread, processor_t new_runq);

/*
 * Set the runq assignment to PROCESSOR_NULL, under the pset
 * lock corresponding to the current non-null assignment.
 */
extern void thread_clear_runq(thread_t thread);

/*
 * Set the runq assignment to PROCESSOR_NULL, under both the
 * thread lock and the pset lock corresponding to the current
 * non-null assignment.
 */
extern void thread_clear_runq_locked(thread_t thread);

/*
 * Assert the runq assignment to be PROCESSOR_NULL, under
 * some guarantee that the runq will not change from null to
 * non-null, such as holding the thread lock.
 */
extern void thread_assert_runq_null(thread_t thread);

/*
 * Assert the runq assignment to be non-null, under the pset
 * lock corresponding to the current non-null assignment.
 */
extern void thread_assert_runq_nonnull(thread_t thread);

extern bool thread_supports_cooperative_workqueue(thread_t thread);
extern void thread_arm_workqueue_quantum(thread_t thread);
extern void thread_disarm_workqueue_quantum(thread_t thread);

extern void thread_evaluate_workqueue_quantum_expiry(thread_t thread);
extern bool thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace);

#if CONFIG_SPTM

extern void
thread_associate_txm_thread_stack(uintptr_t thread_stack);

extern void
thread_disassociate_txm_thread_stack(uintptr_t thread_stack);

extern uintptr_t
thread_get_txm_thread_stack(void);

#endif /* CONFIG_SPTM */

/* Kernel side prototypes for MIG routines */
extern kern_return_t thread_get_exception_ports(
	thread_t thread,
	exception_mask_t exception_mask,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors);

extern kern_return_t thread_get_special_port(
	thread_inspect_t thread,
	int which,
	ipc_port_t *portp);

#endif /* XNU_KERNEL_PRIVATE */

/*! @function thread_has_thread_name
 * @abstract Checks if a thread has a name.
 * @discussion This function takes one input, a thread, and returns
 * a boolean value indicating if that thread already has a name associated
 * with it.
 * @param th The thread to inspect.
 * @result TRUE if the thread has a name, FALSE otherwise.
 */
extern boolean_t thread_has_thread_name(thread_t th);

/*! @function thread_set_thread_name
 * @abstract Set a thread's name.
 * @discussion This function takes two input parameters: a thread to name,
 * and the name to apply to the thread. The name will be copied over to
 * the thread in order to better identify the thread. If the name is
 * longer than MAXTHREADNAMESIZE - 1, it will be truncated.
 * @param th The thread to be named.
 * @param name The name to apply to the thread.
 */
extern void thread_set_thread_name(thread_t th, const char* name);
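
/*
 * Usage sketch (illustrative only): name the current thread only if it is
 * still unnamed; names longer than MAXTHREADNAMESIZE - 1 are truncated by
 * the setter.  The name string is hypothetical.
 *
 *    static void
 *    example_name_current_thread(void)
 *    {
 *        thread_t self = current_thread();
 *
 *        if (!thread_has_thread_name(self)) {
 *            thread_set_thread_name(self, "com.example.worker");
 *        }
 *    }
 */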

#if !MACH_KERNEL_PRIVATE || !defined(current_thread)
extern thread_t current_thread(void) __pure2;
#endif

extern uint64_t thread_tid(thread_t thread) __pure2;

extern void thread_reference(
	thread_t thread);

extern void thread_deallocate(
	thread_t thread);

/*! @function kernel_thread_start
 * @abstract Create a kernel thread.
 * @discussion This function takes three input parameters: a reference to
 * the function the thread should execute, caller-specified data, and a
 * reference used to return the newly created kernel thread. It returns
 * KERN_SUCCESS on success or an appropriate kernel return code indicating
 * the error. The caller is responsible for explicitly releasing the
 * reference to the created thread when it is no longer needed, by calling
 * thread_deallocate(new_thread).
 * @param continuation A C-function pointer where the thread will begin execution.
 * @param parameter Caller-specified data to be passed to the new thread.
 * @param new_thread Reference to the new thread, returned in this parameter.
 * @result Returns KERN_SUCCESS on success or an appropriate kernel return code.
 */

extern kern_return_t kernel_thread_start(
	thread_continue_t continuation,
	void *parameter,
	thread_t *new_thread);
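
/*
 * Usage sketch (illustrative only): start a kernel thread and drop the
 * creation reference once it is running.  The worker routine and the use of
 * thread_terminate() for self-termination are assumptions of the example.
 *
 *    static void
 *    example_worker(void *parameter, wait_result_t wr)
 *    {
 *        (void)parameter;
 *        (void)wr;
 *        // ... do work ...
 *        thread_terminate(current_thread());
 *    }
 *
 *    static kern_return_t
 *    example_spawn_worker(void *arg)
 *    {
 *        thread_t      new_thread;
 *        kern_return_t kr = kernel_thread_start(example_worker, arg, &new_thread);
 *
 *        if (kr == KERN_SUCCESS) {
 *            thread_deallocate(new_thread);   // release the creation reference
 *        }
 *        return kr;
 *    }
 */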

__END_DECLS

#endif /* _KERN_THREAD_H_ */