/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _KERN_KERN_TYPES_H_
#define _KERN_KERN_TYPES_H_

#include <stdint.h>
#include <mach/mach_types.h>
#include <mach/machine/vm_types.h>

#ifdef KERNEL_PRIVATE

#ifndef MACH_KERNEL_PRIVATE

struct zone;

#ifndef __LP64__
struct wait_queue { unsigned int opaque[2]; uintptr_t opaquep[2]; };
#else
struct wait_queue { unsigned char opaque[32]; };
#endif

#endif /* MACH_KERNEL_PRIVATE */

typedef struct zone *zone_t;
#define ZONE_NULL ((zone_t) 0)

typedef struct wait_queue *wait_queue_t;
#define WAIT_QUEUE_NULL ((wait_queue_t) 0)
#define SIZEOF_WAITQUEUE sizeof(struct wait_queue)

typedef void * ipc_kobject_t;
#define IKO_NULL ((ipc_kobject_t) 0)

#endif /* KERNEL_PRIVATE */

typedef void *event_t; /* wait event */
#define NO_EVENT ((event_t) 0)

/*
 * Events are used to selectively wake up threads waiting
 * on a specified wait queue.
 *
 * The NO_EVENT64 value is a special event that is used
 * on wait queues that can be members of wait queue sets
 * for waits/wakeups that need to prepost to the set.
 *
 * This event must be "unique" and it is customary to use
 * a pointer to memory related to the event.
 */
typedef uint64_t event64_t; /* 64 bit wait event */
#define NO_EVENT64 ((event64_t) 0)
#define CAST_EVENT64_T(a_ptr) ((event64_t)((uintptr_t)(a_ptr)))
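
/*
 * Illustrative sketch (not compiled): deriving a unique wait event from a
 * caller-owned object. The pointer itself provides the uniqueness;
 * CAST_EVENT64_T() widens it for interfaces that take an event64_t. The
 * struct and field names here are hypothetical.
 */
#if 0
struct my_object {
	int	mo_state;
	/* ... */
};

static event64_t
my_object_wait_event(struct my_object *mo)
{
	/* The address of a field in the object serves as the unique event. */
	event64_t ev = CAST_EVENT64_T(&mo->mo_state);
	assert(ev != NO_EVENT64);
	return ev;
}
#endif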

/*
 * Possible wait_result_t values.
 */
typedef int wait_result_t;
#define THREAD_WAITING -1 /* thread is waiting */
#define THREAD_AWAKENED 0 /* normal wakeup */
#define THREAD_TIMED_OUT 1 /* timeout expired */
#define THREAD_INTERRUPTED 2 /* aborted/interrupted */
#define THREAD_RESTART 3 /* restart operation entirely */
#define THREAD_NOT_WAITING 10 /* thread didn't need to wait */

typedef void (*thread_continue_t)(void *, wait_result_t);
#define THREAD_CONTINUE_NULL ((thread_continue_t) NULL)
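
/*
 * Illustrative sketch (not compiled): the shape of a thread_continue_t
 * continuation. When a continuation is passed to thread_block() (declared in
 * kern/sched_prim.h), the waiting thread's kernel stack may be discarded and,
 * on wakeup, the continuation is invoked with the wait result instead of
 * thread_block() returning. The function and parameter names are hypothetical.
 */
#if 0
static void
my_wait_continue(void *param, wait_result_t wr)
{
	if (wr != THREAD_AWAKENED) {
		/* interrupted, timed out, etc. - unwind here */
	}
	/* A continuation never returns to its caller; it blocks again or exits. */
}
#endif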

/*
 * Interruptible flag for waits.
 *
 * THREAD_UNINT: Uninterruptible wait
 * Wait will only end when someone explicitly wakes up the thread, or if the
 * wait timeout expires.
 *
 * Use this state if the system as a whole cannot recover from a thread being
 * interrupted out of the wait.
 *
 * THREAD_INTERRUPTIBLE:
 * Wait will end if someone explicitly wakes up the thread, the wait timeout
 * expires, or the current thread is being terminated.
 *
 * This value can be used when your operation may not be cleanly restartable
 * for the current process or thread (i.e. the loss of state would only be
 * visible to the current client). Since the thread is exiting anyway, you're
 * willing to cut the operation short. The system as a whole must be able to
 * cleanly deal with the interruption (i.e. remain in a consistent and
 * recoverable state).
 *
 * THREAD_ABORTSAFE:
 * Wait will end if someone explicitly wakes up the thread, the wait timeout
 * expires, the current thread is being terminated, a signal arrives for
 * the task, or thread_abort_safely() is called on the thread.
 *
 * Using this value means that you are willing to be interrupted in the face
 * of any user signal, and safely rewind the thread back to the user/kernel
 * boundary. Many syscalls will try to restart the operation they were
 * performing after the signal has been handled.
 *
 * You must provide this value for any unbounded wait - otherwise you will
 * pend user signals forever.
 *
 * THREAD_WAIT_NOREPORT:
 * The scheduler has a callback (sched_call) that some subsystems use to
 * decide whether more threads should be thrown at a given problem by trying
 * to maintain a good level of concurrency.
 *
 * When the wait will not be helped by adding more threads (e.g. lock
 * contention), passing this flag to assert_wait* (or any of its wrappers)
 * will prevent the next wait/block from triggering thread creation.
 *
 * This comes in two flavors: THREAD_WAIT_NOREPORT_KERNEL and
 * THREAD_WAIT_NOREPORT_USER, which suppress reporting of the wait for kernel
 * and user threads respectively.
 *
 * Thread interrupt mask:
 *
 * The current maximum interruptible state for the thread, as set by
 * thread_interrupt_level(), limits the conditions that will cause a wake.
 * This lets code that can't tolerate interruption set the level before
 * calling code that isn't aware of that constraint.
 *
 * Thread termination vs safe abort:
 *
 * Termination abort: thread_abort(), thread_terminate()
 *
 * A termination abort is sticky. Once a thread is marked for termination, every
 * THREAD_INTERRUPTIBLE wait will return immediately with THREAD_INTERRUPTED
 * until the thread successfully exits.
 *
 * Safe abort: thread_abort_safely()
 *
 * A safe abort is not sticky. The current wait (or the next wait, if the thread
 * is not currently waiting) will be interrupted, but then the abort condition
 * is cleared. The wait after that will sleep as normal. Safe aborts only have
 * a single effect.
 *
 * The path back to the user/kernel boundary must not make any further unbounded
 * wait calls. The waiter should detect the THREAD_INTERRUPTED return code
 * from an ABORTSAFE wait and return an error code that causes its caller
 * to understand that the current operation has been interrupted, and its
 * caller should return a similar error code, and so on until the
 * user/kernel boundary is reached. For Mach, the error code is usually KERN_ABORTED;
 * for BSD it is EINTR.
 *
 * Debuggers rely on the safe abort mechanism - a signaled thread must return to
 * the AST at the user/kernel boundary for the debugger to finish attaching.
 *
 * No wait/block will ever disappear a thread out from under the waiter. The block
 * call will always either return or call the passed-in continuation.
 */
typedef int wait_interrupt_t;
#define THREAD_UNINT 0x00000000 /* not interruptible */
#define THREAD_INTERRUPTIBLE 0x00000001 /* may not be restartable */
#define THREAD_ABORTSAFE 0x00000002 /* abortable safely */
#define THREAD_WAIT_NOREPORT_KERNEL 0x80000000
#define THREAD_WAIT_NOREPORT_USER 0x40000000
#define THREAD_WAIT_NOREPORT (THREAD_WAIT_NOREPORT_KERNEL | THREAD_WAIT_NOREPORT_USER)
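
/*
 * Illustrative sketch (not compiled): an abortable wait built on the
 * assert_wait()/thread_block()/thread_wakeup() KPIs declared in
 * kern/sched_prim.h. The object, lock, and condition names are hypothetical;
 * the address of the waited-on field serves as the unique event.
 */
#if 0
static kern_return_t
my_wait_for_ready(struct my_object *mo)
{
	wait_result_t wr;

	my_object_lock(mo);
	while (!mo->mo_ready) {
		/* Unbounded wait, so it must be THREAD_ABORTSAFE. */
		assert_wait((event_t)&mo->mo_ready, THREAD_ABORTSAFE);
		my_object_unlock(mo);
		wr = thread_block(THREAD_CONTINUE_NULL);
		if (wr == THREAD_INTERRUPTED) {
			/* Rewind toward the user/kernel boundary. */
			return KERN_ABORTED;
		}
		my_object_lock(mo);
	}
	my_object_unlock(mo);
	return KERN_SUCCESS;
}

/* Producer side: satisfy the condition, then wake any waiters. */
static void
my_mark_ready(struct my_object *mo)
{
	my_object_lock(mo);
	mo->mo_ready = TRUE;
	my_object_unlock(mo);
	thread_wakeup((event_t)&mo->mo_ready);
}
#endif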

typedef int wait_timeout_urgency_t;
#define TIMEOUT_URGENCY_SYS_NORMAL 0x00 /* use default leeway thresholds for system */
#define TIMEOUT_URGENCY_SYS_CRITICAL 0x01 /* use critical leeway thresholds for system */
#define TIMEOUT_URGENCY_SYS_BACKGROUND 0x02 /* use background leeway thresholds for system */

#define TIMEOUT_URGENCY_USER_MASK 0x10 /* mask to identify user timeout urgency classes */
#define TIMEOUT_URGENCY_USER_NORMAL 0x10 /* use default leeway thresholds for user */
#define TIMEOUT_URGENCY_USER_CRITICAL 0x11 /* use critical leeway thresholds for user */
#define TIMEOUT_URGENCY_USER_BACKGROUND 0x12 /* use background leeway thresholds for user */

#define TIMEOUT_URGENCY_MASK 0x13 /* mask to identify timeout urgency */

#define TIMEOUT_URGENCY_LEEWAY 0x20 /* don't ignore provided leeway value */

#define TIMEOUT_URGENCY_FIRST_AVAIL 0x40 /* first available bit outside of urgency mask/leeway */
#define TIMEOUT_URGENCY_RATELIMITED 0x80

/*
 * Timeout and deadline tokens for waits.
 * The following tokens define common values for leeway and deadline parameters.
 */
#define TIMEOUT_NO_LEEWAY (0ULL)
#define TIMEOUT_WAIT_FOREVER (0ULL)
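
/*
 * Illustrative sketch (not compiled): supplying an urgency class and a leeway
 * hint with a timed wait. It assumes the assert_wait_deadline_with_leeway()
 * KPI from kern/sched_prim.h and the clock_interval_to_deadline() /
 * nanoseconds_to_absolutetime() routines from kern/clock.h; the object and
 * field names are hypothetical.
 */
#if 0
static void
my_timed_wait(struct my_object *mo)
{
	uint64_t deadline, leeway;
	wait_result_t wr;

	/* Wake roughly 100ms from now; tolerate up to 5ms of timer coalescing. */
	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
	nanoseconds_to_absolutetime(5 * NSEC_PER_MSEC, &leeway);

	assert_wait_deadline_with_leeway((event_t)&mo->mo_ready,
	    THREAD_INTERRUPTIBLE,
	    TIMEOUT_URGENCY_USER_NORMAL | TIMEOUT_URGENCY_LEEWAY,
	    deadline, leeway);
	wr = thread_block(THREAD_CONTINUE_NULL);
	if (wr == THREAD_TIMED_OUT) {
		/* The deadline (plus leeway) expired before a wakeup arrived. */
	}
}
#endif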

#ifdef KERNEL_PRIVATE

/*
 * n.b. this is defined in thread_call.h, but in the TIMEOUT_URGENCY flags space:
 * #define THREAD_CALL_CONTINUOUS 0x100
 */

#ifdef MACH_KERNEL_PRIVATE

#include <kern/misc_protos.h>
typedef struct clock *clock_t;

typedef struct mig_object *mig_object_t;
#define MIG_OBJECT_NULL ((mig_object_t) 0)

typedef struct mig_notify *mig_notify_t;
#define MIG_NOTIFY_NULL ((mig_notify_t) 0)

typedef struct pset_node *pset_node_t;
#define PSET_NODE_NULL ((pset_node_t) 0)

typedef struct affinity_set *affinity_set_t;
#define AFFINITY_SET_NULL ((affinity_set_t) 0)

typedef struct run_queue *run_queue_t;
#define RUN_QUEUE_NULL ((run_queue_t) 0)

typedef struct grrr_run_queue *grrr_run_queue_t;
#define GRRR_RUN_QUEUE_NULL ((grrr_run_queue_t) 0)

typedef struct grrr_group *grrr_group_t;
#define GRRR_GROUP_NULL ((grrr_group_t) 0)

#if defined(CONFIG_SCHED_MULTIQ)
typedef struct sched_group *sched_group_t;
#define SCHED_GROUP_NULL ((sched_group_t) 0)
#endif /* defined(CONFIG_SCHED_MULTIQ) */

#else /* MACH_KERNEL_PRIVATE */

struct wait_queue_set;
struct _wait_queue_link;

#endif /* MACH_KERNEL_PRIVATE */

typedef struct wait_queue_set *wait_queue_set_t;
#define WAIT_QUEUE_SET_NULL ((wait_queue_set_t)0)
#define SIZEOF_WAITQUEUE_SET wait_queue_set_size()

typedef struct _wait_queue_link *wait_queue_link_t;
#define WAIT_QUEUE_LINK_NULL ((wait_queue_link_t)0)
#define SIZEOF_WAITQUEUE_LINK wait_queue_link_size()

typedef struct perfcontrol_state *perfcontrol_state_t;
#define PERFCONTROL_STATE_NULL ((perfcontrol_state_t)0)
/*
 * Enum to define the event which caused the CLPC callout.
 */
typedef enum perfcontrol_event {
	/*
	 * Thread State Update Events
	 * Used to indicate events that update properties for
	 * a given thread. These events are passed as part of the
	 * sched_perfcontrol_state_update_t callout.
	 */
	QUANTUM_EXPIRY = 1,
	THREAD_GROUP_UPDATE = 2,
	PERFCONTROL_ATTR_UPDATE = 3,
	/*
	 * Context Switch Events
	 * Used to indicate events that switch from one thread
	 * to another. These events are passed as part of the
	 * sched_perfcontrol_csw_t callout.
	 */
	CONTEXT_SWITCH = 10,
	IDLE = 11
} perfcontrol_event;

/*
 * Flags for the sched_perfcontrol_csw_t, sched_perfcontrol_state_update_t
 * & sched_perfcontrol_thread_group_blocked_t/sched_perfcontrol_thread_group_unblocked_t
 * callouts.
 * Currently defined flags are:
 *
 * PERFCONTROL_CALLOUT_WAKE_UNSAFE: Flag to indicate it is unsafe to
 * do a wakeup as part of this callout. If this is set, it
 * indicates that the scheduler holds a spinlock which might be needed
 * in the wakeup path. In that case CLPC should do a thread_call
 * instead of a direct wakeup to run its workloop thread.
 *
 * PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER: Flag to indicate
 * that the render server thread group is blocking/unblocking progress
 * of another thread group. The render server thread group is well
 * known to CLPC, so XNU simply passes this flag instead of taking
 * a reference on it. It is illegal to pass both the TG identity and
 * this flag in the callout; this flag should only be set with the
 * blocking/unblocking TG being NULL.
 */
#define PERFCONTROL_CALLOUT_WAKE_UNSAFE (0x1)
#define PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER (0x2)
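
/*
 * Illustrative sketch (not compiled): how a CLPC-side callout might honor
 * PERFCONTROL_CALLOUT_WAKE_UNSAFE. The handler signature, thread-call handle,
 * and helper names below are hypothetical; only the flag itself is defined by
 * this header.
 */
#if 0
static void
my_clpc_csw_handler(perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp,
    uint32_t flags)
{
	if (flags & PERFCONTROL_CALLOUT_WAKE_UNSAFE) {
		/*
		 * The scheduler may hold a spinlock needed on the wakeup path,
		 * so defer the work to a thread call instead of waking the
		 * workloop thread directly.
		 */
		thread_call_enter(my_clpc_deferred_call);
	} else {
		my_clpc_wake_workloop();
	}
}
#endif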

/*
 * Enum to define the perfcontrol class for a thread.
 * thread_get_perfcontrol_class() takes the thread's
 * priority, QoS, urgency etc. into consideration and
 * produces a value in this enum.
 */
typedef enum perfcontrol_class {
	/* Idle thread */
	PERFCONTROL_CLASS_IDLE = 1,
	/* Kernel thread */
	PERFCONTROL_CLASS_KERNEL = 2,
	/* Realtime Thread */
	PERFCONTROL_CLASS_REALTIME = 3,
	/* Background Thread */
	PERFCONTROL_CLASS_BACKGROUND = 4,
	/* Utility Thread */
	PERFCONTROL_CLASS_UTILITY = 5,
	/* Non-UI Thread (Default/Legacy) */
	PERFCONTROL_CLASS_NONUI = 6,
	/* UI Thread (UI QoS / Per-Frame work) */
	PERFCONTROL_CLASS_UI = 7,
	/* Above UI Thread */
	PERFCONTROL_CLASS_ABOVEUI = 8,
	/* Frame-async UI Thread */
	PERFCONTROL_CLASS_USER_INITIATED = 9,
	/* Maximum class */
	PERFCONTROL_CLASS_MAX = 10,
} perfcontrol_class_t;

typedef enum {
	REASON_NONE,
	REASON_SYSTEM,
	REASON_USER,
	REASON_CLPC_SYSTEM,
	REASON_CLPC_USER,
} processor_reason_t;

#define SHUTDOWN_TEMPORARY 0x0001
#define LOCK_STATE 0x0002
#define UNLOCK_STATE 0x0004
#define WAIT_FOR_START 0x0008
#define WAIT_FOR_LAST_START 0x0010
#if DEVELOPMENT || DEBUG
#define ASSERT_IN_SLEEP 0x10000000
#define ASSERT_POWERDOWN_SUSPENDED 0x20000000
#endif

/*
 * struct sched_clutch_edge
 *
 * Represents an edge from one cluster to another in the Edge Scheduler.
 * An edge has the following properties:
 * - Edge Weight: A value which indicates the likelihood of migrating threads
 *   across that edge. The edge weight is expressed in microseconds (usecs)
 *   of scheduling delay.
 * - Migration Allowed: Bit indicating if migrations are allowed across this
 *   edge from src to dst.
 * - Steal Allowed: Bit indicating whether the dst cluster is allowed to steal
 *   across that edge when a processor in that cluster goes idle.
 *
 * These values can be modified by CLPC for better load balancing, thermal
 * mitigations etc.
 */
typedef union sched_clutch_edge {
	struct {
		uint32_t
		/* boolean_t */ sce_migration_allowed : 1,
		/* boolean_t */ sce_steal_allowed : 1,
		    _reserved : 30;
		uint32_t sce_migration_weight;
	};
	uint64_t sce_edge_packed;
} sched_clutch_edge;
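
/*
 * Illustrative sketch (not compiled): constructing and reading an edge via the
 * union above. The weight and permission values are hypothetical; the packed
 * view lets the whole edge be read or written as a single 64-bit quantity.
 */
#if 0
static void
my_edge_example(void)
{
	sched_clutch_edge edge = {
		.sce_migration_allowed = 1,	/* threads may migrate src -> dst */
		.sce_steal_allowed     = 1,	/* dst may steal when it goes idle */
		.sce_migration_weight  = 50,	/* ~50us of scheduling delay */
	};

	/* Snapshot the edge as one 64-bit value via the packed representation. */
	uint64_t packed = edge.sce_edge_packed;

	if (edge.sce_migration_allowed) {
		/* consider migrating a thread across this edge */
	}
	(void)packed;
}
#endif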

/*
 * Cluster shared resource management
 *
 * The options describe the various shared cluster resource
 * types that can be contended under load and need special
 * handling from the scheduler.
 */
__options_decl(cluster_shared_rsrc_type_t, uint32_t, {
	CLUSTER_SHARED_RSRC_TYPE_RR = 0,
	CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST = 1,
	CLUSTER_SHARED_RSRC_TYPE_COUNT = 2,
	CLUSTER_SHARED_RSRC_TYPE_MIN = CLUSTER_SHARED_RSRC_TYPE_RR,
	CLUSTER_SHARED_RSRC_TYPE_NONE = CLUSTER_SHARED_RSRC_TYPE_COUNT,
});

#endif /* KERNEL_PRIVATE */

#endif /* _KERN_KERN_TYPES_H_ */