/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  File:   sched_prim.h
 *  Author: David Golub
 *
 *  Scheduling primitive definitions file
 *
 */

#ifndef _KERN_SCHED_PRIM_H_
#define _KERN_SCHED_PRIM_H_

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/kern_return.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <sys/cdefs.h>
#include <kern/block_hint.h>

#ifdef MACH_KERNEL_PRIVATE

/* Initialization */
extern void sched_init(void);

extern void sched_startup(void);

extern void sched_timebase_init(void);

extern void pset_rt_init(processor_set_t pset);

extern void sched_rtglobal_init(processor_set_t pset);

extern rt_queue_t sched_rtglobal_runq(processor_set_t pset);

extern void sched_rtglobal_queue_shutdown(processor_t processor);

extern int64_t sched_rtglobal_runq_count_sum(void);

extern void sched_check_spill(processor_set_t pset, thread_t thread);

extern bool sched_thread_should_yield(processor_t processor, thread_t thread);

/* Force a preemption point for a thread and wait for it to stop running */
extern boolean_t thread_stop(
    thread_t thread,
    boolean_t until_not_runnable);

/* Release a previous stop request */
extern void thread_unstop(
    thread_t thread);

/* Wait for a thread to stop running */
extern void thread_wait(
    thread_t thread,
    boolean_t until_not_runnable);
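
/*
 * Illustrative sketch (not part of the interface): thread_stop(),
 * thread_wait() and thread_unstop() bracket an operation that must
 * observe a thread while it is not running.  Assumes the caller holds
 * a reference on the thread:
 *
 *    if (thread_stop(thread, TRUE)) {
 *        // thread is now off-CPU and not runnable; safe to inspect
 *        ...
 *        thread_unstop(thread);    // release the stop request
 *    }
 */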

/* Unblock thread on wake up */
extern boolean_t thread_unblock(
    thread_t thread,
    wait_result_t wresult);

/* Unblock and dispatch thread */
extern kern_return_t thread_go(
    thread_t thread,
    wait_result_t wresult);

/* Handle threads at context switch */
extern void thread_dispatch(
    thread_t old_thread,
    thread_t new_thread);

/* Switch directly to a particular thread */
extern int thread_run(
    thread_t self,
    thread_continue_t continuation,
    void *parameter,
    thread_t new_thread);

/* Resume thread with new stack */
extern void thread_continue(
    thread_t old_thread);

/* Invoke continuation */
extern void call_continuation(
    thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts);

/*
 * Flags that can be passed to set_sched_pri
 * to skip side effects
 */
typedef enum {
    SETPRI_DEFAULT = 0x0,
    SETPRI_LAZY    = 0x1,    /* Avoid setting AST flags or sending IPIs */
} set_sched_pri_options_t;

/* Set the current scheduled priority */
extern void set_sched_pri(
    thread_t thread,
    int priority,
    set_sched_pri_options_t options);

/* Set base priority of the specified thread */
extern void sched_set_thread_base_priority(
    thread_t thread,
    int priority);

/* Set the thread's true scheduling mode */
extern void sched_set_thread_mode(thread_t thread,
    sched_mode_t mode);
/* Demote the true scheduler mode */
extern void sched_thread_mode_demote(thread_t thread,
    uint32_t reason);
/* Un-demote the true scheduler mode */
extern void sched_thread_mode_undemote(thread_t thread,
    uint32_t reason);

extern void sched_thread_promote_to_pri(thread_t thread, int priority, uintptr_t trace_obj);
extern void sched_thread_update_promotion_to_pri(thread_t thread, int priority, uintptr_t trace_obj);
extern void sched_thread_unpromote(thread_t thread, uintptr_t trace_obj);

extern void assert_promotions_invariant(thread_t thread);

extern void sched_thread_promote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);
extern void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t trace_obj);

/* Re-evaluate base priority of thread (thread locked) */
extern void thread_recompute_priority(thread_t thread);

/* Re-evaluate scheduled priority of thread (thread locked) */
extern void thread_recompute_sched_pri(
    thread_t thread,
    set_sched_pri_options_t options);

/* Periodic scheduler activity */
extern void sched_init_thread(void (*)(void));

/* Perform sched_tick housekeeping activities */
extern boolean_t can_update_priority(
    thread_t thread);

extern void update_priority(
    thread_t thread);

extern void lightweight_update_priority(
    thread_t thread);

extern void sched_default_quantum_expire(thread_t thread);

/* Idle processor thread */
extern void idle_thread(void);

extern kern_return_t idle_thread_create(
    processor_t processor);

/* Continuation return from syscall */
extern void thread_syscall_return(
    kern_return_t ret);

/* Context switch */
extern wait_result_t thread_block_reason(
    thread_continue_t continuation,
    void *parameter,
    ast_t reason);

/* Reschedule thread for execution */
extern void thread_setrun(
    thread_t thread,
    integer_t options);

typedef enum {
    SCHED_NONE      = 0x0,
    SCHED_TAILQ     = 0x1,
    SCHED_HEADQ     = 0x2,
    SCHED_PREEMPT   = 0x4,
    SCHED_REBALANCE = 0x8,
} sched_options_t;
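
/*
 * Illustrative sketch (not part of the interface): making a thread
 * runnable at the head of its runqueue and requesting a preemption
 * check on the chosen processor.  Assumes the thread is locked, as
 * thread_setrun() requires:
 *
 *    thread_setrun(thread, SCHED_HEADQ | SCHED_PREEMPT);
 */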

extern processor_set_t task_choose_pset(
    task_t task);

/* Bind the current thread to a particular processor */
extern processor_t thread_bind(
    processor_t processor);

/* Choose the best processor to run a thread */
extern processor_t choose_processor(
    processor_set_t pset,
    processor_t processor,
    thread_t thread);

extern void sched_SMT_balance(
    processor_t processor,
    processor_set_t pset);

extern void thread_quantum_init(
    thread_t thread);

extern void run_queue_init(
    run_queue_t runq);

extern thread_t run_queue_dequeue(
    run_queue_t runq,
    integer_t options);

extern boolean_t run_queue_enqueue(
    run_queue_t runq,
    thread_t thread,
    integer_t options);

extern void run_queue_remove(
    run_queue_t runq,
    thread_t thread);

struct sched_update_scan_context {
    uint64_t earliest_bg_make_runnable_time;
    uint64_t earliest_normal_make_runnable_time;
    uint64_t earliest_rt_make_runnable_time;
};
typedef struct sched_update_scan_context *sched_update_scan_context_t;

extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context);

/*
 * Enum defining the scheduling events that may need IPIs. The IPI
 * policy engine decides what kind of IPI to use based on the
 * destination processor's state, the thread, and one of the following
 * scheduling events.
 */
typedef enum {
    SCHED_IPI_EVENT_BOUND_THR = 0x1,
    SCHED_IPI_EVENT_PREEMPT   = 0x2,
    SCHED_IPI_EVENT_SMT_REBAL = 0x3,
    SCHED_IPI_EVENT_SPILL     = 0x4,
    SCHED_IPI_EVENT_REBALANCE = 0x5,
} sched_ipi_event_t;

/* Enum to define various IPI types used by the scheduler */
typedef enum {
    SCHED_IPI_NONE      = 0x0,
    SCHED_IPI_IMMEDIATE = 0x1,
    SCHED_IPI_IDLE      = 0x2,
    SCHED_IPI_DEFERRED  = 0x3,
} sched_ipi_type_t;

/*
 * The IPI policy engine behaves in the following manner:
 * - All scheduler events which need an IPI invoke sched_ipi_action() with
 *   the appropriate destination processor, thread and event.
 * - sched_ipi_action() performs basic checks, invokes the scheduler-specific
 *   ipi_policy routine and sets pending_AST bits based on the result.
 * - Once the pset lock is dropped, the scheduler invokes the
 *   sched_ipi_perform() routine, which actually sends the appropriate IPI
 *   to the destination core.
 */
extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);
extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi);
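
/*
 * Illustrative sketch of the two-phase pattern described above; assumes
 * pset is the destination processor's processor set, locked by the
 * caller:
 *
 *    sched_ipi_type_t ipi_type;
 *
 *    ipi_type = sched_ipi_action(processor, thread, FALSE,
 *        SCHED_IPI_EVENT_PREEMPT);
 *    pset_unlock(pset);
 *    sched_ipi_perform(processor, ipi_type);
 */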

/* sched_ipi_policy() is the global default IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event);

/* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */
extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset,
    processor_t dst, sched_ipi_event_t event);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

extern boolean_t thread_update_add_thread(thread_t thread);
extern void thread_update_process_threads(void);
extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context);

extern void sched_timeshare_init(void);
extern void sched_timeshare_timebase_init(void);
extern void sched_timeshare_maintenance_continue(void);

extern boolean_t priority_is_urgent(int priority);
extern uint32_t sched_timeshare_initial_quantum_size(thread_t thread);

extern int sched_compute_timeshare_priority(thread_t thread);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/* Remove thread from its run queue */
extern boolean_t thread_run_queue_remove(thread_t thread);
extern thread_t thread_run_queue_remove_for_handoff(thread_t thread);

/* Put a thread back in the run queue after being yanked */
extern void thread_run_queue_reinsert(thread_t thread, integer_t options);

extern void thread_timer_expire(
    void *thread,
    void *p1);

extern boolean_t thread_eager_preemption(
    thread_t thread);

extern boolean_t sched_generic_direct_dispatch_to_idle_processors;

/* Set the maximum interrupt level for the thread */
__private_extern__ wait_interrupt_t thread_interrupt_level(
    wait_interrupt_t interruptible);

__private_extern__ wait_result_t thread_mark_wait_locked(
    thread_t thread,
    wait_interrupt_t interruptible);

/* Wake up locked thread directly, passing result */
__private_extern__ kern_return_t clear_wait_internal(
    thread_t thread,
    wait_result_t result);

extern void sched_stats_handle_csw(
    processor_t processor,
    int reasons,
    int selfpri,
    int otherpri);

extern void sched_stats_handle_runq_change(
    struct runq_stats *stats,
    int old_count);

#if DEBUG

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \
do { \
    if (__builtin_expect(sched_stats_active, 0)) { \
        sched_stats_handle_csw((processor), \
            (reasons), (selfpri), (otherpri)); \
    } \
} while (0)

#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) \
do { \
    if (__builtin_expect(sched_stats_active, 0)) { \
        sched_stats_handle_runq_change((stats), \
            (old_count)); \
    } \
} while (0)

#else /* DEBUG */

#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) do { } while (0)
#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) do { } while (0)

#endif /* DEBUG */

extern uint32_t sched_debug_flags;
#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS         0x00000001
#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002

#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do { \
    if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \
        KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
    } \
} while (0)

#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do { \
    if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \
        KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \
    } \
} while (0)

#define THREAD_URGENCY_NONE       0  /* indicates that there is no currently runnable thread */
#define THREAD_URGENCY_BACKGROUND 1  /* indicates that the thread is marked as a "background" thread */
#define THREAD_URGENCY_NORMAL     2  /* indicates that the thread is marked as a "normal" thread */
#define THREAD_URGENCY_REAL_TIME  3  /* indicates that the thread is marked as a "real-time" or urgent thread */
#define THREAD_URGENCY_MAX        4  /* Marker */
/* Returns the "urgency" of a thread (provided by scheduler) */
extern int thread_get_urgency(
    thread_t thread,
    uint64_t *rt_period,
    uint64_t *rt_deadline);
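
/*
 * Illustrative sketch: querying the urgency of a thread.  rt_period and
 * rt_deadline are only meaningful when the returned urgency is
 * THREAD_URGENCY_REAL_TIME:
 *
 *    uint64_t rt_period, rt_deadline;
 *    int urgency = thread_get_urgency(thread, &rt_period, &rt_deadline);
 *    if (urgency == THREAD_URGENCY_REAL_TIME) {
 *        // honor rt_period/rt_deadline when picking a power state
 *    }
 */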

/* Tells the "urgency" of the just-scheduled thread (provided by CPU PM) */
extern void thread_tell_urgency(
    int urgency,
    uint64_t rt_period,
    uint64_t rt_deadline,
    uint64_t sched_latency,
    thread_t nthread);

/* Tells if there are "active" RT threads in the system (provided by CPU PM) */
extern void active_rt_threads(
    boolean_t active);

/* Returns the perfcontrol attribute for the thread */
extern perfcontrol_class_t thread_get_perfcontrol_class(
    thread_t thread);

/* Generic routine for non-AMP schedulers to calculate parallelism */
extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

/* Toggles a global override to turn off CPU Throttling */
extern void sys_override_cpu_throttle(boolean_t enable_override);

/*
 ****************** Only exported until BSD stops using ********************
 */

extern void thread_vm_bind_group_add(void);

/* Wake up thread directly, passing result */
extern kern_return_t clear_wait(
    thread_t thread,
    wait_result_t result);

/* Start thread running */
extern void thread_bootstrap_return(void) __attribute__((noreturn));

/* Return from exception (BSD-visible interface) */
extern void thread_exception_return(void) __dead2;

#define SCHED_STRING_MAX_LENGTH (48)
/* String declaring the name of the current scheduler */
extern char sched_string[SCHED_STRING_MAX_LENGTH];

extern thread_t port_name_to_thread_for_ulock(mach_port_name_t thread_name);

/* Attempt to context switch to a specific runnable thread */
extern wait_result_t thread_handoff_deallocate(thread_t thread);

__attribute__((nonnull(1, 2)))
extern void thread_handoff_parameter(thread_t thread,
    thread_continue_t continuation, void *parameter) __dead2;

extern struct waitq *assert_wait_queue(event_t event);

extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority);

extern thread_t thread_wakeup_identify(event_t event, int priority);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
/* Set pending block hint for a particular object before we go into a wait state */
extern void thread_set_pending_block_hint(
    thread_t thread,
    block_hint_t block_hint);

#define QOS_PARALLELISM_COUNT_LOGICAL 0x1
#define QOS_PARALLELISM_REALTIME      0x2
extern uint32_t qos_max_parallelism(int qos, uint64_t options);
#endif /* KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE
extern void thread_yield_with_continuation(
    thread_continue_t continuation,
    void *parameter) __dead2;
#endif

/* Context switch */
extern wait_result_t thread_block(
    thread_continue_t continuation);

extern wait_result_t thread_block_parameter(
    thread_continue_t continuation,
    void *parameter);
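
/*
 * Illustrative sketch: blocking with a continuation.  When a continuation
 * is supplied, thread_block() does not return to the caller; the thread
 * resumes in the continuation on a fresh kernel stack with the wait
 * result.  example_continue is an assumed name for illustration:
 *
 *    static void
 *    example_continue(void *parameter, wait_result_t wresult)
 *    {
 *        // resumes here after wakeup; never returns to the blocker
 *    }
 *
 *    assert_wait(event, THREAD_UNINT);
 *    thread_block_parameter(example_continue, parameter);
 *    // not reached
 */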

/* Declare thread will wait on a particular event */
extern wait_result_t assert_wait(
    event_t event,
    wait_interrupt_t interruptible);

/* Assert that the thread intends to wait with a timeout */
extern wait_result_t assert_wait_timeout(
    event_t event,
    wait_interrupt_t interruptible,
    uint32_t interval,
    uint32_t scale_factor);

/* Assert that the thread intends to wait with an urgency, timeout and leeway */
extern wait_result_t assert_wait_timeout_with_leeway(
    event_t event,
    wait_interrupt_t interruptible,
    wait_timeout_urgency_t urgency,
    uint32_t interval,
    uint32_t leeway,
    uint32_t scale_factor);

extern wait_result_t assert_wait_deadline(
    event_t event,
    wait_interrupt_t interruptible,
    uint64_t deadline);

/* Assert that the thread intends to wait with an urgency, deadline, and leeway */
extern wait_result_t assert_wait_deadline_with_leeway(
    event_t event,
    wait_interrupt_t interruptible,
    wait_timeout_urgency_t urgency,
    uint64_t deadline,
    uint64_t leeway);

/* Wake up thread (or threads) waiting on a particular event */
extern kern_return_t thread_wakeup_prim(
    event_t event,
    boolean_t one_thread,
    wait_result_t result);

#define thread_wakeup(x) \
    thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_with_result(x, z) \
    thread_wakeup_prim((x), FALSE, (z))
#define thread_wakeup_one(x) \
    thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)

/* Wakeup the specified thread if it is waiting on this event */
extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread);
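
/*
 * Illustrative sketch of the basic wait/wakeup handshake.  The event is
 * any stable kernel address both sides agree on; the flag and the
 * locking that must make the check-and-wait atomic against the waker
 * are assumptions for the example:
 *
 *    // Waiter (check and assert_wait must be atomic vs. the waker):
 *    while (!flag) {
 *        assert_wait((event_t)&flag, THREAD_UNINT);
 *        thread_block(THREAD_CONTINUE_NULL);
 *    }
 *
 *    // Waker:
 *    flag = TRUE;
 *    thread_wakeup((event_t)&flag);
 */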

extern boolean_t preemption_enabled(void);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Scheduler algorithm indirection. If only one algorithm is
 * enabled at compile-time, a direct function call is used.
 * If more than one is enabled, calls are dispatched through
 * a function pointer table.
 */

#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ)
#error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX
#endif

#if DEBUG
#define SCHED(f) (sched_current_dispatch->f)
#else /* DEBUG */

/*
 * For DEV & REL kernels, use a static dispatch table instead of
 * the indirect function table.
 */
extern const struct sched_dispatch_table sched_dualq_dispatch;
#define SCHED(f) (sched_dualq_dispatch.f)

#endif /* DEBUG */
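
/*
 * Illustrative: whichever dispatch mechanism is compiled in, callers
 * invoke scheduler policy through the SCHED() macro, e.g.:
 *
 *    processor = SCHED(choose_processor)(pset, processor, thread);
 */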

struct sched_dispatch_table {
    const char *sched_name;
    void (*init)(void);                              /* Init global state */
    void (*timebase_init)(void);                     /* Timebase-dependent initialization */
    void (*processor_init)(processor_t processor);   /* Per-processor scheduler init */
    void (*pset_init)(processor_set_t pset);         /* Per-processor set scheduler init */

    void (*maintenance_continuation)(void);          /* Function called regularly */

    /*
     * Choose a thread of greater or equal priority from the per-processor
     * runqueue for timeshare/fixed threads
     */
    thread_t (*choose_thread)(
        processor_t processor,
        int priority,
        ast_t reason);

    /* True if scheduler supports stealing threads */
    boolean_t steal_thread_enabled;

    /*
     * Steal a thread from another processor in the pset so that it can run
     * immediately
     */
    thread_t (*steal_thread)(
        processor_set_t pset);

    /*
     * Compute priority for a timeshare thread based on base priority.
     */
    int (*compute_timeshare_priority)(thread_t thread);

    /*
     * Pick the best processor for a thread (any kind of thread) to run on.
     */
    processor_t (*choose_processor)(
        processor_set_t pset,
        processor_t processor,
        thread_t thread);
    /*
     * Enqueue a timeshare or fixed priority thread onto the per-processor
     * runqueue
     */
    boolean_t (*processor_enqueue)(
        processor_t processor,
        thread_t thread,
        integer_t options);

    /* Migrate threads away in preparation for processor shutdown */
    void (*processor_queue_shutdown)(
        processor_t processor);

    /* Remove the specific thread from the per-processor runqueue */
    boolean_t (*processor_queue_remove)(
        processor_t processor,
        thread_t thread);

    /*
     * Does the per-processor runqueue have any timeshare or fixed priority
     * threads on it? Called without the pset lock held, so the caller must
     * not assume immutability while executing.
     */
    boolean_t (*processor_queue_empty)(processor_t processor);

    /*
     * Would this priority trigger an urgent preemption if it's sitting
     * on the per-processor runqueue?
     */
    boolean_t (*priority_is_urgent)(int priority);

    /*
     * Does the per-processor runqueue contain runnable threads that
     * should cause the currently-running thread to be preempted?
     */
    ast_t (*processor_csw_check)(processor_t processor);

    /*
     * Does the per-processor runqueue contain a runnable thread
     * of > or >= priority, as a preflight for choose_thread() or other
     * thread selection
     */
    boolean_t (*processor_queue_has_priority)(processor_t processor,
        int priority,
        boolean_t gte);

    /* Quantum size for the specified non-realtime thread. */
    uint32_t (*initial_quantum_size)(thread_t thread);

    /* Scheduler mode for a new thread */
    sched_mode_t (*initial_thread_sched_mode)(task_t parent_task);

    /*
     * Is it safe to call update_priority, which may change a thread's
     * runqueue or other state?  This can be used to throttle changes
     * to dynamic priority.
     */
    boolean_t (*can_update_priority)(thread_t thread);

    /*
     * Update both scheduled priority and other persistent state.
     * Side effects may include migration to another processor's runqueue.
     */
    void (*update_priority)(thread_t thread);

    /* Lower overhead update to scheduled priority and state. */
    void (*lightweight_update_priority)(thread_t thread);

    /* Callback for non-realtime threads when the quantum timer fires */
    void (*quantum_expire)(thread_t thread);

    /*
     * Number of runnable threads on the per-processor runqueue. Should
     * only be used for relative comparisons of load between processors.
     */
    int (*processor_runq_count)(processor_t processor);

    /* Aggregate runcount statistics for per-processor runqueue */
    uint64_t (*processor_runq_stats_count_sum)(processor_t processor);

    boolean_t (*processor_bound_count)(processor_t processor);

    void (*thread_update_scan)(sched_update_scan_context_t scan_context);

    /*
     * If TRUE, use processor->next_thread to pin a thread to an idle
     * processor; if FALSE, threads are enqueued and can be stolen by
     * other processors.
     */
    boolean_t direct_dispatch_to_idle_processors;

    /* Supports more than one pset */
    boolean_t multiple_psets_enabled;
    /* Supports scheduler groups */
    boolean_t sched_groups_enabled;

    /* Supports avoid-processor */
    boolean_t avoid_processor_enabled;

    /* Returns true if this processor should avoid running this thread. */
    bool (*thread_avoid_processor)(processor_t processor, thread_t thread);

    /*
     * Invoked when a processor is about to choose the idle thread.
     * Used to send IPIs to a processor which would be preferred to be
     * idle instead.  Called with the pset lock held; returns with the
     * pset lock unlocked.
     */
    void (*processor_balance)(processor_t processor, processor_set_t pset);
    rt_queue_t (*rt_runq)(processor_set_t pset);
    void (*rt_init)(processor_set_t pset);
    void (*rt_queue_shutdown)(processor_t processor);
    void (*rt_runq_scan)(sched_update_scan_context_t scan_context);
    int64_t (*rt_runq_count_sum)(void);

    uint32_t (*qos_max_parallelism)(int qos, uint64_t options);
    void (*check_spill)(processor_set_t pset, thread_t thread);
    sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
    bool (*thread_should_yield)(processor_t processor, thread_t thread);
};
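
/*
 * Illustrative sketch: a scheduler provides its policy as a statically
 * initialized dispatch table.  sched_example_dispatch and the example_*
 * callbacks below are assumed names for illustration, not a real
 * scheduler:
 *
 *    const struct sched_dispatch_table sched_example_dispatch = {
 *        .sched_name       = "example",
 *        .init             = example_init,
 *        .timebase_init    = example_timebase_init,
 *        .choose_thread    = example_choose_thread,
 *        .choose_processor = example_choose_processor,
 *        ...
 *    };
 */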

#if defined(CONFIG_SCHED_TRADITIONAL)
extern const struct sched_dispatch_table sched_traditional_dispatch;
extern const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch;
#endif

#if defined(CONFIG_SCHED_MULTIQ)
extern const struct sched_dispatch_table sched_multiq_dispatch;
extern const struct sched_dispatch_table sched_dualq_dispatch;
#endif

#if defined(CONFIG_SCHED_PROTO)
extern const struct sched_dispatch_table sched_proto_dispatch;
#endif

#if defined(CONFIG_SCHED_GRRR)
extern const struct sched_dispatch_table sched_grrr_dispatch;
#endif

/*
 * It is an error to invoke any scheduler-related code
 * before this is set up
 */
extern const struct sched_dispatch_table *sched_current_dispatch;

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_SCHED_PRIM_H_ */