/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>

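/*
 * When TRUE, every processor in a pset services a single shared
 * pset run queue; otherwise each processor owns its own run queue.
 */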
static boolean_t
    sched_traditional_use_pset_runqueue = FALSE;

static void
sched_traditional_init(void);

static bool
sched_traditional_steal_thread_enabled(processor_set_t pset);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread,
    sched_options_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

extern void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);

/* Choose a thread from a processor's priority-based runq */
static thread_t sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);

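/* Dispatch table for the traditional (per-processor run queue) scheduler. */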
const struct sched_dispatch_table sched_traditional_dispatch = {
    .sched_name = "traditional",
    .init = sched_traditional_init,
    .timebase_init = sched_timeshare_timebase_init,
    .processor_init = sched_traditional_processor_init,
    .pset_init = sched_traditional_pset_init,
    .maintenance_continuation = sched_timeshare_maintenance_continue,
    .choose_thread = sched_traditional_choose_thread,
    .steal_thread_enabled = sched_traditional_steal_thread_enabled,
    .steal_thread = sched_traditional_steal_thread,
    .compute_timeshare_priority = sched_compute_timeshare_priority,
    .choose_node = sched_choose_node,
    .choose_processor = choose_processor,
    .processor_enqueue = sched_traditional_processor_enqueue,
    .processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove = sched_traditional_processor_queue_remove,
    .processor_queue_empty = sched_traditional_processor_queue_empty,
    .priority_is_urgent = priority_is_urgent,
    .processor_csw_check = sched_traditional_processor_csw_check,
    .processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
    .can_update_priority = can_update_priority,
    .update_priority = update_priority,
    .lightweight_update_priority = lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
    .processor_bound_count = sched_traditional_processor_bound_count,
    .thread_update_scan = sched_traditional_thread_update_scan,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
    .avoid_processor_enabled = FALSE,
    .thread_avoid_processor = NULL,
    .processor_balance = sched_SMT_balance,

    .rt_runq = sched_rtlocal_runq,
    .rt_init = sched_rtlocal_init,
    .rt_queue_shutdown = sched_rtlocal_queue_shutdown,
    .rt_runq_scan = sched_rtlocal_runq_scan,
    .rt_runq_count_sum = sched_rtlocal_runq_count_sum,
    .rt_steal_thread = sched_rtlocal_steal_thread,

    .qos_max_parallelism = sched_qos_max_parallelism,
    .check_spill = sched_check_spill,
    .ipi_policy = sched_ipi_policy,
    .thread_should_yield = sched_thread_should_yield,
    .run_count_incr = sched_run_incr,
    .run_count_decr = sched_run_decr,
    .update_thread_bucket = sched_update_thread_bucket,
    .pset_made_schedulable = sched_pset_made_schedulable,
    .cpu_init_completed = NULL,
    .thread_eligible_for_pset = NULL,
};

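/* Dispatch table for the traditional scheduler with a shared per-pset run queue. */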
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
    .sched_name = "traditional_with_pset_runqueue",
    .init = sched_traditional_with_pset_runqueue_init,
    .timebase_init = sched_timeshare_timebase_init,
    .processor_init = sched_traditional_processor_init,
    .pset_init = sched_traditional_pset_init,
    .maintenance_continuation = sched_timeshare_maintenance_continue,
    .choose_thread = sched_traditional_choose_thread,
    .steal_thread_enabled = sched_steal_thread_enabled,
    .steal_thread = sched_traditional_steal_thread,
    .compute_timeshare_priority = sched_compute_timeshare_priority,
    .choose_node = sched_choose_node,
    .choose_processor = choose_processor,
    .processor_enqueue = sched_traditional_processor_enqueue,
    .processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove = sched_traditional_processor_queue_remove,
    .processor_queue_empty = sched_traditional_with_pset_runqueue_processor_queue_empty,
    .priority_is_urgent = priority_is_urgent,
    .processor_csw_check = sched_traditional_processor_csw_check,
    .processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
    .can_update_priority = can_update_priority,
    .update_priority = update_priority,
    .lightweight_update_priority = lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
    .processor_bound_count = sched_traditional_processor_bound_count,
    .thread_update_scan = sched_traditional_thread_update_scan,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
    .avoid_processor_enabled = FALSE,
    .thread_avoid_processor = NULL,
    .processor_balance = sched_SMT_balance,

    .rt_runq = sched_rtlocal_runq,
    .rt_init = sched_rtlocal_init,
    .rt_queue_shutdown = sched_rtlocal_queue_shutdown,
    .rt_runq_scan = sched_rtlocal_runq_scan,
    .rt_runq_count_sum = sched_rtlocal_runq_count_sum,
    .rt_steal_thread = sched_rtlocal_steal_thread,

    .qos_max_parallelism = sched_qos_max_parallelism,
    .check_spill = sched_check_spill,
    .ipi_policy = sched_ipi_policy,
    .thread_should_yield = sched_thread_should_yield,
    .run_count_incr = sched_run_incr,
    .run_count_decr = sched_run_decr,
    .update_thread_bucket = sched_update_thread_bucket,
    .pset_made_schedulable = sched_pset_made_schedulable,
    .cpu_init_completed = NULL,
    .thread_eligible_for_pset = NULL,
};

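/*
 * sched_traditional_init:
 *
 * Initialize the traditional scheduler by bringing up the common
 * timeshare scheduling machinery.
 */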
static void
sched_traditional_init(void)
{
    sched_timeshare_init();
}

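/*
 * sched_traditional_with_pset_runqueue_init:
 *
 * As sched_traditional_init, but additionally switch all processors
 * in a pset onto the shared pset run queue.
 */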
static void
sched_traditional_with_pset_runqueue_init(void)
{
    sched_timeshare_init();
    sched_traditional_use_pset_runqueue = TRUE;
}

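/*
 * sched_traditional_processor_init:
 *
 * Initialize the processor's own run queue (unless the shared pset
 * run queue is in use) and reset its bound-thread count.
 */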
static void
sched_traditional_processor_init(processor_t processor)
{
    if (!sched_traditional_use_pset_runqueue) {
        run_queue_init(&processor->runq);
    }
    processor->runq_bound_count = 0;
}

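/*
 * sched_traditional_pset_init:
 *
 * Initialize the shared pset run queue (when in use) and reset the
 * pset-wide bound-thread count.
 */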
static void
sched_traditional_pset_init(processor_set_t pset)
{
    if (sched_traditional_use_pset_runqueue) {
        run_queue_init(&pset->pset_runq);
    }
    pset->pset_runq_bound_count = 0;
}

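/*
 * runq_for_processor:
 *
 * Return the run queue this processor services: the shared pset run
 * queue when enabled, otherwise the processor's own run queue.
 */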
__attribute__((always_inline))
static inline run_queue_t
runq_for_processor(processor_t processor)
{
    if (sched_traditional_use_pset_runqueue) {
        return &processor->processor_set->pset_runq;
    } else {
        return &processor->runq;
    }
}

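/*
 * runq_consider_incr_bound_count:
 *
 * Account for a processor-bound thread being added to the run queue;
 * unbound threads are ignored.
 */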
__attribute__((always_inline))
static inline void
runq_consider_incr_bound_count(processor_t processor,
    thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL) {
        return;
    }

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue) {
        processor->processor_set->pset_runq_bound_count++;
    }

    processor->runq_bound_count++;
}

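/*
 * runq_consider_decr_bound_count:
 *
 * Account for a processor-bound thread being removed from the run
 * queue; unbound threads are ignored.
 */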
__attribute__((always_inline))
static inline void
runq_consider_decr_bound_count(processor_t processor,
    thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL) {
        return;
    }

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue) {
        processor->processor_set->pset_runq_bound_count--;
    }

    processor->runq_bound_count--;
}

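/*
 * sched_traditional_choose_thread:
 *
 * Select the next thread for the processor from its run queue,
 * updating the bound-thread accounting if one is found.
 */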
static thread_t
sched_traditional_choose_thread(
    processor_t processor,
    int priority,
    __unused ast_t reason)
{
    thread_t thread;

    thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
    if (thread != THREAD_NULL) {
        runq_consider_decr_bound_count(processor, thread);
    }

    return thread;
}

/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it. Only choose a thread with greater or equal
 * priority.
 *
 * Associated pset must be locked. Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
    processor_t processor,
    run_queue_t rq,
    int priority)
{
    circle_queue_t queue = rq->queues + rq->highq;
    int pri = rq->highq;
    int count = rq->count;
    thread_t thread;

    while (count > 0 && pri >= priority) {
        cqe_foreach_element_safe(thread, queue, runq_links) {
            if (thread->bound_processor == PROCESSOR_NULL ||
                thread->bound_processor == processor) {
                circle_dequeue(queue, &thread->runq_links);

                thread_clear_runq(thread);
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (circle_queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }
                return thread;
            }
            count--;
        }

        queue--; pri--;
    }

    return THREAD_NULL;
}

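/*
 * sched_traditional_initial_thread_sched_mode:
 *
 * Threads of the kernel task start out fixed-priority; all other
 * threads start out timeshare.
 */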
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task) {
        return TH_MODE_FIXED;
    } else {
        return TH_MODE_TIMESHARE;
    }
}

/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue. Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t processor,
    thread_t thread,
    sched_options_t options)
{
    run_queue_t rq = runq_for_processor(processor);
    boolean_t result;

    result = run_queue_enqueue(rq, thread, options);
    thread_set_runq_locked(thread, processor);
    runq_consider_incr_bound_count(processor, thread);

    return result;
}

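/*
 * sched_traditional_processor_queue_empty:
 *
 * Return TRUE if the processor's run queue contains no threads.
 */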
static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
    return runq_for_processor(processor)->count == 0;
}

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    int count = runq_for_processor(processor)->count;

    /*
     * The pset runq contains the count of all runnable threads
     * for all processors in the pset. However, for threads that
     * are bound to another processor, the current "processor"
     * is not eligible to execute the thread. So we only
     * include bound threads that are bound to the current
     * "processor". This allows the processor to idle when the
     * count of eligible threads drops to 0, even if there's
     * a runnable thread bound to a different processor in the
     * shared runq.
     */

    count -= pset->pset_runq_bound_count;
    count += processor->runq_bound_count;

    return count == 0;
}

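/*
 * sched_traditional_processor_csw_check:
 *
 * Determine whether the processor should preempt its current thread:
 * preempt when the run queue holds a higher-priority thread, or an
 * equal-priority thread once the current thread is past its first
 * timeslice, adding AST_URGENT when urgent threads are queued.
 */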
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
    run_queue_t runq;
    boolean_t has_higher;

    assert(processor->active_thread != NULL);

    runq = runq_for_processor(processor);

    if (processor->first_timeslice) {
        has_higher = (runq->highq > processor->current_pri);
    } else {
        has_higher = (runq->highq >= processor->current_pri);
    }

    if (has_higher) {
        if (runq->urgency > 0) {
            return AST_PREEMPT | AST_URGENT;
        }

        return AST_PREEMPT;
    }

    return AST_NONE;
}

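/*
 * sched_traditional_processor_queue_has_priority:
 *
 * Return TRUE if the highest queued priority is at or above the given
 * priority (gte), or strictly above it otherwise.
 */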
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor,
    int priority,
    boolean_t gte)
{
    if (gte) {
        return runq_for_processor(processor)->highq >= priority;
    } else {
        return runq_for_processor(processor)->highq > priority;
    }
}

static int
sched_traditional_processor_runq_count(processor_t processor)
{
    return runq_for_processor(processor)->count;
}

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
    return runq_for_processor(processor)->runq_stats.count_sum;
}

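/*
 * With the shared pset run queue, report run-queue statistics only for
 * the lowest-numbered CPU in the pset so the shared queue is not
 * counted once per processor.
 */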
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
    if (processor->cpu_id == processor->processor_set->cpu_set_low) {
        return runq_for_processor(processor)->runq_stats.count_sum;
    } else {
        return 0ULL;
    }
}

static int
sched_traditional_processor_bound_count(processor_t processor)
{
    return processor->runq_bound_count;
}

/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shutdown a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    run_queue_t rq = runq_for_processor(processor);
    circle_queue_t queue = rq->queues + rq->highq;
    int pri = rq->highq;
    int count = rq->count;
    thread_t thread;
    circle_queue_head_t tqueue;

    circle_queue_init(&tqueue);

    while (count > 0) {
        cqe_foreach_element_safe(thread, queue, runq_links) {
            if (thread->bound_processor == PROCESSOR_NULL) {
                circle_dequeue(queue, &thread->runq_links);

                thread_clear_runq(thread);
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (circle_queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                circle_enqueue_tail(&tqueue, &thread->runq_links);
            }
            count--;
        }

        queue--; pri--;
    }

    pset_unlock(pset);

    while ((thread = cqe_dequeue_head(&tqueue, struct thread, runq_links)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
    thread_t thread)
{
    processor_set_t pset;
    run_queue_t rq;

    pset = processor->processor_set;
    pset_lock(pset);

    rq = runq_for_processor(processor);

    if (processor == thread_get_runq_locked(thread)) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        runq_consider_decr_bound_count(processor, thread);
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        thread_assert_runq_null(thread);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return processor != PROCESSOR_NULL;
}

/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked. Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
    run_queue_t rq = runq_for_processor(processor);
    circle_queue_t queue = rq->queues + rq->highq;
    int pri = rq->highq;
    int count = rq->count;
    thread_t thread;

    while (count > 0) {
        cqe_foreach_element_safe(thread, queue, runq_links) {
            if (thread->bound_processor == PROCESSOR_NULL) {
                circle_dequeue(queue, &thread->runq_links);

                thread_clear_runq(thread);
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (circle_queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return thread;
            }
            count--;
        }

        queue--; pri--;
    }

    return THREAD_NULL;
}

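/* Work stealing is always enabled for the traditional scheduler. */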
static bool
sched_traditional_steal_thread_enabled(processor_set_t pset)
{
    (void)pset;
    return true;
}

/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
    processor_set_t nset, cset = pset;
    processor_t processor;
    thread_t thread;

    do {
        uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] |
            pset->cpu_state_map[PROCESSOR_DISPATCHING]);
        for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
            processor = processor_array[cpuid];
            if (runq_for_processor(processor)->count > 0) {
                thread = sched_traditional_steal_processor_thread(processor);
                if (thread != THREAD_NULL) {
                    pset_unlock(cset);

                    return thread;
                }
            }
        }

        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    pset_unlock(cset);

    return THREAD_NULL;
}

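/*
 * sched_traditional_thread_update_scan:
 *
 * Scan every processor's run queue (and idle thread) for threads whose
 * priorities need to be recomputed, then process the collected batch;
 * the scan restarts if the collection buffer fills up.
 */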
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
    boolean_t restart_needed = FALSE;
    processor_t processor = processor_list;
    processor_set_t pset;
    thread_t thread;
    spl_t s;

    do {
        do {
            /*
             * TODO: in sched_traditional_use_pset_runqueue case,
             * avoid scanning the same runq multiple times
             */
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(runq_for_processor(processor), scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed) {
                break;
            }

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_add_thread(thread) == FALSE) {
                    restart_needed = TRUE;
                    break;
                }
            }
        } while ((processor = processor->processor_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();
    } while (restart_needed);
}