/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/sched_prim.h>

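/*
 * When TRUE, the "traditional_with_pset_runqueue" variant is active and all
 * processors in a pset share a single run queue; otherwise each processor
 * has its own run queue.
 */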
static boolean_t
sched_traditional_use_pset_runqueue = FALSE;

static void
sched_traditional_init(void);

static thread_t
sched_traditional_steal_thread(processor_set_t pset);

static thread_t
sched_traditional_steal_processor_thread(processor_t processor);

static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context);

static void
sched_traditional_processor_queue_shutdown(processor_t processor);

static boolean_t
sched_traditional_processor_enqueue(processor_t processor, thread_t thread, integer_t options);

static boolean_t
sched_traditional_processor_queue_remove(processor_t processor, thread_t thread);

static boolean_t
sched_traditional_processor_queue_empty(processor_t processor);

static ast_t
sched_traditional_processor_csw_check(processor_t processor);

static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte);

static int
sched_traditional_processor_runq_count(processor_t processor);

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

static int
sched_traditional_processor_bound_count(processor_t processor);

extern void
sched_traditional_quantum_expire(thread_t thread);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static thread_t
sched_traditional_choose_thread(processor_t processor, int priority, ast_t reason);

/* Choose a thread from a processor's priority-based runq */
static thread_t sched_traditional_choose_thread_from_runq(processor_t processor, run_queue_t runq, int priority);

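/*
 * Dispatch table for the "traditional" scheduler: per-processor run queues,
 * timeshare priority decay, and thread stealing between processors, with
 * direct dispatch to idle processors enabled.
 */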
const struct sched_dispatch_table sched_traditional_dispatch = {
    .sched_name = "traditional",
    .init = sched_traditional_init,
    .timebase_init = sched_timeshare_timebase_init,
    .processor_init = sched_traditional_processor_init,
    .pset_init = sched_traditional_pset_init,
    .maintenance_continuation = sched_timeshare_maintenance_continue,
    .choose_thread = sched_traditional_choose_thread,
    .steal_thread_enabled = TRUE,
    .steal_thread = sched_traditional_steal_thread,
    .compute_timeshare_priority = sched_compute_timeshare_priority,
    .choose_processor = choose_processor,
    .processor_enqueue = sched_traditional_processor_enqueue,
    .processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove = sched_traditional_processor_queue_remove,
    .processor_queue_empty = sched_traditional_processor_queue_empty,
    .priority_is_urgent = priority_is_urgent,
    .processor_csw_check = sched_traditional_processor_csw_check,
    .processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
    .can_update_priority = can_update_priority,
    .update_priority = update_priority,
    .lightweight_update_priority = lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum = sched_traditional_processor_runq_stats_count_sum,
    .processor_bound_count = sched_traditional_processor_bound_count,
    .thread_update_scan = sched_traditional_thread_update_scan,
    .direct_dispatch_to_idle_processors = TRUE,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
    .avoid_processor_enabled = FALSE,
    .thread_avoid_processor = NULL,
    .processor_balance = sched_SMT_balance,

    .rt_runq = sched_rtglobal_runq,
    .rt_init = sched_rtglobal_init,
    .rt_queue_shutdown = sched_rtglobal_queue_shutdown,
    .rt_runq_scan = sched_rtglobal_runq_scan,
    .rt_runq_count_sum = sched_rtglobal_runq_count_sum,

    .qos_max_parallelism = sched_qos_max_parallelism,
    .check_spill = sched_check_spill,
    .ipi_policy = sched_ipi_policy,
    .thread_should_yield = sched_thread_should_yield,
};

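/*
 * Dispatch table for the "traditional_with_pset_runqueue" variant: identical
 * to the traditional scheduler except that all processors in a pset share a
 * single run queue, so the queue-empty check and run queue statistics differ
 * and direct dispatch to idle processors is disabled.
 */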
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
    .sched_name = "traditional_with_pset_runqueue",
    .init = sched_traditional_with_pset_runqueue_init,
    .timebase_init = sched_timeshare_timebase_init,
    .processor_init = sched_traditional_processor_init,
    .pset_init = sched_traditional_pset_init,
    .maintenance_continuation = sched_timeshare_maintenance_continue,
    .choose_thread = sched_traditional_choose_thread,
    .steal_thread_enabled = TRUE,
    .steal_thread = sched_traditional_steal_thread,
    .compute_timeshare_priority = sched_compute_timeshare_priority,
    .choose_processor = choose_processor,
    .processor_enqueue = sched_traditional_processor_enqueue,
    .processor_queue_shutdown = sched_traditional_processor_queue_shutdown,
    .processor_queue_remove = sched_traditional_processor_queue_remove,
    .processor_queue_empty = sched_traditional_with_pset_runqueue_processor_queue_empty,
    .priority_is_urgent = priority_is_urgent,
    .processor_csw_check = sched_traditional_processor_csw_check,
    .processor_queue_has_priority = sched_traditional_processor_queue_has_priority,
    .initial_quantum_size = sched_timeshare_initial_quantum_size,
    .initial_thread_sched_mode = sched_traditional_initial_thread_sched_mode,
    .can_update_priority = can_update_priority,
    .update_priority = update_priority,
    .lightweight_update_priority = lightweight_update_priority,
    .quantum_expire = sched_default_quantum_expire,
    .processor_runq_count = sched_traditional_processor_runq_count,
    .processor_runq_stats_count_sum = sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
    .processor_bound_count = sched_traditional_processor_bound_count,
    .thread_update_scan = sched_traditional_thread_update_scan,
    .direct_dispatch_to_idle_processors = FALSE,
    .multiple_psets_enabled = TRUE,
    .sched_groups_enabled = FALSE,
    .avoid_processor_enabled = FALSE,
    .thread_avoid_processor = NULL,
    .processor_balance = sched_SMT_balance,

    .rt_runq = sched_rtglobal_runq,
    .rt_init = sched_rtglobal_init,
    .rt_queue_shutdown = sched_rtglobal_queue_shutdown,
    .rt_runq_scan = sched_rtglobal_runq_scan,
    .rt_runq_count_sum = sched_rtglobal_runq_count_sum,

    .qos_max_parallelism = sched_qos_max_parallelism,
    .check_spill = sched_check_spill,
    .ipi_policy = sched_ipi_policy,
    .thread_should_yield = sched_thread_should_yield,
};

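/*
 * Both variants initialize the common timeshare scheduler state; the
 * pset-runqueue variant additionally switches runq_for_processor() over to
 * the shared per-pset run queue.
 */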
static void
sched_traditional_init(void)
{
    sched_timeshare_init();
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
    sched_timeshare_init();
    sched_traditional_use_pset_runqueue = TRUE;
}

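/*
 * Per-processor and per-pset initialization: only the run queue actually in
 * use (per-processor or shared per-pset) is initialized, and the counts of
 * bound threads start at zero.
 */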
static void
sched_traditional_processor_init(processor_t processor)
{
    if (!sched_traditional_use_pset_runqueue) {
        run_queue_init(&processor->runq);
    }
    processor->runq_bound_count = 0;
}

static void
sched_traditional_pset_init(processor_set_t pset)
{
    if (sched_traditional_use_pset_runqueue) {
        run_queue_init(&pset->pset_runq);
    }
    pset->pset_runq_bound_count = 0;
}

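/*
 * runq_for_processor:
 *
 * Return the run queue a processor dispatches from: the shared pset run
 * queue when pset run queues are in use, otherwise the processor's own
 * run queue.
 */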
__attribute__((always_inline))
static inline run_queue_t runq_for_processor(processor_t processor)
{
    if (sched_traditional_use_pset_runqueue)
        return &processor->processor_set->pset_runq;
    else
        return &processor->runq;
}

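/*
 * The bound-thread counters track how many enqueued threads may only run on
 * a specific processor. They are adjusted whenever a bound thread is added
 * to or removed from a run queue, and are consulted when deciding whether a
 * processor may idle and how many queued threads are ineligible for
 * stealing.
 */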
__attribute__((always_inline))
static inline void runq_consider_incr_bound_count(processor_t processor,
                                                  thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count++;

    processor->runq_bound_count++;
}

__attribute__((always_inline))
static inline void runq_consider_decr_bound_count(processor_t processor,
                                                  thread_t thread)
{
    if (thread->bound_processor == PROCESSOR_NULL)
        return;

    assert(thread->bound_processor == processor);

    if (sched_traditional_use_pset_runqueue)
        processor->processor_set->pset_runq_bound_count--;

    processor->runq_bound_count--;
}

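/*
 * sched_traditional_choose_thread:
 *
 * Choose a thread at or above the given priority from the processor's run
 * queue, adjusting the bound-thread count if the chosen thread was bound to
 * this processor.
 */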
static thread_t
sched_traditional_choose_thread(
                                processor_t    processor,
                                int            priority,
                                __unused ast_t reason)
{
    thread_t thread;

    thread = sched_traditional_choose_thread_from_runq(processor, runq_for_processor(processor), priority);
    if (thread != THREAD_NULL) {
        runq_consider_decr_bound_count(processor, thread);
    }

    return thread;
}

/*
 * sched_traditional_choose_thread_from_runq:
 *
 * Locate a thread to execute from the processor run queue
 * and return it. Only choose a thread whose priority is
 * greater than or equal to the supplied minimum priority.
 *
 * Associated pset must be locked. Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
                                          processor_t processor,
                                          run_queue_t rq,
                                          int         priority)
{
    queue_t queue = rq->queues + rq->highq;
    int pri = rq->highq;
    int count = rq->count;
    thread_t thread;

    while (count > 0 && pri >= priority) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            if (thread->bound_processor == PROCESSOR_NULL ||
                thread->bound_processor == processor) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return (thread);
            }
            count--;

            thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    return (THREAD_NULL);
}

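/*
 * New threads in the kernel task start in fixed-priority mode; all other
 * threads start as timeshare.
 */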
static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}

/*
 * sched_traditional_processor_enqueue:
 *
 * Enqueue thread on a processor run queue. Thread must be locked,
 * and not already be on a run queue.
 *
 * Returns TRUE if a preemption is indicated based on the state
 * of the run queue.
 *
 * The run queue must be locked (see thread_run_queue_remove()
 * for more info).
 */
static boolean_t
sched_traditional_processor_enqueue(processor_t processor,
                                    thread_t    thread,
                                    integer_t   options)
{
    run_queue_t rq = runq_for_processor(processor);
    boolean_t result;

    result = run_queue_enqueue(rq, thread, options);
    thread->runq = processor;
    runq_consider_incr_bound_count(processor, thread);

    return (result);
}

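/*
 * With per-processor run queues, the queue is empty exactly when its thread
 * count is zero; the shared-runqueue variant below must also account for
 * threads bound to other processors.
 */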
static boolean_t
sched_traditional_processor_queue_empty(processor_t processor)
{
    return runq_for_processor(processor)->count == 0;
}

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    int count = runq_for_processor(processor)->count;

    /*
     * The pset runq contains the count of all runnable threads
     * for all processors in the pset. However, the current
     * "processor" is not eligible to execute threads that are
     * bound to another processor, so only bound threads that
     * are bound to the current "processor" are counted here.
     * This allows the processor to idle when the count of
     * eligible threads drops to 0, even if there's a runnable
     * thread bound to a different processor in the shared runq.
     */

    count -= pset->pset_runq_bound_count;
    count += processor->runq_bound_count;

    return count == 0;
}

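/*
 * sched_traditional_processor_csw_check:
 *
 * Check whether the running thread should be preempted: a strictly higher
 * priority thread preempts during the first quantum, an equal-priority
 * thread preempts once the first quantum has expired, and the AST is marked
 * urgent if the run queue holds urgent-priority threads.
 */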
static ast_t
sched_traditional_processor_csw_check(processor_t processor)
{
    run_queue_t runq;
    boolean_t has_higher;

    assert(processor->active_thread != NULL);

    runq = runq_for_processor(processor);

    if (processor->first_timeslice) {
        has_higher = (runq->highq > processor->current_pri);
    } else {
        has_higher = (runq->highq >= processor->current_pri);
    }

    if (has_higher) {
        if (runq->urgency > 0)
            return (AST_PREEMPT | AST_URGENT);

        return AST_PREEMPT;
    }

    return AST_NONE;
}

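/*
 * Report whether the highest occupied priority level of the run queue is at
 * or above the given priority (gte == TRUE), or strictly above it otherwise.
 */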
static boolean_t
sched_traditional_processor_queue_has_priority(processor_t processor,
                                               int         priority,
                                               boolean_t   gte)
{
    if (gte)
        return runq_for_processor(processor)->highq >= priority;
    else
        return runq_for_processor(processor)->highq > priority;
}

static int
sched_traditional_processor_runq_count(processor_t processor)
{
    return runq_for_processor(processor)->count;
}

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
    return runq_for_processor(processor)->runq_stats.count_sum;
}

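/*
 * With a shared pset run queue every processor sees the same statistics, so
 * report them from only one processor (the lowest-numbered CPU in the pset)
 * to avoid counting the shared queue multiple times.
 */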
static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
    if (processor->cpu_id == processor->processor_set->cpu_set_low)
        return runq_for_processor(processor)->runq_stats.count_sum;
    else
        return 0ULL;
}

static int
sched_traditional_processor_bound_count(processor_t processor)
{
    return processor->runq_bound_count;
}

/*
 * sched_traditional_processor_queue_shutdown:
 *
 * Shutdown a processor run queue by
 * re-dispatching non-bound threads.
 *
 * Associated pset must be locked, and is
 * returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    run_queue_t rq = runq_for_processor(processor);
    queue_t queue = rq->queues + rq->highq;
    int pri = rq->highq;
    int count = rq->count;
    thread_t next, thread;
    queue_head_t tqueue;

    queue_init(&tqueue);

    while (count > 0) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                enqueue_tail(&tqueue, (queue_entry_t)thread);
            }
            count--;

            thread = next;
        }

        queue--; pri--;
    }

    pset_unlock(pset);

    while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

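/*
 * run_queue_check() below is a disabled debugging aid: it verifies that a
 * thread is actually present on the run queue it claims to be on, and
 * panics on any inconsistency.
 */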
#if 0
static void
run_queue_check(
    run_queue_t rq,
    thread_t    thread)
{
    queue_t q;
    queue_entry_t qe;

    if (rq != thread->runq)
        panic("run_queue_check: thread runq");

    if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
        panic("run_queue_check: thread sched_pri");

    q = &rq->queues[thread->sched_pri];
    qe = queue_first(q);
    while (!queue_end(q, qe)) {
        if (qe == (queue_entry_t)thread)
            return;

        qe = queue_next(qe);
    }

    panic("run_queue_check: end");
}
#endif /* 0 */

/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
                                         thread_t    thread)
{
    processor_set_t pset;
    run_queue_t rq;

    pset = processor->processor_set;
    pset_lock(pset);

    rq = runq_for_processor(processor);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        runq_consider_decr_bound_count(processor, thread);
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return (processor != PROCESSOR_NULL);
}

/*
 * sched_traditional_steal_processor_thread:
 *
 * Locate a thread to steal from the processor and
 * return it.
 *
 * Associated pset must be locked. Returns THREAD_NULL
 * on failure.
 */
static thread_t
sched_traditional_steal_processor_thread(processor_t processor)
{
    run_queue_t rq = runq_for_processor(processor);
    queue_t queue = rq->queues + rq->highq;
    int pri = rq->highq;
    int count = rq->count;
    thread_t thread;

    while (count > 0) {
        thread = (thread_t)(uintptr_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    bitmap_clear(rq->bitmap, pri);
                    rq->highq = bitmap_first(rq->bitmap, NRQS);
                }

                return (thread);
            }
            count--;

            thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    return (THREAD_NULL);
}

/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
    processor_set_t nset, cset = pset;
    processor_t processor;
    thread_t thread;

    do {
        uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] |
                               pset->cpu_state_map[PROCESSOR_DISPATCHING]);
        for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) {
            processor = processor_array[cpuid];
            if (runq_for_processor(processor)->count > 0) {
                thread = sched_traditional_steal_processor_thread(processor);
                if (thread != THREAD_NULL) {
                    pset_unlock(cset);

                    return (thread);
                }
            }
        }

        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    pset_unlock(cset);

    return (THREAD_NULL);
}

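/*
 * sched_traditional_thread_update_scan:
 *
 * Periodically scan every processor's run queue (and its idle thread) for
 * timeshare threads whose priority needs recomputation, collecting
 * candidates and then processing them outside the pset lock. The scan is
 * restarted whenever no more candidates can be collected.
 */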
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
    boolean_t restart_needed = FALSE;
    processor_t processor = processor_list;
    processor_set_t pset;
    thread_t thread;
    spl_t s;

    do {
        do {
            /*
             * TODO: in sched_traditional_use_pset_runqueue case,
             * avoid scanning the same runq multiple times
             */
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(runq_for_processor(processor), scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed)
                break;

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_add_thread(thread) == FALSE) {
                    restart_needed = TRUE;
                    break;
                }
            }
        } while ((processor = processor->processor_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();
    } while (restart_needed);
}