/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Author: Bryan Ford, University of Utah CSS
 *
 *	Thread management routines
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/processor.h>
#include <kern/timer.h>
#include <kern/affinity.h>

#include <stdatomic.h>

#include <security/mac_mach_internal.h>

static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}

/*
 * Internal routine to mark a thread as waiting
 * right after it has been created. The caller
 * is responsible for calling wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * The task and task_threads mutexes are also held
 * (so nobody can set the thread running before
 * this point).
 *
 * Converts the TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t thread,
	event_t event,
	wait_interrupt_t interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	                                         interruptible,
	                                         TIMEOUT_URGENCY_SYS_NORMAL,
	                                         TIMEOUT_WAIT_FOREVER,
	                                         TIMEOUT_NO_LEEWAY,
	                                         thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
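
/*
 * Caller-side sketch (illustrative; the creation step is an assumption,
 * the locking comment above is authoritative):
 *
 *	(thread, task and task_threads mutexes held)
 *	thread_start_in_assert_wait(thread, event, THREAD_INTERRUPTIBLE);
 *	(mutexes dropped)
 *	...
 *	thread_wakeup(event);	-or-	thread_terminate(thread);
 */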

/*
 * Internal routine to terminate a thread.
 * Sometimes called with the task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else
			thread_start(thread);
	}
	else
		result = KERN_TERMINATED;

	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	thread_t thread)
{
	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Kernel threads can't be terminated without their own cooperation */
	if (thread->task == kernel_task && thread != current_thread())
		return (KERN_FAILURE);

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force-handle the AST_APC here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (thread->task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		panic("thread_terminate");
		/* NOTREACHED */
	}

	return (result);
}

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread;
 * a count of suspends is maintained.
 *
 * Called with the thread mutex held.
 */
void
thread_hold(thread_t thread)
{
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);
	}
}

/*
 * Decrement the internal suspension count, setting the thread
 * runnable when the count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with the thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0)
		return;

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			thread_start(thread);
		} else if (thread->suspend_parked) {
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
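
/*
 * Pairing sketch (illustrative): in-kernel paths that must keep a target
 * thread from running while they operate on it use these as a pair:
 *
 *	thread_mtx_lock(thread);
 *	thread_hold(thread);		(bump suspend_count, post AST_APC)
 *	thread_mtx_unlock(thread);
 *	...				(e.g. thread_stop()/machine_*()/thread_unstop())
 *	thread_mtx_lock(thread);
 *	thread_release(thread);		(drop the count, wake a parked thread)
 *	thread_mtx_unlock(thread);
 *
 * This is the pattern the thread_get_state()/thread_set_state() paths
 * below follow.
 */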

kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count++ == 0)
			thread_hold(thread);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}

kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (--thread->user_stop_count == 0)
				thread_release(thread);
		} else {
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}
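
/*
 * User-space view (illustrative, assuming a thread port obtained via
 * task_threads()):
 *
 *	kern_return_t kr = thread_suspend(port);	(user_stop_count 0 -> 1)
 *	if (kr == KERN_SUCCESS) {
 *		... inspect or modify the stopped thread ...
 *		(void) thread_resume(port);		(1 -> 0, runnable again)
 *	}
 *
 * Suspensions nest: each thread_suspend() must be balanced by a
 * thread_resume() before the thread can run again.
 */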

/*
 * thread_depress_abort_from_user:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_from_user(thread_t thread)
{
	kern_return_t result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active)
		result = thread_depress_abort(thread);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}


/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with the thread mutex held.
 */
static void
act_abort(
	thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}

kern_return_t
thread_abort(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
thread_abort_safely(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return (result);
}

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
	thread_t thread,
	thread_flavor_t flavor,
	thread_info_t thread_info_out,
	mach_msg_type_number_t *thread_info_count)
{
	kern_return_t result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active || thread->inspection)
		result = thread_info_internal(
			thread, flavor, thread_info_out, thread_info_count);
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

static inline kern_return_t
thread_get_state_internal(
	thread_t thread,
	int flavor,
	thread_state_t state,			/* pointer to OUT array */
	mach_msg_type_number_t *state_count,	/* IN/OUT */
	boolean_t to_user)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
	}
	else if (thread->inspection) {
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count);
	}

	thread_mtx_unlock(thread);

	return (result);
}

/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */

kern_return_t
thread_get_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *state_count);

kern_return_t
thread_get_state(
	thread_t thread,
	int flavor,
	thread_state_t state,			/* pointer to OUT array */
	mach_msg_type_number_t *state_count)	/* IN/OUT */
{
	return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_get_state_to_user(
	thread_t thread,
	int flavor,
	thread_state_t state,			/* pointer to OUT array */
	mach_msg_type_number_t *state_count)	/* IN/OUT */
{
	return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
}
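
/*
 * User-space call sketch (illustrative; the flavor and count are
 * machine-specific, arm64 shown as an assumed example):
 *
 *	arm_thread_state64_t ts;
 *	mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
 *	kr = thread_get_state(port, ARM_THREAD_STATE64,
 *	                      (thread_state_t)&ts, &count);
 *
 * On success, count is updated to the number of state words returned.
 */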

/*
 * Change the thread's machine-dependent state. Called with nothing
 * locked. Returns the same way.
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count,
	boolean_t from_user)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

out:
	thread_mtx_unlock(thread);

	return (result);
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count);

kern_return_t
thread_set_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
}

kern_return_t
thread_set_state_from_user(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state. Called from execve
 * with nothing locked; returns the same way.
 */
kern_return_t
thread_state_initialize(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_state_initialize(thread);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_state_initialize(thread);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}


kern_return_t
thread_dup(
	thread_t target)
{
	thread_t self = current_thread();
	kern_return_t result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}


kern_return_t
thread_dup2(
	thread_t source,
	thread_t target)
{
	kern_return_t result = KERN_SUCCESS;
	uint32_t active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(source, target);
			thread_unstop(target);
		}
		else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}

/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	thread_t thread,
	int flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	return (thread_set_state(thread, flavor, tstate, count));
}

kern_return_t
thread_setstatus_from_user(
	thread_t thread,
	int flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	return (thread_set_state_from_user(thread, flavor, tstate, count));
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	thread_t thread,
	int flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	return (thread_get_state(thread, flavor, tstate, count));
}

kern_return_t
thread_getstatus_to_user(
	thread_t thread,
	int flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	return (thread_get_state_to_user(thread, flavor, tstate, count));
}

/*
 * Change the thread's machine-dependent userspace TSD base.
 * Called with nothing locked. Returns the same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_tsd_base(thread, tsd_base);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}

/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}

/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}

/*
 * Activation control support routines internal to this file:
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension. It checks
 * whether there have been any new suspensions. If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED)
		thread->suspend_parked = FALSE;
	else
		assert(thread->suspend_parked == FALSE);

	if (thread->suspend_count > 0)
		thread_set_apc_ast(thread);

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}

/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked. Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
		            THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
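
/*
 * Suspension lifecycle sketch (illustrative), as driven by this handler:
 *
 *	thread_suspend()  -> thread_hold() posts AST_APC
 *	the target takes the AST on its way back to user space:
 *	    thread_apc_ast() sees suspend_count > 0, sets suspend_parked
 *	    and blocks on &thread->suspend_count with thread_suspended
 *	    as its continuation
 *	thread_resume()   -> thread_release() wakes the parked thread
 *	thread_suspended() re-checks suspend_count, re-arming AST_APC if
 *	    needed, then returns via thread_exception_return()
 */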

/* Prototype, see justification above */
kern_return_t
act_set_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t count);

kern_return_t
act_set_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state(thread, flavor, state, count));
}

kern_return_t
act_set_state_from_user(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_set_state_from_user(thread, flavor, state, count));
}

/* Prototype, see justification above */
kern_return_t
act_get_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *count);

kern_return_t
act_get_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state(thread, flavor, state, count));
}

kern_return_t
act_get_state_to_user(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *count)
{
	if (thread == current_thread())
		return (KERN_INVALID_ARGUMENT);

	return (thread_get_state_to_user(thread, flavor, state, count));
}

static void
act_set_ast(
	thread_t thread,
	ast_t ast)
{
	spl_t s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread);
	} else {
		processor_t processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}

/*
 * Set an AST on a thread without causing an AST check
 * and without taking the thread lock.
 *
 * If the thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t thread,
    ast_t ast)
{
	thread_ast_set(thread, ast);

	if (thread == current_thread()) {
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}

void
act_set_astbsd(
	thread_t thread)
{
	act_set_ast(thread, AST_BSD);
}

void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	atomic_fetch_or(&thread->kevent_ast_bits, bits);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}

void
act_set_kperf(
	thread_t thread)
{
	/* safety check */
	if (thread != current_thread() && !ml_get_interrupts_enabled())
		panic("unsafe act_set_kperf operation");

	act_set_ast(thread, AST_KPERF);
}

#if CONFIG_MACF
void
act_set_astmacf(
	thread_t thread)
{
	act_set_ast(thread, AST_MACF);
}
#endif

void
act_set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}

/*
 * The ledger AST may need to be set while already holding
 * the thread lock. This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
	act_set_ast_async(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}