/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Author: Bryan Ford, University of Utah CSS
 *
 * Thread management routines
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_act_server.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/extmod_statistics.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/processor.h>
#include <kern/restartable.h>
#include <kern/timer.h>
#include <kern/affinity.h>
#include <kern/host.h>
#include <kern/exc_guard.h>
#include <ipc/port.h>
#include <mach/arm/thread_status.h>


#include <stdatomic.h>

#include <security/mac_mach_internal.h>
#include <libkern/coreanalytics/coreanalytics.h>

static void act_abort(thread_t thread);

static void thread_suspended(void *arg, wait_result_t result);
static void thread_set_apc_ast(thread_t thread);
static void thread_set_apc_ast_locked(thread_t thread);

extern void proc_name(int pid, char * buf, int size);
extern boolean_t IOCurrentTaskHasEntitlement(const char *);

CA_EVENT(thread_set_state,
    CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc);

static void
send_thread_set_state_telemetry(void)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(thread_set_state);
	CA_EVENT_TYPE(thread_set_state) * event = ca_event->data;

	proc_name(task_pid(current_task()), (char *) &event->current_proc, CA_PROCNAME_LEN);

	CA_EVENT_SEND(ca_event);
}

/* bootarg to create lightweight corpse for thread set state lockdown */
TUNABLE(bool, tss_should_crash, "tss_should_crash", true);

static inline boolean_t
thread_set_state_allowed(thread_t thread, int flavor)
{
	task_t target_task = get_threadtask(thread);

#if DEVELOPMENT || DEBUG
	/* disable the feature if the boot-arg is disabled. */
	if (!tss_should_crash) {
		return TRUE;
	}
#endif /* DEVELOPMENT || DEBUG */

	/* hardened binaries must have entitlement - all others ok */
	if (task_is_hardened_binary(target_task)
	    && !(thread->options & TH_IN_MACH_EXCEPTION) /* Allowed for now - rdar://103085786 */
	    && FLAVOR_MODIFIES_CORE_CPU_REGISTERS(flavor) /* only care about locking down PC/LR */
#if XNU_TARGET_OS_OSX
	    && !task_opted_out_mach_hardening(target_task)
#endif /* XNU_TARGET_OS_OSX */
#if CONFIG_ROSETTA
	    && !task_is_translated(target_task) /* Ignore translated tasks */
#endif /* CONFIG_ROSETTA */
	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state")
	    ) {
		/* fatal crash */
		mach_port_guard_exception(MACH_PORT_NULL, 0, 0, kGUARD_EXC_THREAD_SET_STATE);
		send_thread_set_state_telemetry();
		return FALSE;
	}

#if __has_feature(ptrauth_calls)
	/* Do not allow Fatal PAC exception binaries to set Debug state */
	if (task_is_pac_exception_fatal(target_task)
	    && machine_thread_state_is_debug_flavor(flavor)
#if XNU_TARGET_OS_OSX
	    && !task_opted_out_mach_hardening(target_task)
#endif /* XNU_TARGET_OS_OSX */
#if CONFIG_ROSETTA
	    && !task_is_translated(target_task) /* Ignore translated tasks */
#endif /* CONFIG_ROSETTA */
	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state")
	    ) {
		/* fatal crash */
		mach_port_guard_exception(MACH_PORT_NULL, 0, 0, kGUARD_EXC_THREAD_SET_STATE);
		send_thread_set_state_telemetry();
		return FALSE;
	}
#endif /* __has_feature(ptrauth_calls) */

	return TRUE;
}

/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}

/*
 * Internal routine to mark a thread as waiting
 * right after it has been created. The caller
 * is responsible for calling wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t thread,
	struct waitq *waitq,
	event64_t event,
	wait_interrupt_t interruptible)
{
	wait_result_t wait_result;
	spl_t spl;

	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, event,
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}

/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 *
 * If thread is on core, cause AST check immediately;
 * Otherwise, let the thread continue running in kernel
 * until it hits AST.
 */
kern_return_t
thread_terminate_internal(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started) {
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			thread_start(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}

	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	/* unconditionally unpin the thread in internal termination */
	ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}

kern_return_t
thread_terminate(
	thread_t thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	/* Kernel threads can't be terminated without their own cooperation */
	if (task == kernel_task && thread != current_thread()) {
		return KERN_FAILURE;
	}

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		panic("thread_terminate");
		/* NOTREACHED */
	}

	return result;
}

/*
 * [MIG Call] Terminate a thread.
 *
 * Cannot be used on threads managed by pthread.
 */
kern_return_t
thread_terminate_from_user(
	thread_t thread)
{
	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
		return KERN_DENIED;
	}

	return thread_terminate(thread);
}

/*
 * Terminate a thread with pinned control port.
 *
 * Can only be used on threads managed by pthread. Exported in pthread_kern.
 */
kern_return_t
thread_terminate_pinned(
	thread_t thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);


	assert(task != kernel_task);
	assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));

	thread_mtx_lock(thread);
	if (task_is_pinned(task) && thread->active) {
		assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
	}
	thread_mtx_unlock(thread);

	kern_return_t result = thread_terminate_internal(thread);
	return result;
}

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread; a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(thread_t thread)
{
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		assert(thread->suspend_parked == FALSE);
	}
}

/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0) {
		return;
	}

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			thread_start(thread);
		} else if (thread->suspend_parked) {
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
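
/*
 * Illustrative only (not compiled): the canonical pairing of thread_hold()
 * and thread_release() with thread_stop()/thread_unstop(), as used by
 * thread_get_state_internal() and thread_set_state_internal() below.
 * operate_on_thread() is a hypothetical callee.
 */
#if 0
static kern_return_t
operate_on_stopped_thread(thread_t thread)
{
	kern_return_t result;

	thread_mtx_lock(thread);
	thread_hold(thread);                    /* pend AST_APC so the thread parks */
	thread_mtx_unlock(thread);

	if (thread_stop(thread, FALSE)) {       /* wait for it to get off core */
		thread_mtx_lock(thread);
		result = operate_on_thread(thread);     /* hypothetical */
		thread_unstop(thread);
	} else {
		thread_mtx_lock(thread);
		result = KERN_ABORTED;
	}

	thread_release(thread);                 /* balance the hold taken above */
	thread_mtx_unlock(thread);
	return result;
}
#endif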

kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count++ == 0) {
			thread_hold(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}

kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			if (--thread->user_stop_count == 0) {
				thread_release(thread);
			}
		} else {
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
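
/*
 * For reference, the user-space view of the two routines above: the MIG
 * calls thread_suspend()/thread_resume() from <mach/mach.h> land in these
 * in-kernel implementations.  A minimal sketch, not compiled here; error
 * handling is reduced to an early return.
 */
#if 0
#include <mach/mach.h>
#include <pthread.h>

static void
suspend_briefly(pthread_t victim)
{
	/* A pthread's Mach thread port can be obtained with pthread_mach_thread_np(). */
	mach_port_t port = pthread_mach_thread_np(victim);

	/* Each suspend increments user_stop_count ... */
	if (thread_suspend(port) != KERN_SUCCESS) {
		return;
	}
	/* ... and must be balanced by a matching resume. */
	(void)thread_resume(port);
}
#endif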

/*
 * thread_depress_abort_from_user:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_from_user(thread_t thread)
{
	kern_return_t result;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		result = thread_depress_abort(thread);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}


/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}

kern_return_t
thread_abort(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}

kern_return_t
thread_abort_safely(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
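
/*
 * User-space counterpart sketch for thread_abort_safely(): it interrupts the
 * target only at an abort-safe point (for example, a restartable mach_msg()
 * wait), pending the abort otherwise.  Illustrative only, not compiled here.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
interrupt_waiter(mach_port_t thread_port)
{
	/* Returns KERN_TERMINATED if the target has already terminated. */
	return thread_abort_safely(thread_port);
}
#endif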

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>

kern_return_t
thread_info(
	thread_t thread,
	thread_flavor_t flavor,
	thread_info_t thread_info_out,
	mach_msg_type_number_t *thread_info_count)
{
	kern_return_t result;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active || thread->inspection) {
		result = thread_info_internal(
			thread, flavor, thread_info_out, thread_info_count);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}

static inline kern_return_t
thread_get_state_internal(
	thread_t thread,
	int flavor,
	thread_state_t state, /* pointer to OUT array */
	mach_msg_type_number_t *state_count, /*IN/OUT*/
	thread_set_status_flags_t flags)
{
	kern_return_t result = KERN_SUCCESS;
	boolean_t to_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count, flags);
	}

	thread_mtx_unlock(thread);

	return result;
}

/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */

kern_return_t
thread_get_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *state_count);

kern_return_t
thread_get_state(
	thread_t thread,
	int flavor,
	thread_state_t state, /* pointer to OUT array */
	mach_msg_type_number_t *state_count) /*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_FLAGS_NONE);
}

kern_return_t
thread_get_state_to_user(
	thread_t thread,
	int flavor,
	thread_state_t state, /* pointer to OUT array */
	mach_msg_type_number_t *state_count) /*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_TRANSLATE_TO_USER);
}
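
/*
 * User-space sketch of the corresponding MIG call: reading another thread's
 * general-purpose register state.  Flavor and count names are from
 * <mach/arm/thread_status.h>; on Intel the equivalents would be
 * x86_THREAD_STATE64/x86_THREAD_STATE64_COUNT.  Not compiled here.
 */
#if 0
#include <mach/mach.h>
#include <mach/arm/thread_status.h>

static kern_return_t
read_pc(mach_port_t thread_port, uint64_t *pc_out)
{
	arm_thread_state64_t state;
	mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;

	kern_return_t kr = thread_get_state(thread_port, ARM_THREAD_STATE64,
	    (thread_state_t)&state, &count);
	if (kr == KERN_SUCCESS) {
		/* On arm64e the PC is converted to the caller's user representation. */
		*pc_out = (uint64_t)arm_thread_state64_get_pc(state);
	}
	return kr;
}
#endif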

/*
 * Change thread's machine-dependent state. Called with nothing
 * locked. Returns same way.
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count,
	thread_state_t old_state,
	mach_msg_type_number_t old_state_count,
	thread_set_status_flags_t flags)
{
	kern_return_t result = KERN_SUCCESS;
	boolean_t from_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if ((flags & TSSF_CHECK_ENTITLEMENT) &&
	    !thread_set_state_allowed(thread, flavor)) {
		return KERN_NO_ACCESS;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count, old_state, old_state_count, flags);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	if ((result == KERN_SUCCESS) && from_user) {
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}

/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count);

kern_return_t
thread_set_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_FLAGS_NONE);
}

kern_return_t
thread_set_state_from_user(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, NULL,
	    0, TSSF_TRANSLATE_TO_USER | TSSF_CHECK_ENTITLEMENT);
}

kern_return_t
thread_convert_thread_state(
	thread_t thread,
	int direction,
	thread_state_flavor_t flavor,
	thread_state_t in_state, /* pointer to IN array */
	mach_msg_type_number_t in_state_count,
	thread_state_t out_state, /* pointer to OUT array */
	mach_msg_type_number_t *out_state_count) /*IN/OUT*/
{
	kern_return_t kr;
	thread_t to_thread = THREAD_NULL;
	thread_t from_thread = THREAD_NULL;
	mach_msg_type_number_t state_count = in_state_count;

	if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
	    direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (state_count > *out_state_count) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		to_thread = thread;
		from_thread = current_thread();
	} else {
		to_thread = current_thread();
		from_thread = thread;
	}

	/* Authenticate and convert thread state to kernel representation */
	kr = machine_thread_state_convert_from_user(from_thread, flavor,
	    in_state, state_count, NULL, 0, TSSF_FLAGS_NONE);

	/* Return early if one of the threads has JOP disabled while the other doesn't */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Convert thread state to target thread user representation */
	kr = machine_thread_state_convert_to_user(to_thread, flavor,
	    in_state, &state_count, TSSF_PRESERVE_FLAGS);

	if (kr == KERN_SUCCESS) {
		if (state_count <= *out_state_count) {
			memcpy(out_state, in_state, state_count * sizeof(uint32_t));
			*out_state_count = state_count;
		} else {
			kr = KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	return kr;
}
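
/*
 * Typical use, assuming the MIG routine is exported to user space as
 * thread_convert_thread_state(): an arm64e exception handler converts the
 * victim thread's state into its own representation so pointer-authenticated
 * values such as the PC are usable, then converts it back before replying.
 * Sketch only, not compiled here.
 */
#if 0
#include <mach/mach.h>
#include <mach/arm/thread_status.h>

static kern_return_t
convert_for_handler(mach_port_t victim, arm_thread_state64_t *ts)
{
	mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;

	/* Re-sign/translate the victim's state for the current (handler) thread. */
	return thread_convert_thread_state(victim,
	    THREAD_CONVERT_THREAD_STATE_TO_SELF, ARM_THREAD_STATE64,
	    (thread_state_t)ts, ARM_THREAD_STATE64_COUNT,
	    (thread_state_t)ts, &count);
}
#endif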

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state. Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Thread created in exec should be blocked in UNINT wait */
			assert(!(thread->state & TH_RUN));
		}
		machine_thread_state_initialize(thread);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}

kern_return_t
thread_dup(
	thread_t target)
{
	thread_t self = current_thread();
	kern_return_t result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}


kern_return_t
thread_dup2(
	thread_t source,
	thread_t target)
{
	kern_return_t result = KERN_SUCCESS;
	uint32_t active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}

/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	thread_t thread,
	int flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	return thread_set_state(thread, flavor, tstate, count);
}

kern_return_t
thread_setstatus_from_user(
	thread_t thread,
	int flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count,
	thread_state_t old_tstate,
	mach_msg_type_number_t old_count,
	thread_set_status_flags_t flags)
{
	return thread_set_state_internal(thread, flavor, tstate, count, old_tstate,
	    old_count, flags | TSSF_TRANSLATE_TO_USER);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	thread_t thread,
	int flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	return thread_get_state(thread, flavor, tstate, count);
}

kern_return_t
thread_getstatus_to_user(
	thread_t thread,
	int flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count,
	thread_set_status_flags_t flags)
{
	return thread_get_state_internal(thread, flavor, tstate, count, flags | TSSF_TRANSLATE_TO_USER);
}

/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked. Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}

/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}

/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}

/*
 * Activation control support routines internal to this file:
 *
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension. It checks
 * to see whether there have been any new suspensions. If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED) {
		thread->suspend_parked = FALSE;
	} else {
		assert(thread->suspend_parked == FALSE);
	}

	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}

/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked. Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}

#if CONFIG_ROSETTA
extern kern_return_t
exception_deliver(
	thread_t thread,
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	struct exception_action *excp,
	lck_mtx_t *mutex);

kern_return_t
thread_raise_exception(
	thread_t thread,
	exception_type_t exception,
	natural_t code_count,
	int64_t code,
	int64_t sub_code)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	if (task != current_task()) {
		return KERN_FAILURE;
	}

	if (!task_is_translated(task)) {
		return KERN_FAILURE;
	}

	if (exception == EXC_CRASH) {
		return KERN_INVALID_ARGUMENT;
	}

	int64_t codes[] = { code, sub_code };
	host_priv_t host_priv = host_priv_self();
	kern_return_t kr = exception_deliver(thread, exception, codes, code_count, host_priv->exc_actions, &host_priv->lock);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	return thread_resume(thread);
}
#endif

void
thread_debug_return_to_user_ast(
	thread_t thread)
{
#pragma unused(thread)
#if MACH_ASSERT
	if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
	    thread->rwlock_count > 0) {
		panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
	}

	if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
	    thread->priority_floor_count > 0) {
		panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
	}

#if CONFIG_EXCLAVES
	assert3u(thread->th_exclaves_state & TH_EXCLAVES_STATE_ANY, ==, 0);
#endif /* CONFIG_EXCLAVES */

#endif /* MACH_ASSERT */
}


/* Prototype, see justification above */
kern_return_t
act_set_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t count);

kern_return_t
act_set_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t count)
{
	if (thread == current_thread()) {
		return KERN_INVALID_ARGUMENT;
	}

	return thread_set_state(thread, flavor, state, count);
}

kern_return_t
act_set_state_from_user(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t count)
{
	if (thread == current_thread()) {
		return KERN_INVALID_ARGUMENT;
	}

	return thread_set_state_from_user(thread, flavor, state, count);
}

/* Prototype, see justification above */
kern_return_t
act_get_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *count);

kern_return_t
act_get_state(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *count)
{
	if (thread == current_thread()) {
		return KERN_INVALID_ARGUMENT;
	}

	return thread_get_state(thread, flavor, state, count);
}

kern_return_t
act_get_state_to_user(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *count)
{
	if (thread == current_thread()) {
		return KERN_INVALID_ARGUMENT;
	}

	return thread_get_state_to_user(thread, flavor, state, count);
}

static void
act_set_ast(
	thread_t thread,
	ast_t ast)
{
	spl_t s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread);
	} else {
		processor_t processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
		thread_unlock(thread);
	}

	splx(s);
}

/*
 * set AST on thread without causing an AST check
 * and without taking the thread lock
 *
 * If thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t thread,
    ast_t ast)
{
	thread_ast_set(thread, ast);

	if (thread == current_thread()) {
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}

void
act_set_debug_assert(void)
{
	thread_t thread = current_thread();
	if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
		thread_ast_set(thread, AST_DEBUG_ASSERT);
	}
	if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}

void
act_set_astbsd(thread_t thread)
{
	act_set_ast(thread, AST_BSD);
}

void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}

uint16_t
act_clear_astkevent(thread_t thread, uint16_t bits)
{
	/*
	 * avoid the atomic operation if none of the bits is set,
	 * which will be the common case.
	 */
	uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
	if (cur & bits) {
		cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
	}
	return cur & bits;
}
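
/*
 * The kevent AST bits above are posted with a relaxed atomic OR and claimed
 * with an AND-NOT that returns the original value.  A user-space sketch of
 * the same lock-free pattern using <stdatomic.h>; the names here are
 * illustrative, not kernel APIs.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint16_t pending_bits;

static void
post_bits(uint16_t bits)
{
	atomic_fetch_or_explicit(&pending_bits, bits, memory_order_relaxed);
}

static uint16_t
claim_bits(uint16_t bits)
{
	/* Skip the RMW when none of the requested bits are set (the common case). */
	uint16_t cur = atomic_load_explicit(&pending_bits, memory_order_relaxed);
	if (cur & bits) {
		cur = atomic_fetch_and_explicit(&pending_bits,
		    (uint16_t)~bits, memory_order_relaxed);
	}
	return cur & bits;
}
#endif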

bool
act_set_ast_reset_pcs(task_t task, thread_t thread)
{
	processor_t processor;
	bool needs_wait = false;
	spl_t s;

	s = splsched();

	if (thread == current_thread()) {
		/*
		 * this is called from the signal code,
		 * just set the AST and move on
		 */
		thread_ast_set(thread, AST_RESET_PCS);
		ast_propagate(thread);
	} else {
		thread_lock(thread);

		assert(thread->t_rr_state.trr_ipi_ack_pending == 0);
		assert(thread->t_rr_state.trr_sync_waiting == 0);

		processor = thread->last_processor;
		if (!thread->active) {
			/*
			 * ->active is being set before the thread is added
			 * to the thread list (under the task lock which
			 * the caller holds), and is reset before the thread
			 * lock is being taken by thread_terminate_self().
			 *
			 * The result is that this will never fail to
			 * set the AST on a thread that is active,
			 * but will not set it past thread_terminate_self().
			 */
		} else if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			thread->t_rr_state.trr_ipi_ack_pending = true;
			needs_wait = true;
			thread_ast_set(thread, AST_RESET_PCS);
			cause_ast_check(processor);
		} else if (thread_reset_pcs_in_range(task, thread)) {
			if (thread->t_rr_state.trr_fault_state) {
				thread->t_rr_state.trr_fault_state =
				    TRR_FAULT_OBSERVED;
				needs_wait = true;
			}
			thread_ast_set(thread, AST_RESET_PCS);
		}
		thread_unlock(thread);
	}

	splx(s);

	return needs_wait;
}

void
act_set_kperf(thread_t thread)
{
	/* safety check */
	if (thread != current_thread()) {
		if (!ml_get_interrupts_enabled()) {
			panic("unsafe act_set_kperf operation");
		}
	}

	act_set_ast(thread, AST_KPERF);
}

#if CONFIG_MACF
void
act_set_astmacf(
	thread_t thread)
{
	act_set_ast(thread, AST_MACF);
}
#endif

void
act_set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}

/*
 * The ledger AST may need to be set while already holding
 * the thread lock. This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
	act_set_ast_async(thread, AST_LEDGER);
}

void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}

void
act_set_macf_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_MACF);
}

void
act_set_astproc_resource(thread_t thread)
{
	act_set_ast(thread, AST_PROC_RESOURCE);
}