/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <mach/port.h>
#include <mach/mig_errors.h>
#include <mach/task.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/exc.h>
#include <mach/mach_exc.h>

#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_machdep.h>

#include <kern/ipc_tt.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <kern/misc_protos.h>
#include <kern/ux_handler.h>
#include <kern/task_ident.h>

#include <vm/vm_map.h>

#include <security/mac_mach_internal.h>
#include <string.h>

#include <pexpert/pexpert.h>

#include <os/log.h>
#include <os/system_event_log.h>

#include <libkern/coreanalytics/coreanalytics.h>

#include <sys/code_signing.h> /* for developer mode state */

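/*
 * Set via the "-panic_on_exception_triage" boot-arg (see exception_init());
 * when true, exception_triage_thread() panics instead of delivering the
 * exception, as a debugging aid.
 */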
bool panic_on_exception_triage = false;

/* Not used in code, only for inspection during debugging */
unsigned long c_thr_exc_raise = 0;
unsigned long c_thr_exc_raise_identity_token = 0;
unsigned long c_thr_exc_raise_state = 0;
unsigned long c_thr_exc_raise_state_id = 0;
unsigned long c_thr_exc_raise_backtrace = 0;

/* forward declarations */
kern_return_t exception_deliver(
    thread_t thread,
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt,
    struct exception_action *excp,
    lck_mtx_t *mutex);

#ifdef MACH_BSD
kern_return_t bsd_exception(
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt);
#endif /* MACH_BSD */

#if __has_feature(ptrauth_calls)
extern int exit_with_pac_exception(
    void *proc,
    exception_type_t exception,
    mach_exception_code_t code,
    mach_exception_subcode_t subcode);
#endif /* __has_feature(ptrauth_calls) */

#ifdef MACH_BSD
extern bool proc_is_traced(void *p);
extern int proc_selfpid(void);
extern char *proc_name_address(struct proc *p);
#endif /* MACH_BSD */

#if (DEVELOPMENT || DEBUG)
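/*
 * Development/debug kernels only: log every exception delivered to a
 * process whose pid does not exceed this threshold (settable via the
 * "exception_log_max_pid" boot-arg).
 */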
TUNABLE_WRITEABLE(unsigned int, exception_log_max_pid, "exception_log_max_pid", 0);
#endif /* (DEVELOPMENT || DEBUG) */

/*
 * Routine: exception_init
 * Purpose:
 *      Global initialization of state for exceptions.
 * Conditions:
 *      None.
 */
void
exception_init(void)
{
    int tmp = 0;

    if (PE_parse_boot_argn("-panic_on_exception_triage", &tmp, sizeof(tmp))) {
        panic_on_exception_triage = true;
    }

#if (DEVELOPMENT || DEBUG)
    if (exception_log_max_pid) {
        printf("Logging all exceptions where pid < exception_log_max_pid (%d)\n", exception_log_max_pid);
    }
#endif /* (DEVELOPMENT || DEBUG) */
}

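/*
 * When true (the default), thread state exchanged with exception handlers
 * in exception_deliver() is fetched and accepted back using the
 * user-diversified signing flags (TSSF_RANDOM_USER_DIV /
 * TSSF_ALLOW_ONLY_USER_PTRS).
 */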
static TUNABLE(bool, pac_replace_ptrs_user, "pac_replace_ptrs_user", true);

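/*
 * Routine: exception_port_copy_send
 * Purpose:
 *      Make a send-right copy of an exception port, using the copy
 *      variant appropriate for the ux_handler port versus an ordinary
 *      message-queue port.
 * Conditions:
 *      Nothing locked. Invalid ports are returned unchanged.
 */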
ipc_port_t
exception_port_copy_send(ipc_port_t port)
{
    if (IP_VALID(port)) {
        if (is_ux_handler_port(port)) {
            /* is_ux_handler_port() compares against __DATA_CONST */
            port = ipc_port_copy_send_any(port);
        } else {
            port = ipc_port_copy_send_mqueue(port);
        }
    }
    return port;
}

/*
 * Routine: exception_deliver
 * Purpose:
 *      Make an upcall to the exception server provided.
 * Conditions:
 *      Nothing locked and no resources held.
 *      Called from an exception context, so
 *      thread_exception_return and thread_kdb_return
 *      are possible.
 * Returns:
 *      KERN_SUCCESS if the exception was handled
 */
kern_return_t
exception_deliver(
    thread_t thread,
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt,
    struct exception_action *excp,
    lck_mtx_t *mutex)
{
    ipc_port_t exc_port = IPC_PORT_NULL;
    exception_data_type_t small_code[EXCEPTION_CODE_MAX];
    thread_state_t new_state = NULL;
    int code64;
    int behavior;
    int flavor;
    kern_return_t kr;
    task_t task;
    task_id_token_t task_token;
    ipc_port_t thread_port = IPC_PORT_NULL,
        task_port = IPC_PORT_NULL,
        task_token_port = IPC_PORT_NULL;

    /*
     * Save work if we are terminating.
     * Just go back to our AST handler.
     */
    if (!thread->active && !thread->inspection) {
        return KERN_SUCCESS;
    }

    /*
     * If there are no exception actions defined for this entity,
     * we can't deliver here.
     */
    if (excp == NULL) {
        return KERN_FAILURE;
    }

    assert(exception < EXC_TYPES_COUNT);
    if (exception >= EXC_TYPES_COUNT) {
        return KERN_FAILURE;
    }

    excp = &excp[exception];

    /*
     * Snapshot the exception action data under lock for consistency.
     * Hold a reference to the port over the exception_raise_* calls
     * so it can't be destroyed. This seems like overkill, but keeps
     * the port from disappearing between now and when
     * ipc_object_copyin_from_kernel is finally called.
     */
    lck_mtx_lock(mutex);
    exc_port = exception_port_copy_send(excp->port);
    if (!IP_VALID(exc_port)) {
        lck_mtx_unlock(mutex);
        return KERN_FAILURE;
    }

    flavor = excp->flavor;
    behavior = excp->behavior;
    lck_mtx_unlock(mutex);

    code64 = (behavior & MACH_EXCEPTION_CODES);
    behavior &= ~MACH_EXCEPTION_MASK;

    if (!code64) {
        small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
        small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
    }

    task = get_threadtask(thread);

#if CONFIG_MACF
    /* Now is a reasonably good time to check if the exception action is
     * permitted for this process, because after this point we will almost
     * certainly send the message out.
     * As with other failures, exception_triage_thread will go on
     * to the next level.
     */

    /* The global exception-to-signal translation port is safe to be an exception handler. */
    if (is_ux_handler_port(exc_port) == FALSE &&
        mac_exc_action_check_exception_send(task, excp) != 0) {
        kr = KERN_FAILURE;
        goto out_release_right;
    }
#endif

    thread->options |= TH_IN_MACH_EXCEPTION;

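    /*
     * Dispatch on the behavior registered for this handler: state-only,
     * default (task/thread ports), identity-protected (task identity
     * token), or state-and-identity.
     */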
    switch (behavior) {
    case EXCEPTION_STATE: {
        mach_msg_type_number_t old_state_cnt, new_state_cnt;
        thread_state_data_t old_state;
        thread_set_status_flags_t get_flags = TSSF_TRANSLATE_TO_USER;
        thread_set_status_flags_t set_flags = TSSF_CHECK_USER_FLAGS;
        bool task_allow_user_state = task_needs_user_signed_thread_state(task);

        if (pac_replace_ptrs_user || task_allow_user_state) {
            get_flags |= TSSF_RANDOM_USER_DIV;
            set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
        }

        c_thr_exc_raise_state++;
        old_state_cnt = _MachineStateCount[flavor];
        kr = thread_getstatus_to_user(thread, flavor,
            (thread_state_t)old_state,
            &old_state_cnt, get_flags);
        new_state_cnt = old_state_cnt;
        if (kr == KERN_SUCCESS) {
            new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
            if (new_state == NULL) {
                kr = KERN_RESOURCE_SHORTAGE;
                goto out_release_right;
            }
            if (code64) {
                kr = mach_exception_raise_state(exc_port,
                    exception,
                    code,
                    codeCnt,
                    &flavor,
                    old_state, old_state_cnt,
                    new_state, &new_state_cnt);
            } else {
                kr = exception_raise_state(exc_port, exception,
                    small_code,
                    codeCnt,
                    &flavor,
                    old_state, old_state_cnt,
                    new_state, &new_state_cnt);
            }
            if (kr == KERN_SUCCESS) {
                if (exception != EXC_CORPSE_NOTIFY) {
                    kr = thread_setstatus_from_user(thread, flavor,
                        (thread_state_t)new_state, new_state_cnt,
                        (thread_state_t)old_state, old_state_cnt,
                        set_flags);
                }
                goto out_release_right;
            }
        }

        goto out_release_right;
    }

    case EXCEPTION_DEFAULT: {
        c_thr_exc_raise++;

        task_reference(task);
        thread_reference(thread);
        /*
         * Only deliver control port if Developer Mode enabled,
         * or task is a corpse. Otherwise we only deliver the
         * (immovable) read port in exception handler (both in
         * or out of process). (94669540)
         */
        if (developer_mode_state() || task_is_a_corpse(task)) {
            task_port = convert_task_to_port(task);
            thread_port = convert_thread_to_port(thread);
        } else {
            task_port = convert_task_read_to_port(task);
            thread_port = convert_thread_read_to_port(thread);
        }
        /* task and thread ref consumed */

        if (code64) {
            kr = mach_exception_raise(exc_port,
                thread_port,
                task_port,
                exception,
                code,
                codeCnt);
        } else {
            kr = exception_raise(exc_port,
                thread_port,
                task_port,
                exception,
                small_code,
                codeCnt);
        }

        goto out_release_right;
    }

    case EXCEPTION_IDENTITY_PROTECTED: {
        c_thr_exc_raise_identity_token++;

        kr = task_create_identity_token(task, &task_token);
        if (!task->active && kr == KERN_INVALID_ARGUMENT) {
            /* The task is terminating, don't need to send more exceptions */
            kr = KERN_SUCCESS;
            goto out_release_right;
        }
        /* task_token now represents a task, or corpse */
        assert(kr == KERN_SUCCESS);
        task_token_port = convert_task_id_token_to_port(task_token);
        /* task token ref consumed */

        if (code64) {
            kr = mach_exception_raise_identity_protected(exc_port,
                thread->thread_id,
                task_token_port,
                exception,
                code,
                codeCnt);
        } else {
            panic("mach_exception_raise_identity_protected() must be code64");
        }

        goto out_release_right;
    }

    case EXCEPTION_STATE_IDENTITY: {
        mach_msg_type_number_t old_state_cnt, new_state_cnt;
        thread_state_data_t old_state;
        thread_set_status_flags_t get_flags = TSSF_TRANSLATE_TO_USER;
        thread_set_status_flags_t set_flags = TSSF_CHECK_USER_FLAGS;
        bool task_allow_user_state = task_needs_user_signed_thread_state(task);

        if (pac_replace_ptrs_user || task_allow_user_state) {
            get_flags |= TSSF_RANDOM_USER_DIV;
            set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
        }

        c_thr_exc_raise_state_id++;

        task_reference(task);
        thread_reference(thread);
        /*
         * Only deliver control port if Developer Mode enabled,
         * or task is a corpse. Otherwise we only deliver the
         * (immovable) read port in exception handler (both in
         * or out of process). (94669540)
         */
        if (developer_mode_state() || task_is_a_corpse(task)) {
            task_port = convert_task_to_port(task);
            thread_port = convert_thread_to_port(thread);
        } else {
            task_port = convert_task_read_to_port(task);
            thread_port = convert_thread_read_to_port(thread);
        }
        /* task and thread ref consumed */

        old_state_cnt = _MachineStateCount[flavor];
        kr = thread_getstatus_to_user(thread, flavor,
            (thread_state_t)old_state,
            &old_state_cnt, get_flags);
        new_state_cnt = old_state_cnt;
        if (kr == KERN_SUCCESS) {
            new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
            if (new_state == NULL) {
                kr = KERN_RESOURCE_SHORTAGE;
                goto out_release_right;
            }
            if (code64) {
                kr = mach_exception_raise_state_identity(
                    exc_port,
                    thread_port,
                    task_port,
                    exception,
                    code,
                    codeCnt,
                    &flavor,
                    old_state, old_state_cnt,
                    new_state, &new_state_cnt);
            } else {
                kr = exception_raise_state_identity(exc_port,
                    thread_port,
                    task_port,
                    exception,
                    small_code,
                    codeCnt,
                    &flavor,
                    old_state, old_state_cnt,
                    new_state, &new_state_cnt);
            }

            if (kr == KERN_SUCCESS) {
                if (exception != EXC_CORPSE_NOTIFY &&
                    ip_kotype(thread_port) == IKOT_THREAD_CONTROL) {
                    kr = thread_setstatus_from_user(thread, flavor,
                        (thread_state_t)new_state, new_state_cnt,
                        (thread_state_t)old_state, old_state_cnt, set_flags);
                }
                goto out_release_right;
            }
        }

        goto out_release_right;
    }

    default:
        panic("bad exception behavior!");
        return KERN_FAILURE;
    }/* switch */

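/*
 * Common exit path: clear the in-exception flag, release any send rights
 * acquired above, and free the reply state buffer if one was allocated.
 */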
out_release_right:

    thread->options &= ~TH_IN_MACH_EXCEPTION;

    if (task_port) {
        ipc_port_release_send(task_port);
    }

    if (thread_port) {
        ipc_port_release_send(thread_port);
    }

    if (exc_port) {
        ipc_port_release_send(exc_port);
    }

    if (task_token_port) {
        ipc_port_release_send(task_token_port);
    }

    if (new_state) {
        kfree_data(new_state, sizeof(thread_state_data_t));
    }

    return kr;
}

/*
 * Routine: exception_deliver_backtrace
 * Purpose:
 *      Attempt exception delivery with backtrace info to exception ports
 *      in exc_ports in order.
 * Conditions:
 *      Caller has a reference on bt_object, and send rights on exc_ports.
 *      Does not consume any passed references or rights
 */
void
exception_deliver_backtrace(
    kcdata_object_t bt_object,
    ipc_port_t exc_ports[static BT_EXC_PORTS_COUNT],
    exception_type_t exception)
{
    kern_return_t kr;
    mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
    ipc_port_t target_port, bt_obj_port;

    assert(exception == EXC_GUARD);

    code[0] = exception;
    code[1] = 0;

    kcdata_object_reference(bt_object);
    bt_obj_port = convert_kcdata_object_to_port(bt_object);
    /* backtrace object ref consumed, no-senders is armed */

    if (!IP_VALID(bt_obj_port)) {
        return;
    }

    /*
     * We are guaranteed at task_enqueue_exception_with_corpse() time
     * that the exception port prefers backtrace delivery.
     */
    for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
        target_port = exc_ports[i];

        if (!IP_VALID(target_port)) {
            continue;
        }

        ip_mq_lock(target_port);
        if (!ip_active(target_port)) {
            ip_mq_unlock(target_port);
            continue;
        }
        ip_mq_unlock(target_port);

        kr = mach_exception_raise_backtrace(target_port,
            bt_obj_port,
            EXC_CORPSE_NOTIFY,
            code,
            EXCEPTION_CODE_MAX);

        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
            /* Exception is handled at this level */
            break;
        }
    }

    /* May trigger no-senders notification for backtrace object */
    ipc_port_release_send(bt_obj_port);

    return;
}

/*
 * Routine: check_exc_receiver_dependency
 * Purpose:
 *      Verify that the port destined for receiving this exception does not
 *      live in the current task's IPC space, which would primarily cause the
 *      kernel to hang for EXC_CRASH. Note: if the port is transferred
 *      between this check and delivery, a deadlock may still happen.
 *
 * Conditions:
 *      Nothing locked and no resources held.
 *      Called from an exception context.
 * Returns:
 *      KERN_SUCCESS if it is ok to send the exception message.
 */
static kern_return_t
check_exc_receiver_dependency(
    exception_type_t exception,
    struct exception_action *excp,
    lck_mtx_t *mutex)
{
    kern_return_t retval = KERN_SUCCESS;

    if (excp == NULL || exception != EXC_CRASH) {
        return retval;
    }

    task_t task = current_task();
    lck_mtx_lock(mutex);
    ipc_port_t xport = excp[exception].port;
    if (IP_VALID(xport) && ip_in_space_noauth(xport, task->itk_space)) {
        retval = KERN_FAILURE;
    }
    lck_mtx_unlock(mutex);
    return retval;
}


/*
 * Routine: exception_triage_thread
 * Purpose:
 *      The thread caught an exception.
 *      We make an up-call to the thread's exception server.
 * Conditions:
 *      Nothing locked and no resources held.
 *      Called from an exception context, so
 *      thread_exception_return and thread_kdb_return
 *      are possible.
 * Returns:
 *      KERN_SUCCESS if exception is handled by any of the handlers.
 */
kern_return_t
exception_triage_thread(
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt,
    thread_t thread)
{
    task_t task;
    thread_ro_t tro;
    host_priv_t host_priv;
    lck_mtx_t *mutex;
    struct exception_action *actions;
    kern_return_t kr = KERN_FAILURE;

    assert(exception != EXC_RPC_ALERT);

    /*
     * If this behavior has been requested by the kernel
     * (due to the boot environment), we should panic if we
     * enter this function. This is intended as a debugging
     * aid; it should allow us to debug why we caught an
     * exception in environments where debugging is especially
     * difficult.
     */
    if (panic_on_exception_triage) {
        panic("called exception_triage when it was forbidden by the boot environment");
    }

    /*
     * Try to raise the exception at the activation level.
     */
    mutex = &thread->mutex;
    tro = get_thread_ro(thread);
    actions = tro->tro_exc_actions;
    if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
        kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
            goto out;
        }
    }

    /*
     * Maybe the task level will handle it.
     */
    task = tro->tro_task;
    mutex = &task->itk_lock_data;
    actions = task->exc_actions;
    if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
        kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
            goto out;
        }
    }

    /*
     * How about at the host level?
     */
    host_priv = host_priv_self();
    mutex = &host_priv->lock;
    actions = host_priv->exc_actions;
    if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
        kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
            goto out;
        }
    }

out:
    if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
        (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY)) {
        thread_exception_return();
    }
    return kr;
}

#if __has_feature(ptrauth_calls)
static TUNABLE(bool, pac_exception_telemetry, "-pac_exception_telemetry", false);

CA_EVENT(pac_exception_event,
    CA_INT, exception,
    CA_INT, exception_code_0,
    CA_INT, exception_code_1,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);

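/*
 * Routine: pac_exception_triage
 * Purpose:
 *      Handle a pointer authentication failure in the current task:
 *      optionally report telemetry, and terminate the process outright
 *      when it is not being traced and the task is marked
 *      PAC-exception-fatal.
 */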
static void
pac_exception_triage(
    exception_type_t exception,
    mach_exception_data_t code)
{
    boolean_t traced_flag = FALSE;
    task_t task = current_task();
    void *proc = get_bsdtask_info(task);
    char *proc_name = (char *) "unknown";
    int pid = 0;

#ifdef MACH_BSD
    pid = proc_selfpid();
    if (proc) {
        traced_flag = proc_is_traced(proc);
        /* Should only be called on current proc */
        proc_name = proc_name_address(proc);

        /*
         * For a ptrauth violation, check that the process isn't being ptraced
         * and that the task has the TFRO_PAC_EXC_FATAL flag set. If both
         * conditions are true, terminate the task via exit_with_reason.
         */
        if (!traced_flag) {
            if (pac_exception_telemetry) {
                ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_exception_event);
                CA_EVENT_TYPE(pac_exception_event) * pexc_event = ca_event->data;
                pexc_event->exception = exception;
                pexc_event->exception_code_0 = code[0];
                pexc_event->exception_code_1 = code[1];
                strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
                CA_EVENT_SEND(ca_event);
            }
            if (task_is_pac_exception_fatal(task)) {
                os_log_error(OS_LOG_DEFAULT, "%s: process %s[%d] hit a pac violation\n", __func__, proc_name, pid);
                exit_with_pac_exception(proc, exception, code[0], code[1]);
                thread_exception_return();
                /* NOT_REACHABLE */
            }
        }
    }
#endif /* MACH_BSD */
}
#endif /* __has_feature(ptrauth_calls) */

/*
 * Routine: exception_triage
 * Purpose:
 *      The current thread caught an exception.
 *      We make an up-call to the thread's exception server.
 * Conditions:
 *      Nothing locked and no resources held.
 *      Called from an exception context, so
 *      thread_exception_return and thread_kdb_return
 *      are possible.
 * Returns:
 *      KERN_SUCCESS if exception is handled by any of the handlers.
 */
int debug4k_panic_on_exception = 0;
kern_return_t
exception_triage(
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt)
{
    thread_t thread = current_thread();
    task_t task = current_task();

    assert(codeCnt > 0);

    if (VM_MAP_PAGE_SIZE(task->map) < PAGE_SIZE) {
        DEBUG4K_EXC("thread %p task %p map %p exception %d codes 0x%llx 0x%llx\n",
            thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
        if (debug4k_panic_on_exception) {
            panic("DEBUG4K thread %p task %p map %p exception %d codes 0x%llx 0x%llx",
                thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
        }
    }

#if (DEVELOPMENT || DEBUG)
#ifdef MACH_BSD
    if (proc_pid(get_bsdtask_info(task)) <= exception_log_max_pid) {
        record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
            "exception_log_max_pid: pid %d (%s): sending exception %d (0x%llx 0x%llx)",
            proc_pid(get_bsdtask_info(task)), proc_name_address(get_bsdtask_info(task)),
            exception, code[0], codeCnt > 1 ? code[1] : 0);
    }
#endif /* MACH_BSD */
#endif /* DEVELOPMENT || DEBUG */

#if __has_feature(ptrauth_calls)
    if (exception & EXC_PTRAUTH_BIT) {
        exception &= ~EXC_PTRAUTH_BIT;
        assert(codeCnt == 2);
        pac_exception_triage(exception, code);
    }
#endif /* __has_feature(ptrauth_calls) */
    return exception_triage_thread(exception, code, codeCnt, thread);
}

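/*
 * Routine: bsd_exception
 * Purpose:
 *      Deliver an exception raised on behalf of the BSD layer to the
 *      current task's exception ports only; no thread- or host-level
 *      delivery is attempted.
 */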
kern_return_t
bsd_exception(
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt)
{
    task_t task;
    lck_mtx_t *mutex;
    thread_t self = current_thread();
    kern_return_t kr;

    /*
     * Maybe the task level will handle it.
     */
    task = current_task();
    mutex = &task->itk_lock_data;

    kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex);

    if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}


/*
 * Raise an exception on a task.
 * This should tell launchd to launch Crash Reporter for this task.
 * If the exception is fatal, we should be careful about sending a synchronous exception.
 */
kern_return_t
task_exception_notify(exception_type_t exception,
    mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode, const bool fatal)
{
    mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
    wait_interrupt_t wsave;
    kern_return_t kr = KERN_SUCCESS;

    /*
     * If we are not in dev mode, nobody should be allowed to synchronously handle
     * a fatal EXC_GUARD - they might stall on it indefinitely.
     */
    if (fatal && !developer_mode_state() && exception == EXC_GUARD) {
        return KERN_DENIED;
    }

    code[0] = exccode;
    code[1] = excsubcode;

    wsave = thread_interrupt_level(THREAD_UNINT);
    kr = exception_triage(exception, code, EXCEPTION_CODE_MAX);
    (void) thread_interrupt_level(wsave);
    return kr;
}


/*
 * Handle interface for special performance monitoring
 * This is a special case of the host exception handler
 */
kern_return_t
sys_perf_notify(thread_t thread, int pid)
{
    host_priv_t hostp;
    ipc_port_t xport;
    wait_interrupt_t wsave;
    kern_return_t ret;

    hostp = host_priv_self(); /* Get the host privileged ports */
    mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
    code[0] = 0xFF000001; /* Set terminate code */
    code[1] = pid;        /* Pass out the pid */

    lck_mtx_lock(&hostp->lock);
    xport = hostp->exc_actions[EXC_RPC_ALERT].port;

    /* Make sure we're not catching our own exception */
    if (!IP_VALID(xport) ||
        !ip_active(xport) ||
        ip_in_space_noauth(xport, get_threadtask(thread)->itk_space)) {
        lck_mtx_unlock(&hostp->lock);
        return KERN_FAILURE;
    }

    lck_mtx_unlock(&hostp->lock);

    wsave = thread_interrupt_level(THREAD_UNINT);
    ret = exception_deliver(
        thread,
        EXC_RPC_ALERT,
        code,
        2,
        hostp->exc_actions,
        &hostp->lock);
    (void)thread_interrupt_level(wsave);

    return ret;
}