/*
 * Copyright (c) 2012-2013, 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


/*
 * Corpses Overview
 * ================
 *
 * A corpse is the state of a process past the point of its death. This means the process has
 * completed all of its termination operations, like releasing file descriptors, mach ports,
 * sockets and other constructs used to identify a process. To everyone else it appears as if
 * the process has died and is no longer available by any means.
 *
 * Why do we need Corpses?
 * -----------------------
 * For crash inspection we need to inspect the state and data that are associated with a process
 * so that the crash reporting infrastructure can build backtraces, find leaks, etc.
 *
 * Corpses functionality in kernel
 * ===============================
 * The corpse functionality is an extension of the existing exception reporting mechanisms. The
 * exception_triage calls will try to deliver the first round of exceptions, allowing
 * task/debugger/ReportCrash/launchd level exception handlers to respond to the exception. If the
 * exception is still unhandled after those notifications, then the process begins its death
 * operations and, during proc_prepareexit, we decide whether to create a corpse for inspection.
 * The following is a sample run-through of the events and data shuffling that happen when
 * corpses are enabled.
 *
 * * A process causes an exception during normal execution of its threads.
 * * The exception generated by either the mach (e.g. GUARDED_MACHPORT) or the bsd (e.g. SIGABRT,
 *   GUARDED_FD etc.) side is passed through the exception_triage() function to follow the
 *   thread -> task -> host level exception handling system. This set of steps is the same as
 *   before and allows existing crash reporting systems (both internal and 3rd party) to catch
 *   and create reports as required.
 * * If the above exception handling fails (nobody handles the notification), then the
 *   proc_prepareexit path has logic to decide whether to create a corpse.
 * * The task_mark_corpse function allocates userspace vm memory and attaches the information
 *   kcdata_descriptor_t to the task->corpse_info field of the task.
 *   - All the task's threads are marked with the "inspection" flag, which signals the
 *     termination daemon not to reap them but to hold them until they have been inspected.
 *   - The task flags t_flags reflect the corpse bit and also a PENDING_CORPSE bit.
 *     PENDING_CORPSE prevents task_terminate from stripping important data from the task.
 *   - It marks all the threads to terminate and return to AST for termination.
 *   - The allocation logic takes into account the rate limiting policy of allowing only
 *     TOTAL_CORPSES_ALLOWED in flight.
 * * The proc exit thread continues and collects the required information in the allocated vm
 *   region. Once complete, it marks itself for termination.
 * * In thread_terminate_self(), the last thread to enter will do a call to proc_exit().
 *   Following this is a check to see if the task is marked for corpse notification; if so, it
 *   will invoke task_deliver_crash_notification().
 * * Once EXC_CORPSE_NOTIFY is delivered, the PENDING_CORPSE flag is removed from the task (and
 *   the inspection flag from all its threads), and task_terminate is allowed to go ahead and
 *   continue the mach task termination process.
 * * ASIDE: The rest of the threads that reach thread_terminate_daemon() with the inspection
 *   flag set are just bounced to another holding queue (crashed_threads_queue). Only after the
 *   corpse notification are these pulled out from the holding queue and enqueued back to the
 *   termination queue.
 *
 *
 * Corpse info format
 * ==================
 * The kernel (task_mark_corpse()) makes a vm allocation in the dead task's vm space (with tag
 * VM_MEMORY_CORPSEINFO (80)). Within this memory all corpse information is saved by various
 * subsystems, e.g.
 * * the bsd proc exit path may write down the pid, parent pid, number of file descriptors etc.
 * * the mach side may append data regarding ledger usage, memory stats etc.
 * See detailed info about the memory structure and format in the kern_cdata.h documentation.
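 *
 * As an illustration (a sketch, not part of the kernel interface in this file), a user-space
 * consumer that has mapped a corpse's info could walk it with the kcdata iterators from
 * libkdd's kcdata.h; the helper names below assume that library:
 *
 *     kcdata_iter_t iter = kcdata_iter((void *)kcd_addr, (unsigned long)kcd_size);
 *     KCDATA_ITER_FOREACH(iter) {
 *             // dispatch on kcdata_iter_type(iter), e.g. TASK_CRASHINFO_PID
 *     }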
 *
 * Configuring Corpses functionality
 * =================================
 * boot-arg: -no_corpses disables corpse generation. This can be added/removed without affecting
 * any other subsystem.
 * TOTAL_CORPSES_ALLOWED: (recompilation required) - changing this number controls how many
 * corpse instances are held for inspection before memory is allowed to be reclaimed by the
 * system.
 * CORPSEINFO_ALLOCATION_SIZE: the default size of the vm allocation. If in the future there is
 * much more data to be put in, then please re-tune this parameter.
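 *
 * For example, on a development machine corpse generation could be disabled by adding the
 * boot-arg via nvram (an illustrative sketch; how boot-args are set varies by platform and
 * security configuration):
 *
 *     $ sudo nvram boot-args="-no_corpses"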
 *
 * Debugging/Visibility
 * ====================
 * * lldbmacros for thread and task summary are updated to show a "C" flag for corpse
 *   tasks/threads.
 * * There are macros to see the list of threads in the termination queue
 *   (dumpthread_terminate_queue) and in the holding queue (dumpcrashed_thread_queue); an
 *   example appears below.
 * * In case corpse creation is disabled or ignored, the system log is updated, via printf,
 *   with the reason.
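 *
 * For example, from an attached lldb kernel debug session (with the xnu lldbmacros loaded),
 * the two queues named above can be inspected directly:
 *
 *     (lldb) dumpthread_terminate_queue
 *     (lldb) dumpcrashed_thread_queue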
 *
 * Limitations of Corpses
 * ======================
 * Holding memory for inspection creates vm pressure, which might not be desirable on low
 * memory devices. Hence the maximum number of corpses being inspected at a time is limited,
 * as set by TOTAL_CORPSES_ALLOWED.
 *
 */


#include <stdatomic.h>
#include <kern/assert.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/host_priv.h>
#include <kern/host.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/policy_internal.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <corpses/task_corpse.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <mach/mach_vm.h>
#include <kern/exc_guard.h>
#include <os/log.h>
#include <sys/kdebug_triage.h>

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

/*
 * Exported interfaces
 */
#include <mach/task_server.h>

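/*
 * The two in-flight counters below are packed into one 32-bit value so that
 * both can be read and updated together by a single atomic compare-and-swap
 * (see task_crashinfo_get_ref() and task_crashinfo_release_ref()).
 */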
union corpse_creation_gate {
	struct {
		uint16_t user_faults;
		uint16_t corpses;
	};
	uint32_t value;
};

static _Atomic uint32_t inflight_corpses;
unsigned long total_corpses_created = 0;

static TUNABLE(bool, corpses_disabled, "-no_corpses", false);

#if !XNU_TARGET_OS_OSX
/* Use lightweight corpse on embedded */
static TUNABLE(bool, lw_corpses_enabled, "lw_corpses", true);
#else
static TUNABLE(bool, lw_corpses_enabled, "lw_corpses", false);
#endif

#if DEBUG || DEVELOPMENT
/* bootarg to generate corpse with size up to max_footprint_mb */
TUNABLE(bool, corpse_threshold_system_limit, "corpse_threshold_system_limit", false);
#endif /* DEBUG || DEVELOPMENT */

/* bootarg to turn on corpse forking for EXC_RESOURCE */
TUNABLE(bool, exc_via_corpse_forking, "exc_via_corpse_forking", true);

/* bootarg to generate corpse for fatal high memory watermark violation */
TUNABLE(bool, corpse_for_fatal_memkill, "corpse_for_fatal_memkill", true);

extern int IS_64BIT_PROCESS(void *);
extern void gather_populate_corpse_crashinfo(void *p, task_t task,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
extern void *proc_find(int pid);
extern int proc_rele(void *p);
extern task_t proc_get_task_raw(void *proc);
extern char *proc_best_name(struct proc *proc);


/*
 * Routine: corpses_enabled
 * returns FALSE if not enabled
 */
boolean_t
corpses_enabled(void)
{
	return !corpses_disabled;
}

unsigned long
total_corpses_count(void)
{
	union corpse_creation_gate gate;

	gate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	return gate.corpses;
}

extern int proc_pid(struct proc *);

/*
 * Routine: task_crashinfo_get_ref()
 *          Grab a slot for creating a corpse.
 * Returns: KERN_SUCCESS if the policy allows for creating a corpse.
 */
static kern_return_t
task_crashinfo_get_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;
	struct proc *p = (void *)current_proc();

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			if (newgate.user_faults++ >= TOTAL_USER_FAULTS_ALLOWED) {
				os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many faults %d\n",
				    proc_best_name(p), proc_pid(p), newgate.user_faults);
				return KERN_RESOURCE_SHORTAGE;
			}
		}
		if (newgate.corpses++ >= TOTAL_CORPSES_ALLOWED) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses);
			return KERN_RESOURCE_SHORTAGE;
		}

		// a failed CAS reloads the current value into oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse allowed %d of %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses, TOTAL_CORPSES_ALLOWED);
			return KERN_SUCCESS;
		}
	}
}

/*
 * Routine: task_crashinfo_release_ref
 *          release the slot used by the corpse.
 */
static kern_return_t
task_crashinfo_release_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			if (newgate.user_faults-- == 0) {
				panic("corpse in flight count over-release");
			}
		}
		if (newgate.corpses-- == 0) {
			panic("corpse in flight count over-release");
		}
		// a failed CAS reloads the current value into oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "Corpse released, count at %d\n", newgate.corpses);
			return KERN_SUCCESS;
		}
	}
}


kcdata_descriptor_t
task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size,
    corpse_flags_t kc_u_flags, unsigned kc_flags)
{
	kcdata_descriptor_t kcdata;

	if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
		if (KERN_SUCCESS != task_crashinfo_get_ref(kc_u_flags)) {
			return NULL;
		}
	}

	kcdata = kcdata_memory_alloc_init(crash_data_p, TASK_CRASHINFO_BEGIN, size,
	    kc_flags);
	if (kcdata) {
		kcdata->kcd_user_flags = kc_u_flags;
	} else if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
		task_crashinfo_release_ref(kc_u_flags);
	}
	return kcdata;
}

kcdata_descriptor_t
task_btinfo_alloc_init(mach_vm_address_t addr, unsigned size)
{
	kcdata_descriptor_t kcdata;

	kcdata = kcdata_memory_alloc_init(addr, TASK_BTINFO_BEGIN, size, KCFLAG_USE_MEMCOPY);

	return kcdata;
}


/*
 * Free up the memory associated with task_crashinfo_data
 */
kern_return_t
task_crashinfo_destroy(kcdata_descriptor_t data)
{
	if (!data) {
		return KERN_INVALID_ARGUMENT;
	}
	if (data->kcd_user_flags & CORPSE_CRASHINFO_HAS_REF) {
		task_crashinfo_release_ref(data->kcd_user_flags);
	}
	return kcdata_memory_destroy(data);
}

/*
 * Routine: task_get_corpseinfo
 * params: task - task which has corpse info setup.
 * returns: crash info data attached to task.
 *          NULL if task is null or has no corpse info
 */
kcdata_descriptor_t
task_get_corpseinfo(task_t task)
{
	kcdata_descriptor_t retval = NULL;
	if (task != NULL) {
		retval = task->corpse_info;
	}
	return retval;
}

/*
 * Routine: task_add_to_corpse_task_list
 * params: task - task to be added to corpse task list
 * returns: None.
 */
void
task_add_to_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_enter(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_remove_from_corpse_task_list
 * params: task - task to be removed from corpse task list
 * returns: None.
 */
void
task_remove_from_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_remove(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_purge_all_corpses
 * params: None.
 * returns: None.
 */
void
task_purge_all_corpses(void)
{
	task_t task;

	lck_mtx_lock(&tasks_corpse_lock);
	/* Iterate through all the corpse tasks and clear all map entries */
	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		os_log(OS_LOG_DEFAULT, "Memory pressure corpse purge for pid %d.\n", task_pid(task));
		vm_map_terminate(task->map);
	}
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: find_corpse_task_by_uniqueid_grp
 * params: task_uniqueid - uniqueid of the corpse
 *         target - target task [Out Param]
 *         grp - task reference group
 * returns:
 *         KERN_SUCCESS if a matching corpse is found, gives a ref.
 *         KERN_FAILURE if a corpse with the given uniqueid is not found.
 */
kern_return_t
find_corpse_task_by_uniqueid_grp(
	uint64_t   task_uniqueid,
	task_t     *target,
	task_grp_t grp)
{
	task_t task;

	lck_mtx_lock(&tasks_corpse_lock);

	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		if (task->task_uniqueid == task_uniqueid) {
			task_reference_grp(task, grp);
			lck_mtx_unlock(&tasks_corpse_lock);
			*target = task;
			return KERN_SUCCESS;
		}
	}

	lck_mtx_unlock(&tasks_corpse_lock);
	return KERN_FAILURE;
}

/*
 * Routine: task_generate_corpse
 * params: task - task to fork a corpse
 *         corpse_task - task port of the generated corpse
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED if corpses are disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching the max corpse count.
 */
kern_return_t
task_generate_corpse(
	task_t task,
	ipc_port_t *corpse_task_port)
{
	task_t new_task;
	kern_return_t kr;
	thread_t thread, th_iter;
	ipc_port_t corpse_port;

	if (task == kernel_task || task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);
	if (task_is_a_corpse_fork(task)) {
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	task_unlock(task);

	thread_set_exec_promotion(current_thread());
	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread, 0, 0, 0, NULL);
	thread_clear_exec_promotion(current_thread());
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}

	/* wait for all the threads in the task to terminate */
	task_lock(new_task);
	task_wait_till_threads_terminate_locked(new_task);

	/* Reset thread ports of all the threads in task */
	queue_iterate(&new_task->threads, th_iter, thread_t, task_threads)
	{
		/* Do not reset the thread port for inactive threads */
		if (th_iter->corpse_dup == FALSE) {
			ipc_thread_reset(th_iter);
		}
	}
	task_unlock(new_task);

	/* transfer the task ref to port and arm the no-senders notification */
	corpse_port = convert_corpse_to_port_and_nsrequest(new_task);
	assert(IP_NULL != corpse_port);

	*corpse_task_port = corpse_port;
	return KERN_SUCCESS;
}
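
/*
 * Illustrative user-space usage of the routine above (a sketch only; it assumes the caller
 * holds a send right to the target task and sufficient privileges, and it elides error
 * handling):
 *
 *	mach_port_t corpse = MACH_PORT_NULL;
 *	mach_vm_address_t kcd_addr = 0;
 *	mach_vm_size_t kcd_size = 0;
 *
 *	if (task_generate_corpse(task, &corpse) == KERN_SUCCESS) {
 *		if (task_map_corpse_info_64(mach_task_self(), corpse,
 *		    &kcd_addr, &kcd_size) == KERN_SUCCESS) {
 *			// parse the TASK_CRASHINFO kcdata at kcd_addr
 *		}
 *		mach_port_deallocate(mach_task_self(), corpse);
 *	}
 */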

/*
 * Only generate a lightweight corpse if the thread, task, or host level has
 * registered for EXC_CORPSE_NOTIFY with behavior EXCEPTION_BACKTRACE.
 *
 * Saves a send right to each matching exception port in out param exc_ports.
 */
static boolean_t
task_should_generate_lightweight_corpse(
	task_t task,
	ipc_port_t exc_ports[static BT_EXC_PORTS_COUNT])
{
	kern_return_t kr;
	boolean_t should_generate = FALSE;

	exception_mask_t mask;
	mach_msg_type_number_t nmasks;
	exception_port_t exc_port = IP_NULL;
	exception_behavior_t behavior;
	thread_state_flavor_t flavor;

	if (task != current_task()) {
		return FALSE;
	}

	if (!lw_corpses_enabled) {
		return FALSE;
	}

	for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
		nmasks = 1;

		/* thread, task, and host level, in this order */
		if (i == 0) {
			kr = thread_get_exception_ports(current_thread(), EXC_MASK_CORPSE_NOTIFY,
			    &mask, &nmasks, &exc_port, &behavior, &flavor);
		} else if (i == 1) {
			kr = task_get_exception_ports(current_task(), EXC_MASK_CORPSE_NOTIFY,
			    &mask, &nmasks, &exc_port, &behavior, &flavor);
		} else {
			kr = host_get_exception_ports(host_priv_self(), EXC_MASK_CORPSE_NOTIFY,
			    &mask, &nmasks, &exc_port, &behavior, &flavor);
		}

		if (kr != KERN_SUCCESS || nmasks == 0) {
			exc_port = IP_NULL;
		}

		/* thread level can return KERN_SUCCESS && nmasks 0 */
		assert(nmasks == 1 || i == 0);

		if (IP_VALID(exc_port) && (behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED)) {
			assert(behavior & MACH_EXCEPTION_CODES);
			exc_ports[i] = exc_port; /* transfers right to array */
			exc_port = IP_NULL;
			should_generate = TRUE;
		} else {
			exc_ports[i] = IP_NULL;
		}

		ipc_port_release_send(exc_port);
	}

	return should_generate;
}
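
/*
 * For a process to receive lightweight corpses, user space must have registered an
 * EXC_CORPSE_NOTIFY handler that prefers backtrace-only delivery. An illustrative sketch
 * (assuming the SDK in use exposes the MACH_EXCEPTION_BACKTRACE_PREFERRED behavior flag):
 *
 *	kern_return_t kr = task_set_exception_ports(mach_task_self(),
 *	    EXC_MASK_CORPSE_NOTIFY, exc_port,
 *	    EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES |
 *	    MACH_EXCEPTION_BACKTRACE_PREFERRED,
 *	    THREAD_STATE_NONE);
 */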

/*
 * Routine: task_enqueue_exception_with_corpse
 * params: task - task to generate a corpse and enqueue it
 *         etype - EXC_RESOURCE or EXC_GUARD
 *         code - exception code to be enqueued
 *         codeCnt - code array count - code and subcode
 *
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments passed.
 *          KERN_NOT_SUPPORTED if corpses are disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching the max corpse count.
 */
kern_return_t
task_enqueue_exception_with_corpse(
	task_t task,
	exception_type_t etype,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	void *reason,
	boolean_t lightweight)
{
	kern_return_t kr;
	ipc_port_t exc_ports[BT_EXC_PORTS_COUNT]; /* send rights in thread, task, host order */
	const char *procname = proc_best_name(get_bsdtask_info(task));

	if (codeCnt < 2) {
		return KERN_INVALID_ARGUMENT;
	}

	if (lightweight && task_should_generate_lightweight_corpse(task, exc_ports)) {
		/* port rights captured in exc_ports */
		kcdata_descriptor_t desc = NULL;
		kcdata_object_t obj = KCDATA_OBJECT_NULL;
		bool lw_corpse_enqueued = false;

		assert(task == current_task());
		assert(etype == EXC_GUARD);

		kr = kcdata_object_throttle_get(KCDATA_OBJECT_TYPE_LW_CORPSE);
		if (kr != KERN_SUCCESS) {
			goto out;
		}

		kr = current_thread_collect_backtrace_info(&desc, etype, code, codeCnt, reason);
		if (kr != KERN_SUCCESS) {
			kcdata_object_throttle_release(KCDATA_OBJECT_TYPE_LW_CORPSE);
			goto out;
		}

		kr = kcdata_create_object(desc, KCDATA_OBJECT_TYPE_LW_CORPSE, BTINFO_ALLOCATION_SIZE, &obj);
		assert(kr == KERN_SUCCESS);
		/* desc ref and throttle slot captured in obj ref */

		thread_backtrace_enqueue(obj, exc_ports, etype);
		os_log(OS_LOG_DEFAULT, "Lightweight corpse enqueued for %s\n", procname);
		/* obj ref and exc_ports send rights consumed */
		lw_corpse_enqueued = true;

out:
		if (!lw_corpse_enqueued) {
			for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
				ipc_port_release_send(exc_ports[i]);
			}
		}
	} else {
		task_t corpse = TASK_NULL;
		thread_t thread = THREAD_NULL;

		thread_set_exec_promotion(current_thread());
		/* Generate a corpse for the given task, will return with a ref on corpse task */
		kr = task_generate_corpse_internal(task, &corpse, &thread, etype,
		    code[0], code[1], reason);
		thread_clear_exec_promotion(current_thread());
		if (kr == KERN_SUCCESS) {
			if (thread == THREAD_NULL) {
				return KERN_FAILURE;
			}
			assert(corpse != TASK_NULL);
			assert(etype == EXC_RESOURCE || etype == EXC_GUARD);
			thread_exception_enqueue(corpse, thread, etype);
			os_log(OS_LOG_DEFAULT, "Full corpse enqueued for %s\n", procname);
		}
	}

	return kr;
}

/*
 * Routine: task_generate_corpse_internal
 * params: task - task to fork a corpse
 *         corpse_task - task of the generated corpse
 *         exc_thread - equivalent thread in corpse enqueuing exception
 *         etype - EXC_RESOURCE or EXC_GUARD or 0
 *         code - mach exception code to be passed in corpse blob
 *         subcode - mach exception subcode to be passed in corpse blob
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED if corpses are disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching the max corpse count.
 */
kern_return_t
task_generate_corpse_internal(
	task_t task,
	task_t *corpse_task,
	thread_t *exc_thread,
	exception_type_t etype,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	thread_t thread_next = THREAD_NULL;
	kern_return_t kr;
	struct proc *p = NULL;
	int is_64bit_addr;
	int is_64bit_data;
	uint32_t t_flags;
	uint32_t t_flags_ro;
	uint64_t *udata_buffer = NULL;
	int size = 0;
	int num_udata = 0;
	corpse_flags_t kc_u_flags = CORPSE_CRASHINFO_HAS_REF;
	void *corpse_proc = NULL;
	thread_t self = current_thread();

#if CONFIG_MACF
	struct label *label = NULL;
#endif

	if (!corpses_enabled()) {
		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSES_DISABLED), 0 /* arg */);
		return KERN_NOT_SUPPORTED;
	}

	if (task_corpse_forking_disabled(task)) {
		os_log(OS_LOG_DEFAULT, "corpse for pid %d disabled via SPI\n", task_pid(task));
		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_DISABLED_FOR_PROC), 0 /* arg */);
		return KERN_FAILURE;
	}

	if (etype == EXC_GUARD && EXC_GUARD_DECODE_GUARD_TYPE(code) == GUARD_TYPE_USER) {
		kc_u_flags |= CORPSE_CRASHINFO_USER_FAULT;
	}

	kr = task_crashinfo_get_ref(kc_u_flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Having a task reference does not guarantee a proc reference */
	p = proc_find(task_pid(task));
	if (p == NULL) {
		kr = KERN_INVALID_TASK;
		goto error_task_generate_corpse;
	}

	is_64bit_addr = IS_64BIT_PROCESS(p);
	is_64bit_data = (task == TASK_NULL) ? is_64bit_addr : task_get_64bit_data(task);
	t_flags = TF_CORPSE_FORK |
	    TF_PENDING_CORPSE |
	    (is_64bit_addr ? TF_64B_ADDR : TF_NONE) |
	    (is_64bit_data ? TF_64B_DATA : TF_NONE);
	t_flags_ro = TFRO_CORPSE;

#if CONFIG_MACF
	/* Create the corpse label credentials from the process. */
	label = mac_exc_create_label_for_proc(p);
#endif

	corpse_proc = zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
	new_task = proc_get_task_raw(corpse_proc);

	/* Create a task for corpse */
	kr = task_create_internal(task,
	    NULL,
	    NULL,
	    TRUE,
	    is_64bit_addr,
	    is_64bit_data,
	    t_flags,
	    t_flags_ro,
	    TPF_NONE,
	    TWF_NONE,
	    new_task);
	if (kr != KERN_SUCCESS) {
		new_task = TASK_NULL;
		goto error_task_generate_corpse;
	}

	/* Enable IPC access to the corpse task */
	ipc_task_enable(new_task);

	/* new task is now referenced, do not free the struct in error case */
	corpse_proc = NULL;

	/* Create and copy threads from task, returns a ref to thread */
	kr = task_duplicate_map_and_threads(task, p, new_task, &thread,
	    &udata_buffer, &size, &num_udata, (etype != 0));
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	kr = task_collect_crash_info(new_task,
#if CONFIG_MACF
	    label,
#endif
	    TRUE);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* transfer our references to the corpse info */
	assert(new_task->corpse_info->kcd_user_flags == 0);
	new_task->corpse_info->kcd_user_flags = kc_u_flags;
	kc_u_flags = 0;

	kr = task_start_halt(new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* terminate the ipc space */
	ipc_space_terminate(new_task->itk_space);

	/* Populate the corpse blob, use the proc struct of task instead of corpse task */
	gather_populate_corpse_crashinfo(p, new_task,
	    code, subcode, udata_buffer, num_udata, reason, etype);

	/* Add it to global corpse task list */
	task_add_to_corpse_task_list(new_task);

	*corpse_task = new_task;
	*exc_thread = thread;

error_task_generate_corpse:
#if CONFIG_MACF
	if (label) {
		mac_exc_free_label(label);
	}
#endif

	/* Release the proc reference */
	if (p != NULL) {
		proc_rele(p);
	}

	if (corpse_proc != NULL) {
		zfree(proc_task_zone, corpse_proc);
	}

	if (kr != KERN_SUCCESS) {
		if (thread != THREAD_NULL) {
			thread_deallocate(thread);
		}
		if (new_task != TASK_NULL) {
			task_lock(new_task);
			/* Terminate all the other threads in the task. */
			queue_iterate(&new_task->threads, thread_next, thread_t, task_threads)
			{
				thread_terminate_internal(thread_next);
			}
			/* wait for all the threads in the task to terminate */
			task_wait_till_threads_terminate_locked(new_task);
			task_unlock(new_task);

			task_clear_corpse(new_task);
			task_terminate_internal(new_task);
			task_deallocate(new_task);
		}
		if (kc_u_flags) {
			task_crashinfo_release_ref(kc_u_flags);
		}
	}
	/* Free the udata buffer allocated in task_duplicate_map_and_threads */
	kfree_data(udata_buffer, size);

	return kr;
}

static kern_return_t
task_map_kcdata_64(
	task_t task,
	void *kcdata_addr,
	mach_vm_address_t *uaddr,
	mach_vm_size_t kcd_size,
	vm_tag_t tag)
{
	kern_return_t kr;
	mach_vm_offset_t udata_ptr;

	kr = mach_vm_allocate_kernel(task->map, &udata_ptr, (size_t)kcd_size,
	    VM_FLAGS_ANYWHERE, tag);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	copyout(kcdata_addr, (user_addr_t)udata_ptr, (size_t)kcd_size);
	*uaddr = udata_ptr;

	return KERN_SUCCESS;
}

/*
 * Routine: task_map_corpse_info
 * params: task - Map the corpse info in task's address space
 *         corpse_task - task port of the corpse
 *         kcd_addr_begin - address of the mapped corpse info
 *         kcd_size - size of the mapped corpse info
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 * Note: Temporary function, will be deleted soon.
 */
kern_return_t
task_map_corpse_info(
	task_t task,
	task_t corpse_task,
	vm_address_t *kcd_addr_begin,
	uint32_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_address_t kcd_addr_begin_64;
	mach_vm_size_t size_64;

	kr = task_map_corpse_info_64(task, corpse_task, &kcd_addr_begin_64, &size_64);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*kcd_addr_begin = (vm_address_t)kcd_addr_begin_64;
	*kcd_size = (uint32_t)size_64;
	return KERN_SUCCESS;
}

/*
 * Routine: task_map_corpse_info_64
 * params: task - Map the corpse info in task's address space
 *         corpse_task - task port of the corpse
 *         kcd_addr_begin - address of the mapped corpse info (takes mach_vm_address_t *)
 *         kcd_size - size of the mapped corpse info (takes mach_vm_size_t *)
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 */
kern_return_t
task_map_corpse_info_64(
	task_t task,
	task_t corpse_task,
	mach_vm_address_t *kcd_addr_begin,
	mach_vm_size_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_offset_t crash_data_ptr = 0;
	const mach_vm_size_t size = CORPSEINFO_ALLOCATION_SIZE;
	void *corpse_info_kernel = NULL;

	if (task == TASK_NULL || task_is_a_corpse(task) ||
	    corpse_task == TASK_NULL || !task_is_a_corpse(corpse_task)) {
		return KERN_INVALID_ARGUMENT;
	}

	corpse_info_kernel = kcdata_memory_get_begin_addr(corpse_task->corpse_info);
	if (corpse_info_kernel == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = task_map_kcdata_64(task, corpse_info_kernel, &crash_data_ptr, size,
	    VM_MEMORY_CORPSEINFO);

	if (kr == KERN_SUCCESS) {
		*kcd_addr_begin = crash_data_ptr;
		*kcd_size = size;
	}

	return kr;
}

/*
 * Routine: task_map_kcdata_object_64
 * params: task - Map the underlying kcdata in task's address space
 *         kcdata_obj - Object representing the data
 *         kcd_addr_begin - Address of the mapped kcdata
 *         kcd_size - Size of the mapped kcdata
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 */
kern_return_t
task_map_kcdata_object_64(
	task_t task,
	kcdata_object_t kcdata_obj,
	mach_vm_address_t *kcd_addr_begin,
	mach_vm_size_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_offset_t bt_data_ptr = 0;
	const mach_vm_size_t size = BTINFO_ALLOCATION_SIZE;
	void *bt_info_kernel = NULL;

	if (task == TASK_NULL || task_is_a_corpse(task) ||
	    kcdata_obj == KCDATA_OBJECT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	bt_info_kernel = kcdata_memory_get_begin_addr(kcdata_obj->ko_data);
	if (bt_info_kernel == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = task_map_kcdata_64(task, bt_info_kernel, &bt_data_ptr, size,
	    VM_MEMORY_BTINFO);

	if (kr == KERN_SUCCESS) {
		*kcd_addr_begin = bt_data_ptr;
		*kcd_size = size;
	}

	return kr;
}

uint64_t
task_corpse_get_crashed_thread_id(task_t corpse_task)
{
	return corpse_task->crashed_thread_id;
}