1 | /* |
2 | * Copyright (c) 2000-2016 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ |
29 | /* |
30 | * Copyright (c) 1982, 1986, 1989, 1991, 1993 |
31 | * The Regents of the University of California. All rights reserved. |
32 | * (c) UNIX System Laboratories, Inc. |
33 | * All or some portions of this file are derived from material licensed |
34 | * to the University of California by American Telephone and Telegraph |
35 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
36 | * the permission of UNIX System Laboratories, Inc. |
37 | * |
38 | * Redistribution and use in source and binary forms, with or without |
39 | * modification, are permitted provided that the following conditions |
40 | * are met: |
41 | * 1. Redistributions of source code must retain the above copyright |
42 | * notice, this list of conditions and the following disclaimer. |
43 | * 2. Redistributions in binary form must reproduce the above copyright |
44 | * notice, this list of conditions and the following disclaimer in the |
45 | * documentation and/or other materials provided with the distribution. |
46 | * 3. All advertising materials mentioning features or use of this software |
47 | * must display the following acknowledgement: |
48 | * This product includes software developed by the University of |
49 | * California, Berkeley and its contributors. |
50 | * 4. Neither the name of the University nor the names of its contributors |
51 | * may be used to endorse or promote products derived from this software |
52 | * without specific prior written permission. |
53 | * |
54 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
56 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
57 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
58 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
59 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
60 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
61 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
62 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
63 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
64 | * SUCH DAMAGE. |
65 | * |
66 | * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 |
67 | */ |
68 | /* |
69 | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce |
70 | * support for mandatory and extensible security protections. This notice |
71 | * is included in support of clause 2.2 (b) of the Apple Public License, |
72 | * Version 2.0. |
73 | */ |
74 | |
75 | #include <machine/reg.h> |
76 | #include <machine/psl.h> |
77 | #include <stdatomic.h> |
78 | |
79 | #include <sys/param.h> |
80 | #include <sys/systm.h> |
81 | #include <sys/ioctl.h> |
82 | #include <sys/proc_internal.h> |
83 | #include <sys/proc.h> |
84 | #include <sys/kauth.h> |
85 | #include <sys/tty.h> |
86 | #include <sys/time.h> |
87 | #include <sys/resource.h> |
88 | #include <sys/kernel.h> |
89 | #include <sys/wait.h> |
90 | #include <sys/file_internal.h> |
91 | #include <sys/vnode_internal.h> |
92 | #include <sys/syslog.h> |
93 | #include <sys/malloc.h> |
94 | #include <sys/resourcevar.h> |
95 | #include <sys/ptrace.h> |
96 | #include <sys/proc_info.h> |
97 | #include <sys/reason.h> |
98 | #include <sys/_types/_timeval64.h> |
99 | #include <sys/user.h> |
100 | #include <sys/aio_kern.h> |
101 | #include <sys/sysproto.h> |
102 | #include <sys/signalvar.h> |
103 | #include <sys/kdebug.h> |
104 | #include <sys/kdebug_triage.h> |
105 | #include <sys/acct.h> /* acct_process */ |
106 | #include <sys/codesign.h> |
107 | #include <sys/event.h> /* kevent_proc_copy_uptrs */ |
108 | #include <sys/sdt.h> |
109 | #include <sys/bsdtask_info.h> /* bsd_getthreadname */ |
110 | #include <sys/spawn.h> |
111 | #include <sys/ubc.h> |
112 | #include <sys/code_signing.h> |
113 | |
114 | #include <security/audit/audit.h> |
115 | #include <bsm/audit_kevents.h> |
116 | |
117 | #include <mach/mach_types.h> |
118 | #include <mach/task.h> |
119 | #include <mach/thread_act.h> |
120 | |
121 | #include <kern/exc_resource.h> |
122 | #include <kern/kern_types.h> |
123 | #include <kern/kalloc.h> |
124 | #include <kern/task.h> |
125 | #include <corpses/task_corpse.h> |
126 | #include <kern/thread.h> |
127 | #include <kern/thread_call.h> |
128 | #include <kern/sched_prim.h> |
129 | #include <kern/assert.h> |
130 | #include <kern/locks.h> |
131 | #include <kern/policy_internal.h> |
132 | #include <kern/exc_guard.h> |
133 | #include <kern/backtrace.h> |
134 | |
135 | #include <vm/vm_protos.h> |
136 | #include <os/log.h> |
137 | #include <os/system_event_log.h> |
138 | |
139 | #include <pexpert/pexpert.h> |
140 | |
141 | #include <kdp/kdp_dyld.h> |
142 | |
143 | #if SYSV_SHM |
144 | #include <sys/shm_internal.h> /* shmexit */ |
145 | #endif /* SYSV_SHM */ |
146 | #if CONFIG_PERSONAS |
147 | #include <sys/persona.h> |
148 | #endif /* CONFIG_PERSONAS */ |
149 | #if CONFIG_MEMORYSTATUS |
150 | #include <sys/kern_memorystatus.h> |
151 | #endif /* CONFIG_MEMORYSTATUS */ |
152 | #if CONFIG_DTRACE |
153 | /* Do not include dtrace.h, it redefines kmem_[alloc/free] */ |
154 | void dtrace_proc_exit(proc_t p); |
155 | #include <sys/dtrace_ptss.h> |
156 | #endif /* CONFIG_DTRACE */ |
157 | #if CONFIG_MACF |
158 | #include <security/mac_framework.h> |
159 | #include <security/mac_mach_internal.h> |
160 | #include <sys/syscall.h> |
161 | #endif /* CONFIG_MACF */ |
162 | |
163 | #ifdef CONFIG_EXCLAVES |
164 | void |
165 | task_add_conclave_crash_info(task_t task, void *crash_info_ptr); |
166 | #endif /* CONFIG_EXCLAVES */ |
167 | |
168 | #if CONFIG_MEMORYSTATUS |
169 | static void proc_memorystatus_remove(proc_t p); |
170 | #endif /* CONFIG_MEMORYSTATUS */ |
171 | void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify); |
172 | void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task, |
173 | mach_exception_data_type_t code, mach_exception_data_type_t subcode, |
174 | uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype); |
175 | mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p); |
176 | exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info); |
177 | __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p); |
178 | __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p); |
179 | static void populate_corpse_crashinfo(proc_t p, task_t corpse_task, |
180 | struct rusage_superset *rup, mach_exception_data_type_t code, |
181 | mach_exception_data_type_t subcode, uint64_t *udata_buffer, |
182 | int num_udata, os_reason_t reason, exception_type_t etype); |
183 | static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode); |
184 | extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval); |
185 | extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo); |
186 | extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]); |
extern uint64_t get_task_phys_footprint_limit(task_t);
188 | int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size); |
189 | extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task); |
190 | |
191 | extern unsigned int exception_log_max_pid; |
192 | |
193 | extern void IOUserServerRecordExitReason(task_t task, os_reason_t reason); |
194 | |
195 | /* |
196 | * Flags for `reap_child_locked`. |
197 | */ |
198 | __options_decl(reap_flags_t, uint32_t, { |
199 | /* |
200 | * Parent is exiting, so the kernel is responsible for reaping children. |
201 | */ |
202 | REAP_DEAD_PARENT = 0x01, |
203 | /* |
* Child process was re-parented to initproc.
205 | */ |
206 | REAP_REPARENTED_TO_INIT = 0x02, |
207 | /* |
208 | * `proc_list_lock` is held on entry. |
209 | */ |
210 | REAP_LOCKED = 0x04, |
211 | /* |
212 | * Drop the `proc_list_lock` on return. Note that the `proc_list_lock` will |
213 | * be dropped internally by the function regardless. |
214 | */ |
215 | REAP_DROP_LOCK = 0x08, |
216 | }); |
217 | static void reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags); |
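
/*
 * Zone backing the `rusage_superset` that stays attached to a zombie while it
 * waits to be reaped.
 */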
218 | |
219 | static KALLOC_TYPE_DEFINE(zombie_zone, struct rusage_superset, KT_DEFAULT); |
220 | |
221 | /* |
222 | * Things which should have prototypes in headers, but don't |
223 | */ |
224 | void proc_exit(proc_t p); |
225 | int wait1continue(int result); |
226 | int waitidcontinue(int result); |
227 | kern_return_t sys_perf_notify(thread_t thread, int pid); |
228 | kern_return_t task_exception_notify(exception_type_t exception, |
229 | mach_exception_data_type_t code, mach_exception_data_type_t subcode, bool fatal); |
230 | void delay(int); |
231 | |
232 | #if __has_feature(ptrauth_calls) |
233 | int exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code, |
234 | mach_exception_subcode_t subcode); |
235 | #endif /* __has_feature(ptrauth_calls) */ |
236 | |
237 | int exit_with_guard_exception(proc_t p, mach_exception_data_type_t code, |
238 | mach_exception_data_type_t subcode); |
239 | int exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code, |
240 | mach_exception_data_type_t subcode); |
241 | static int exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception, |
242 | mach_exception_code_t code, mach_exception_subcode_t subcode); |
243 | |
244 | #if CONFIG_EXCLAVES |
245 | int |
246 | exit_with_exclave_exception(proc_t p); |
247 | #endif /* CONFIG_EXCLAVES */ |
248 | |
249 | int |
250 | exit_with_jit_exception(proc_t p); |
251 | |
252 | #if DEVELOPMENT || DEBUG |
static LCK_GRP_DECLARE(proc_exit_lpexit_spin_lock_grp, "proc_exit_lpexit_spin");
254 | static LCK_MTX_DECLARE(proc_exit_lpexit_spin_lock, &proc_exit_lpexit_spin_lock_grp); |
255 | static pid_t proc_exit_lpexit_spin_pid = -1; /* wakeup point */ |
256 | static int proc_exit_lpexit_spin_pos = -1; /* point to block */ |
257 | static int proc_exit_lpexit_spinning = 0; |
258 | enum { |
259 | PELS_POS_START = 0, /* beginning of proc_exit */ |
260 | PELS_POS_PRE_TASK_DETACH, /* before task/proc detach */ |
261 | PELS_POS_POST_TASK_DETACH, /* after task/proc detach */ |
262 | PELS_POS_END, /* end of proc_exit */ |
263 | PELS_NPOS /* # valid values */ |
264 | }; |
265 | |
266 | /* Panic if matching processes (delimited by ',') exit on error. */ |
static TUNABLE_STR(panic_on_eexit_pcomms, 128, "panic_on_error_exit", "");
268 | |
269 | static int |
270 | proc_exit_lpexit_spin_pid_sysctl SYSCTL_HANDLER_ARGS |
271 | { |
272 | #pragma unused(oidp, arg1, arg2) |
273 | pid_t new_value; |
274 | int changed; |
275 | int error; |
276 | |
if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
278 | return ENOENT; |
279 | } |
280 | |
281 | error = sysctl_io_number(req, proc_exit_lpexit_spin_pid, |
282 | sizeof(proc_exit_lpexit_spin_pid), &new_value, &changed); |
283 | if (error == 0 && changed != 0) { |
284 | if (new_value < -1) { |
285 | return EINVAL; |
286 | } |
287 | lck_mtx_lock(&proc_exit_lpexit_spin_lock); |
288 | proc_exit_lpexit_spin_pid = new_value; |
289 | wakeup(&proc_exit_lpexit_spin_pid); |
290 | proc_exit_lpexit_spinning = 0; |
291 | lck_mtx_unlock(&proc_exit_lpexit_spin_lock); |
292 | } |
293 | return error; |
294 | } |
295 | |
296 | static int |
297 | proc_exit_lpexit_spin_pos_sysctl SYSCTL_HANDLER_ARGS |
298 | { |
299 | #pragma unused(oidp, arg1, arg2) |
300 | int new_value; |
301 | int changed; |
302 | int error; |
303 | |
if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
305 | return ENOENT; |
306 | } |
307 | |
308 | error = sysctl_io_number(req, proc_exit_lpexit_spin_pos, |
309 | sizeof(proc_exit_lpexit_spin_pos), &new_value, &changed); |
310 | if (error == 0 && changed != 0) { |
311 | if (new_value < -1 || new_value >= PELS_NPOS) { |
312 | return EINVAL; |
313 | } |
314 | lck_mtx_lock(&proc_exit_lpexit_spin_lock); |
315 | proc_exit_lpexit_spin_pos = new_value; |
316 | wakeup(&proc_exit_lpexit_spin_pid); |
317 | proc_exit_lpexit_spinning = 0; |
318 | lck_mtx_unlock(&proc_exit_lpexit_spin_lock); |
319 | } |
320 | return error; |
321 | } |
322 | |
323 | static int |
324 | proc_exit_lpexit_spinning_sysctl SYSCTL_HANDLER_ARGS |
325 | { |
326 | #pragma unused(oidp, arg1, arg2) |
327 | int new_value; |
328 | int changed; |
329 | int error; |
330 | |
if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
332 | return ENOENT; |
333 | } |
334 | |
335 | error = sysctl_io_number(req, proc_exit_lpexit_spinning, |
336 | sizeof(proc_exit_lpexit_spinning), &new_value, &changed); |
337 | if (error == 0 && changed != 0) { |
338 | return EINVAL; |
339 | } |
340 | return error; |
341 | } |
342 | |
343 | SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pid, |
344 | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, |
345 | NULL, sizeof(pid_t), |
proc_exit_lpexit_spin_pid_sysctl, "I", "PID to hold in proc_exit");
347 | |
348 | SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pos, |
349 | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, |
350 | NULL, sizeof(int), |
proc_exit_lpexit_spin_pos_sysctl, "I", "position to hold in proc_exit");
352 | |
353 | SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spinning, |
354 | CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, |
355 | NULL, sizeof(int), |
proc_exit_lpexit_spinning_sysctl, "I", "is a thread at requested pid/pos");
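
/*
 * Hold an exiting process at a chosen point in proc_exit() so that races
 * against late-exit paths can be exercised. A sketch of the intended
 * workflow, assuming the `enable_proc_exit_lpexit_spin` boot-arg is set
 * (the exact ordering is an assumption based on the handlers above):
 *
 *   sysctl debug.proc_exit_lpexit_spin_pos=<PELS_POS_* value>
 *   sysctl debug.proc_exit_lpexit_spin_pid=<target pid>
 *   # poll debug.proc_exit_lpexit_spinning until it reads 1
 *   sysctl debug.proc_exit_lpexit_spin_pid=-1   # release the held thread
 */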
357 | |
358 | static inline void |
359 | proc_exit_lpexit_check(pid_t pid, int pos) |
360 | { |
361 | if (proc_exit_lpexit_spin_pid == pid) { |
362 | bool slept = false; |
363 | lck_mtx_lock(&proc_exit_lpexit_spin_lock); |
364 | while (proc_exit_lpexit_spin_pid == pid && |
365 | proc_exit_lpexit_spin_pos == pos) { |
366 | if (!slept) { |
367 | os_log(OS_LOG_DEFAULT, |
368 | "proc_exit_lpexit_check: Process[%d] waiting during proc_exit at pos %d as requested" , pid, pos); |
369 | slept = true; |
370 | } |
371 | proc_exit_lpexit_spinning = 1; |
372 | msleep(&proc_exit_lpexit_spin_pid, &proc_exit_lpexit_spin_lock, |
373 | PWAIT, "proc_exit_lpexit_check" , NULL); |
374 | proc_exit_lpexit_spinning = 0; |
375 | } |
376 | lck_mtx_unlock(&proc_exit_lpexit_spin_lock); |
377 | if (slept) { |
378 | os_log(OS_LOG_DEFAULT, |
379 | "proc_exit_lpexit_check: Process[%d] driving on from pos %d" , pid, pos); |
380 | } |
381 | } |
382 | } |
383 | #endif /* DEVELOPMENT || DEBUG */ |
384 | |
385 | /* |
386 | * NOTE: Source and target may *NOT* overlap! |
387 | * XXX Should share code with bsd/dev/ppc/unix_signal.c |
388 | */ |
389 | void |
390 | siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out) |
391 | { |
392 | out->si_signo = in->si_signo; |
393 | out->si_errno = in->si_errno; |
394 | out->si_code = in->si_code; |
395 | out->si_pid = in->si_pid; |
396 | out->si_uid = in->si_uid; |
397 | out->si_status = in->si_status; |
398 | out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr); |
399 | /* following cast works for sival_int because of padding */ |
400 | out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr); |
401 | out->si_band = (user32_long_t)in->si_band; /* range reduction */ |
402 | } |
403 | |
404 | void |
405 | siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out) |
406 | { |
407 | out->si_signo = in->si_signo; |
408 | out->si_errno = in->si_errno; |
409 | out->si_code = in->si_code; |
410 | out->si_pid = in->si_pid; |
411 | out->si_uid = in->si_uid; |
412 | out->si_status = in->si_status; |
413 | out->si_addr = in->si_addr; |
414 | /* following cast works for sival_int because of padding */ |
415 | out->si_value.sival_ptr = in->si_value.sival_ptr; |
416 | out->si_band = in->si_band; /* range reduction */ |
417 | } |
418 | |
419 | static int |
420 | copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr) |
421 | { |
422 | if (is64) { |
423 | user64_siginfo_t sinfo64; |
424 | |
bzero(&sinfo64, sizeof(sinfo64));
siginfo_user_to_user64(native, &sinfo64);
427 | return copyout(&sinfo64, uaddr, sizeof(sinfo64)); |
428 | } else { |
429 | user32_siginfo_t sinfo32; |
430 | |
bzero(&sinfo32, sizeof(sinfo32));
siginfo_user_to_user32(native, &sinfo32);
433 | return copyout(&sinfo32, uaddr, sizeof(sinfo32)); |
434 | } |
435 | } |
436 | |
437 | void |
438 | gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task, |
439 | mach_exception_data_type_t code, mach_exception_data_type_t subcode, |
440 | uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype) |
441 | { |
442 | struct rusage_superset rup; |
443 | |
gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
rup.ri.ri_phys_footprint = 0;
populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
447 | udata_buffer, num_udata, reason, etype); |
448 | } |
449 | |
450 | static void |
451 | proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode) |
452 | { |
453 | mach_exception_data_type_t code_update = *code; |
454 | mach_exception_data_type_t subcode_update = *subcode; |
455 | if (p->p_exit_reason == OS_REASON_NULL) { |
456 | return; |
457 | } |
458 | |
459 | switch (p->p_exit_reason->osr_namespace) { |
460 | case OS_REASON_JETSAM: |
461 | if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) { |
462 | /* Update the code with EXC_RESOURCE code for high memory watermark */ |
463 | EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY); |
464 | EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK); |
465 | EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(proc_task(p))) >> 20)); |
466 | subcode_update = 0; |
467 | break; |
468 | } |
469 | |
470 | break; |
471 | default: |
472 | break; |
473 | } |
474 | |
475 | *code = code_update; |
476 | *subcode = subcode_update; |
477 | return; |
478 | } |
479 | |
480 | mach_exception_data_type_t |
481 | proc_encode_exit_exception_code(proc_t p) |
482 | { |
483 | uint64_t subcode = 0; |
484 | |
485 | if (p->p_exit_reason == OS_REASON_NULL) { |
486 | return 0; |
487 | } |
488 | |
489 | /* Embed first 32 bits of osr_namespace and osr_code in exception code */ |
490 | ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace); |
491 | ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code); |
492 | return (mach_exception_data_type_t)subcode; |
493 | } |
494 | |
495 | static void |
496 | populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup, |
497 | mach_exception_data_type_t code, mach_exception_data_type_t subcode, |
498 | uint64_t *udata_buffer, int num_udata, os_reason_t reason, exception_type_t etype) |
499 | { |
500 | mach_vm_address_t uaddr = 0; |
501 | mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX]; |
502 | exc_codes[0] = code; |
503 | exc_codes[1] = subcode; |
504 | cpu_type_t cputype; |
505 | struct proc_uniqidentifierinfo p_uniqidinfo; |
506 | struct proc_workqueueinfo pwqinfo; |
507 | int retval = 0; |
508 | uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task); |
509 | boolean_t is_corpse_fork; |
510 | uint32_t csflags; |
511 | unsigned int pflags = 0; |
uint64_t max_footprint_mb;
uint64_t max_footprint;

uint64_t ledger_internal;
uint64_t ledger_internal_compressed;
uint64_t ledger_iokit_mapped;
uint64_t ledger_alternate_accounting;
uint64_t ledger_alternate_accounting_compressed;
uint64_t ledger_purgeable_nonvolatile;
uint64_t ledger_purgeable_nonvolatile_compressed;
uint64_t ledger_page_table;
uint64_t ledger_phys_footprint;
uint64_t ledger_phys_footprint_lifetime_max;
uint64_t ledger_network_nonvolatile;
uint64_t ledger_network_nonvolatile_compressed;
uint64_t ledger_wired_mem;
uint64_t ledger_tagged_footprint;
uint64_t ledger_tagged_footprint_compressed;
uint64_t ledger_media_footprint;
uint64_t ledger_media_footprint_compressed;
uint64_t ledger_graphics_footprint;
uint64_t ledger_graphics_footprint_compressed;
uint64_t ledger_neural_footprint;
uint64_t ledger_neural_footprint_compressed;

void *crash_info_ptr = task_get_corpseinfo(corpse_task);
538 | |
539 | #if CONFIG_MEMORYSTATUS |
540 | int memstat_dirty_flags = 0; |
541 | #endif |
542 | |
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(pid_t), &uaddr)) {
pid_t pid = proc_getpid(p);
kcdata_memcpy(crash_info_ptr, uaddr, &pid, sizeof(pid));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
}

/* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
}
}

static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo));
if (KERN_SUCCESS ==
kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
proc_piduniqidentifierinfo(p, &p_uniqidinfo);
kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
}

csflags = (uint32_t)proc_getcsflags(p);
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(csflags), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &csflags, sizeof(csflags));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
struct timeval64 t64;
t64.tv_sec = (int64_t)p->p_start.tv_sec;
t64.tv_usec = (int64_t)p->p_start.tv_usec;
kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
zfree(ZV_NAMEI, buf);
}

pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
}
621 | |
622 | cputype = cpu_type() & ~CPU_ARCH_MASK; |
623 | if (IS_64BIT_PROCESS(p)) { |
624 | cputype |= CPU_ARCH_ABI64; |
625 | } else if (proc_is64bit_data(p)) { |
626 | cputype |= CPU_ARCH_ABI64_32; |
627 | } |
628 | |
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_cputype, sizeof(cpu_type_t));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
max_footprint = get_task_phys_footprint_limit(proc_task(p));
max_footprint_mb = max_footprint >> 20;
kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(proc_task(p));
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
646 | } |
647 | |
648 | // In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency |
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
ledger_internal = get_task_internal(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
ledger_internal_compressed = get_task_internal_compressed(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
ledger_page_table = get_task_page_table(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
ledger_phys_footprint = get_task_phys_footprint(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
ledger_wired_mem = get_task_wired_mem(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
}

bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
retval = fill_procworkqueue(p, &pwqinfo);
if (retval == 0) {
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
}
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
uid_t persona_id = proc_persona_id(p);
kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
}

#if CONFIG_COALITIONS
if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
uint64_t coalition_ids[COALITION_NUM_TYPES];
task_coalition_ids(proc_task(p), coalition_ids);
kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
}
#endif /* CONFIG_COALITIONS */

#if CONFIG_MEMORYSTATUS
memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
738 | } |
739 | #endif |
740 | |
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
ledger_media_footprint = get_task_media_footprint(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
ledger_neural_footprint = get_task_neural_footprint(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1, sizeof(struct kernel_triage_info_v1), &uaddr)) {
char triage_strings[KDBG_TRIAGE_MAX_STRINGS][KDBG_TRIAGE_MAX_STRLEN];
ktriage_extract(thread_tid(current_thread()), triage_strings, KDBG_TRIAGE_MAX_STRINGS * KDBG_TRIAGE_MAX_STRLEN);
kcdata_memcpy(crash_info_ptr, uaddr, (void*) triage_strings, sizeof(struct kernel_triage_info_v1));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_IS_CORPSE_FORK, sizeof(is_corpse_fork), &uaddr)) {
is_corpse_fork = is_corpsefork(corpse_task);
kcdata_memcpy(crash_info_ptr, uaddr, &is_corpse_fork, sizeof(is_corpse_fork));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_TYPE, sizeof(etype), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &etype, sizeof(etype));
802 | } |
803 | |
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASH_COUNT, sizeof(int), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_crash_count, sizeof(int));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_THROTTLE_TIMEOUT, sizeof(int), &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, &p->p_throttle_timeout, sizeof(int));
}

char signing_id[MAX_CRASHINFO_SIGNING_ID_LEN] = {};
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_SIGNING_ID, sizeof(signing_id), &uaddr)) {
const char * id = cs_identity_get(p);
if (id) {
strlcpy(signing_id, id, sizeof(signing_id));
}
kcdata_memcpy(crash_info_ptr, uaddr, &signing_id, sizeof(signing_id));
}
char team_id[MAX_CRASHINFO_TEAM_ID_LEN] = {};
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TEAM_ID, sizeof(team_id), &uaddr)) {
const char * id = csproc_get_teamid(p);
if (id) {
strlcpy(team_id, id, sizeof(team_id));
}
kcdata_memcpy(crash_info_ptr, uaddr, &team_id, sizeof(team_id));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_VALIDATION_CATEGORY, sizeof(uint32_t), &uaddr)) {
uint32_t category = 0;
if (csproc_get_validation_category(p, &category) != KERN_SUCCESS) {
category = CS_VALIDATION_CATEGORY_INVALID;
}
kcdata_memcpy(crash_info_ptr, uaddr, &category, sizeof(category));
}

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TRUST_LEVEL, sizeof(uint32_t), &uaddr)) {
uint32_t trust = 0;
kern_return_t ret = get_trust_level_kdp(get_task_pmap(corpse_task), &trust);
if (ret != KERN_SUCCESS) {
trust = KCDATA_INVALID_CS_TRUST_LEVEL;
}
kcdata_memcpy(crash_info_ptr, uaddr, &trust, sizeof(trust));
844 | } |
845 | |
846 | |
847 | if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) { |
848 | reason = p->p_exit_reason; |
849 | } |
850 | if (reason != OS_REASON_NULL) { |
if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
struct exit_reason_snapshot ers = {
.ers_namespace = reason->osr_namespace,
.ers_code = reason->osr_code,
.ers_flags = reason->osr_flags
};

kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
}

if (reason->osr_kcd_buf != 0) {
uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
assert(reason_buf_size != 0);

if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
}
}
}

if (num_udata > 0) {
if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
sizeof(uint64_t), num_udata, &uaddr)) {
kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
875 | } |
876 | } |
877 | |
878 | #if CONFIG_EXCLAVES |
879 | task_add_conclave_crash_info(corpse_task, crash_info_ptr); |
880 | #endif /* CONFIG_EXCLAVES */ |
881 | } |
882 | |
883 | exception_type_t |
884 | get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info) |
885 | { |
kcdata_iter_t iter = kcdata_iter((void *)corpse_info->kcd_addr_begin,
corpse_info->kcd_length);
888 | __assert_only uint32_t type = kcdata_iter_type(iter); |
889 | assert(type == KCDATA_BUFFER_BEGIN_CRASHINFO); |
890 | |
891 | iter = kcdata_iter_find_type(iter, TASK_CRASHINFO_EXCEPTION_TYPE); |
892 | exception_type_t *etype = kcdata_iter_payload(iter); |
893 | return *etype; |
894 | } |
895 | |
896 | /* |
 * Collect the information required to generate a lightweight corpse for the
 * current task, which may be terminating.
899 | */ |
900 | kern_return_t |
901 | current_thread_collect_backtrace_info( |
902 | kcdata_descriptor_t *new_desc, |
903 | exception_type_t etype, |
904 | mach_exception_data_t code, |
905 | mach_msg_type_number_t codeCnt, |
906 | void *reasonp) |
907 | { |
908 | kcdata_descriptor_t kcdata; |
909 | kern_return_t kr; |
910 | int frame_count = 0, max_frames = 100; |
911 | mach_vm_address_t uuid_info_addr = 0; |
912 | uint32_t uuid_info_count = 0; |
913 | uint32_t btinfo_flag = 0; |
914 | mach_vm_address_t btinfo_flag_addr = 0, kaddr = 0; |
915 | natural_t alloc_size = BTINFO_ALLOCATION_SIZE; |
916 | mach_msg_type_number_t th_info_count = THREAD_IDENTIFIER_INFO_COUNT; |
917 | thread_identifier_info_data_t th_info; |
918 | char threadname[MAXTHREADNAMESIZE]; |
919 | void *btdata_kernel = NULL; |
920 | typedef uintptr_t user_btframe_t __kernel_data_semantics; |
921 | user_btframe_t *btframes = NULL; |
922 | os_reason_t reason = (os_reason_t)reasonp; |
923 | struct backtrace_user_info info = BTUINFO_INIT; |
924 | struct rusage_superset rup; |
925 | uint32_t platform; |
926 | |
927 | task_t task = current_task(); |
928 | proc_t p = current_proc(); |
929 | |
bool has_64bit_addr = task_get_64bit_addr(current_task());
bool has_64bit_data = task_get_64bit_data(current_task());
932 | |
933 | if (new_desc == NULL) { |
934 | return KERN_INVALID_ARGUMENT; |
935 | } |
936 | |
937 | /* First, collect backtrace frames */ |
938 | btframes = kalloc_data(max_frames * sizeof(btframes[0]), Z_WAITOK | Z_ZERO); |
939 | if (!btframes) { |
940 | return KERN_RESOURCE_SHORTAGE; |
941 | } |
942 | |
frame_count = backtrace_user(btframes, max_frames, NULL, &info);
944 | if (info.btui_error || frame_count == 0) { |
945 | kfree_data(btframes, max_frames * sizeof(btframes[0])); |
946 | return KERN_FAILURE; |
947 | } |
948 | |
949 | if ((info.btui_info & BTI_TRUNCATED) != 0) { |
950 | btinfo_flag |= TASK_BTINFO_FLAG_BT_TRUNCATED; |
951 | } |
952 | |
953 | /* Captured in kcdata descriptor below */ |
954 | btdata_kernel = kalloc_data(alloc_size, Z_WAITOK | Z_ZERO); |
955 | if (!btdata_kernel) { |
956 | kfree_data(btframes, max_frames * sizeof(btframes[0])); |
957 | return KERN_RESOURCE_SHORTAGE; |
958 | } |
959 | |
kcdata = task_btinfo_alloc_init((mach_vm_address_t)btdata_kernel, alloc_size);
961 | if (!kcdata) { |
962 | kfree_data(btdata_kernel, alloc_size); |
963 | kfree_data(btframes, max_frames * sizeof(btframes[0])); |
964 | return KERN_RESOURCE_SHORTAGE; |
965 | } |
966 | |
967 | /* First reserve space in kcdata blob for the btinfo flag fields */ |
if (KERN_SUCCESS != kcdata_get_memory_addr(kcdata, TASK_BTINFO_FLAGS,
sizeof(uint32_t), &btinfo_flag_addr)) {
kfree_data(btdata_kernel, alloc_size);
kfree_data(btframes, max_frames * sizeof(btframes[0]));
kcdata_memory_destroy(kcdata);
973 | return KERN_RESOURCE_SHORTAGE; |
974 | } |
975 | |
if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
(has_64bit_addr ? TASK_BTINFO_BACKTRACE64 : TASK_BTINFO_BACKTRACE),
sizeof(uintptr_t), frame_count, &kaddr)) {
kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
980 | } |
981 | |
982 | #if __LP64__ |
983 | /* We only support async stacks on 64-bit kernels */ |
984 | frame_count = 0; |
985 | |
986 | if (info.btui_async_frame_addr != 0) { |
if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_ASYNC_START_INDEX,
sizeof(uint32_t), &kaddr)) {
uint32_t idx = info.btui_async_start_index;
kcdata_memcpy(kcdata, kaddr, &idx, sizeof(uint32_t));
}
struct backtrace_control ctl = {
.btc_frame_addr = info.btui_async_frame_addr,
.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET,
};

info = BTUINFO_INIT;
frame_count = backtrace_user(btframes, max_frames, &ctl, &info);
if (info.btui_error == 0 && frame_count > 0) {
if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
TASK_BTINFO_ASYNC_BACKTRACE64,
sizeof(uintptr_t), frame_count, &kaddr)) {
kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
1004 | } |
1005 | } |
1006 | |
1007 | if ((info.btui_info & BTI_TRUNCATED) != 0) { |
1008 | btinfo_flag |= TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED; |
1009 | } |
1010 | } |
1011 | #endif |
1012 | |
1013 | /* Backtrace collection done, free the frames buffer */ |
1014 | kfree_data(btframes, max_frames * sizeof(btframes[0])); |
1015 | btframes = NULL; |
1016 | |
thread_set_exec_promotion(current_thread());
1018 | /* Next, suspend the task briefly and collect image load infos */ |
1019 | task_suspend_internal(task); |
1020 | |
1021 | /* all_image_info struct is ABI, in agreement with address width */ |
1022 | if (has_64bit_addr) { |
1023 | struct user64_dyld_all_image_infos task_image_infos = {}; |
1024 | struct btinfo_sc_load_info64 sc_info; |
1025 | (void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos, |
1026 | sizeof(struct user64_dyld_all_image_infos)); |
1027 | uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount; |
1028 | uuid_info_addr = task_image_infos.uuidArray; |
1029 | |
1030 | sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide; |
1031 | sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress; |
memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
sizeof(task_image_infos.sharedCacheUUID));

if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
TASK_BTINFO_SC_LOADINFO64, sizeof(sc_info), &kaddr)) {
kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1038 | } |
1039 | } else { |
1040 | struct user32_dyld_all_image_infos task_image_infos = {}; |
1041 | struct btinfo_sc_load_info sc_info; |
1042 | (void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos, |
1043 | sizeof(struct user32_dyld_all_image_infos)); |
1044 | uuid_info_count = task_image_infos.uuidArrayCount; |
1045 | uuid_info_addr = task_image_infos.uuidArray; |
1046 | |
1047 | sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide; |
1048 | sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress; |
memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
sizeof(task_image_infos.sharedCacheUUID));

if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
TASK_BTINFO_SC_LOADINFO, sizeof(sc_info), &kaddr)) {
kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1055 | } |
1056 | } |
1057 | |
1058 | if (!uuid_info_addr) { |
1059 | /* |
1060 | * Can happen when we catch dyld in the middle of updating |
1061 | * this data structure, or copyin of all_image_info struct failed. |
1062 | */ |
1063 | task_resume_internal(task); |
thread_clear_exec_promotion(current_thread());
kfree_data(btdata_kernel, alloc_size);
kcdata_memory_destroy(kcdata);
1067 | return KERN_MEMORY_ERROR; |
1068 | } |
1069 | |
1070 | if (uuid_info_count > 0) { |
1071 | uint32_t uuid_info_size = (uint32_t)(has_64bit_addr ? |
1072 | sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info)); |
1073 | |
if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
(has_64bit_addr ? TASK_BTINFO_DYLD_LOADINFO64 : TASK_BTINFO_DYLD_LOADINFO),
uuid_info_size, uuid_info_count, &kaddr)) {
if (copyin((user_addr_t)uuid_info_addr, (void *)kaddr, uuid_info_size * uuid_info_count)) {
task_resume_internal(task);
thread_clear_exec_promotion(current_thread());
kfree_data(btdata_kernel, alloc_size);
kcdata_memory_destroy(kcdata);
1082 | return KERN_MEMORY_ERROR; |
1083 | } |
1084 | } |
1085 | } |
1086 | |
1087 | task_resume_internal(task); |
thread_clear_exec_promotion(current_thread());
1089 | |
1090 | /* Next, collect all other information */ |
1091 | thread_flavor_t tsflavor; |
1092 | mach_msg_type_number_t tscount; |
1093 | |
1094 | #if defined(__x86_64__) || defined(__i386__) |
1095 | tsflavor = x86_THREAD_STATE; /* unified */ |
1096 | tscount = x86_THREAD_STATE_COUNT; |
1097 | #else |
1098 | tsflavor = ARM_THREAD_STATE; /* unified */ |
1099 | tscount = ARM_UNIFIED_THREAD_STATE_COUNT; |
1100 | #endif |
1101 | |
if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_STATE,
sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
bt_thread_state->flavor = tsflavor;
bt_thread_state->count = tscount;
/* variable-sized tstate array follows */

kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
(thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
if (kr != KERN_SUCCESS) {
bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1113 | if (kr == KERN_TERMINATED) { |
1114 | btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED; |
1115 | } |
1116 | } |
1117 | } |
1118 | |
1119 | #if defined(__x86_64__) || defined(__i386__) |
1120 | tsflavor = x86_EXCEPTION_STATE; /* unified */ |
1121 | tscount = x86_EXCEPTION_STATE_COUNT; |
1122 | #else |
1123 | #if defined(__arm64__) |
1124 | if (has_64bit_data) { |
1125 | tsflavor = ARM_EXCEPTION_STATE64; |
1126 | tscount = ARM_EXCEPTION_STATE64_COUNT; |
1127 | } else |
1128 | #endif /* defined(__arm64__) */ |
1129 | { |
1130 | tsflavor = ARM_EXCEPTION_STATE; |
1131 | tscount = ARM_EXCEPTION_STATE_COUNT; |
1132 | } |
1133 | #endif /* defined(__x86_64__) || defined(__i386__) */ |
1134 | |
if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_EXCEPTION_STATE,
sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
bt_thread_state->flavor = tsflavor;
bt_thread_state->count = tscount;
/* variable-sized tstate array follows */

kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
(thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
if (kr != KERN_SUCCESS) {
bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1146 | if (kr == KERN_TERMINATED) { |
1147 | btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED; |
1148 | } |
1149 | } |
1150 | } |
1151 | |
	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PID, sizeof(pid_t), &kaddr)) {
		pid_t pid = proc_getpid(p);
		kcdata_memcpy(kcdata, kaddr, &pid, sizeof(pid));
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PPID, sizeof(p->p_ppid), &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, &p->p_ppid, sizeof(p->p_ppid));
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_NAME, sizeof(p->p_comm), &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, &p->p_comm, sizeof(p->p_comm));
	}

#if CONFIG_COALITIONS
	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata, TASK_BTINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &kaddr)) {
		uint64_t coalition_ids[COALITION_NUM_TYPES];
		task_coalition_ids(proc_task(p), coalition_ids);
		kcdata_memcpy(kcdata, kaddr, coalition_ids, sizeof(coalition_ids));
	}
#endif /* CONFIG_COALITIONS */

	/* V0 is sufficient for ReportCrash */
	gather_rusage_info(current_proc(), &rup.ri, RUSAGE_INFO_V0);
	rup.ri.ri_phys_footprint = 0;
	/* Soft crash, proc did not exit */
	rup.ri.ri_proc_exit_abstime = 0;
	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_RUSAGE_INFO, sizeof(struct rusage_info_v0), &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, &rup.ri, sizeof(struct rusage_info_v0));
	}

	platform = proc_platform(current_proc());
	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PLATFORM, sizeof(platform), &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, &platform, sizeof(platform));
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_PATH, MAXPATHLEN, &kaddr)) {
		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, NULL);
		kcdata_memcpy(kcdata, kaddr, buf, MAXPATHLEN);
		zfree(ZV_NAMEI, buf);
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_UID, sizeof(p->p_uid), &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, &p->p_uid, sizeof(p->p_uid));
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_GID, sizeof(p->p_gid), &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, &p->p_gid, sizeof(p->p_gid));
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_FLAGS, sizeof(unsigned int), &kaddr)) {
		unsigned int pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
		kcdata_memcpy(kcdata, kaddr, &pflags, sizeof(pflags));
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CPUTYPE, sizeof(cpu_type_t), &kaddr)) {
		cpu_type_t cputype = cpu_type() & ~CPU_ARCH_MASK;
		if (has_64bit_addr) {
			cputype |= CPU_ARCH_ABI64;
		} else if (has_64bit_data) {
			cputype |= CPU_ARCH_ABI64_32;
		}
		kcdata_memcpy(kcdata, kaddr, &cputype, sizeof(cpu_type_t));
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_TYPE, sizeof(etype), &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, &etype, sizeof(etype));
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CRASH_COUNT, sizeof(int), &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, &p->p_crash_count, sizeof(int));
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THROTTLE_TIMEOUT, sizeof(int), &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, &p->p_throttle_timeout, sizeof(int));
	}

	assert(codeCnt <= EXCEPTION_CODE_MAX);

	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_CODES,
	    sizeof(mach_exception_code_t) * codeCnt, &kaddr)) {
		kcdata_memcpy(kcdata, kaddr, code, sizeof(mach_exception_code_t) * codeCnt);
	}
1235 | |
	if (reason != OS_REASON_NULL) {
		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &kaddr)) {
			struct exit_reason_snapshot ers = {
				.ers_namespace = reason->osr_namespace,
				.ers_code = reason->osr_code,
				.ers_flags = reason->osr_flags
			};

			kcdata_memcpy(kcdata, kaddr, &ers, sizeof(ers));
		}

		if (reason->osr_kcd_buf != 0) {
			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
			assert(reason_buf_size != 0);

			if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &kaddr)) {
				kcdata_memcpy(kcdata, kaddr, reason->osr_kcd_buf, reason_buf_size);
			}
		}
	}

	threadname[0] = '\0';
	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_NAME,
	    sizeof(threadname), &kaddr)) {
		bsd_getthreadname(get_bsdthread_info(current_thread()), threadname);
		kcdata_memcpy(kcdata, kaddr, threadname, sizeof(threadname));
	}
1263 | |
	kr = thread_info(current_thread(), THREAD_IDENTIFIER_INFO, (thread_info_t)&th_info, &th_info_count);
	if (kr == KERN_TERMINATED) {
		btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
	}

	kern_return_t last_kr = kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_ID,
	    sizeof(uint64_t), &kaddr);

	/*
	 * If the last kcdata_get_memory_addr() failed (unlikely), signal to the
	 * exception handler (ReportCrash) that lightweight corpse collection
	 * ran out of space and the result is incomplete.
	 */
	if (last_kr != KERN_SUCCESS) {
		btinfo_flag |= TASK_BTINFO_FLAG_KCDATA_INCOMPLETE;
	}

	if (KERN_SUCCESS == kr && KERN_SUCCESS == last_kr) {
		kcdata_memcpy(kcdata, kaddr, &th_info.thread_id, sizeof(uint64_t));
	}

	/* Lastly, copy the flags to the address we reserved at the beginning. */
	kcdata_memcpy(kcdata, btinfo_flag_addr, &btinfo_flag, sizeof(uint32_t));

	*new_desc = kcdata;

	return KERN_SUCCESS;
}
1293 | |
/*
 * We only parse exit reason kcdata blobs for critical processes before they
 * die and we're going to panic, or for opt-in, limited diagnostic tools.
 *
 * Meant to be called immediately before panicking or in limited diagnostic
 * scenarios.
 */
1301 | char * |
1302 | exit_reason_get_string_desc(os_reason_t exit_reason) |
1303 | { |
1304 | kcdata_iter_t iter; |
1305 | |
1306 | if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL || |
1307 | exit_reason->osr_bufsize == 0) { |
1308 | return NULL; |
1309 | } |
1310 | |
	iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize);
	if (!kcdata_iter_valid(iter)) {
#if DEBUG || DEVELOPMENT
		printf("exit reason has invalid exit reason buffer\n");
#endif
		return NULL;
	}

	if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) {
#if DEBUG || DEVELOPMENT
		printf("exit reason buffer type mismatch, expected %d got %d\n",
		    KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter));
#endif
		return NULL;
	}
1326 | |
1327 | iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC); |
1328 | if (!kcdata_iter_valid(iter)) { |
1329 | return NULL; |
1330 | } |
1331 | |
1332 | return (char *)kcdata_iter_payload(iter); |
1333 | } |
1334 | |
1335 | static int initproc_spawned = 0; |
1336 | |
1337 | static int |
1338 | sysctl_initproc_spawned(struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) |
1339 | { |
1340 | if (req->newptr != 0 && (proc_getpid(req->p) != 1 || initproc_spawned != 0)) { |
1341 | // Can only ever be set by launchd, and only once at boot |
1342 | return EPERM; |
1343 | } |
	return sysctl_handle_int(oidp, &initproc_spawned, 0, req);
}

SYSCTL_PROC(_kern, OID_AUTO, initproc_spawned,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_LOCKED, 0, 0,
    sysctl_initproc_spawned, "I", "Boolean indicator that launchd has reached main");
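
/*
 * A minimal sketch of the expected write, assuming the standard sysctl(3)
 * interface (only pid 1 may set this, and only once, per the handler above):
 *
 *	int one = 1;
 *	sysctlbyname("kern.initproc_spawned", NULL, NULL, &one, sizeof(one));
 */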
1350 | |
1351 | #if DEVELOPMENT || DEBUG |
1352 | |
1353 | /* disable user faults */ |
static TUNABLE(bool, bootarg_disable_user_faults, "-disable_user_faults", false);
1355 | #endif /* DEVELOPMENT || DEBUG */ |
1356 | |
1357 | #define OS_REASON_IFLAG_USER_FAULT 0x1 |
1358 | |
1359 | #define OS_REASON_TOTAL_USER_FAULTS_PER_PROC 5 |
1360 | |
1361 | static int |
1362 | abort_with_payload_internal(proc_t p, |
1363 | uint32_t reason_namespace, uint64_t reason_code, |
1364 | user_addr_t payload, uint32_t payload_size, |
1365 | user_addr_t reason_string, uint64_t reason_flags, |
1366 | uint32_t internal_flags) |
1367 | { |
1368 | os_reason_t exit_reason = OS_REASON_NULL; |
1369 | kern_return_t kr = KERN_SUCCESS; |
1370 | |
1371 | if (internal_flags & OS_REASON_IFLAG_USER_FAULT) { |
1372 | uint32_t old_value = atomic_load_explicit(&p->p_user_faults, |
1373 | memory_order_relaxed); |
1374 | |
1375 | #if DEVELOPMENT || DEBUG |
1376 | if (bootarg_disable_user_faults) { |
1377 | return EQFULL; |
1378 | } |
1379 | #endif /* DEVELOPMENT || DEBUG */ |
1380 | |
1381 | for (;;) { |
1382 | if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) { |
1383 | return EQFULL; |
1384 | } |
1385 | // this reloads the value in old_value |
1386 | if (atomic_compare_exchange_strong_explicit(&p->p_user_faults, |
1387 | &old_value, old_value + 1, memory_order_relaxed, |
1388 | memory_order_relaxed)) { |
1389 | break; |
1390 | } |
1391 | } |
1392 | } |
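	/*
	 * The loop above is a compare-and-swap increment with a cap: on CAS
	 * failure, old_value is reloaded with the current counter, so each
	 * retry either observes the cap and bails with EQFULL, or publishes
	 * old_value + 1 atomically.
	 */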
1393 | |
1394 | KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, |
1395 | proc_getpid(p), reason_namespace, |
1396 | reason_code, 0, 0); |
1397 | |
	exit_reason = build_userspace_exit_reason(reason_namespace, reason_code,
	    payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT);
1400 | |
1401 | if (internal_flags & OS_REASON_IFLAG_USER_FAULT) { |
1402 | mach_exception_code_t code = 0; |
1403 | |
1404 | EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */ |
1405 | EXC_GUARD_ENCODE_FLAVOR(code, 0); |
1406 | EXC_GUARD_ENCODE_TARGET(code, reason_namespace); |
1407 | |
1408 | if (exit_reason == OS_REASON_NULL) { |
1409 | kr = KERN_RESOURCE_SHORTAGE; |
1410 | } else { |
1411 | kr = task_violated_guard(code, reason_code, exit_reason, TRUE); |
1412 | } |
		os_reason_free(exit_reason);
1414 | } else { |
1415 | /* |
1416 | * We use SIGABRT (rather than calling exit directly from here) so that |
1417 | * the debugger can catch abort_with_{reason,payload} calls. |
1418 | */ |
1419 | psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason); |
1420 | } |
1421 | |
1422 | switch (kr) { |
1423 | case KERN_SUCCESS: |
1424 | return 0; |
1425 | case KERN_NOT_SUPPORTED: |
1426 | return ENOTSUP; |
1427 | case KERN_INVALID_ARGUMENT: |
1428 | return EINVAL; |
1429 | case KERN_RESOURCE_SHORTAGE: |
1430 | default: |
1431 | return EBUSY; |
1432 | } |
1433 | } |
1434 | |
1435 | int |
1436 | abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args, |
1437 | __unused void *retval) |
1438 | { |
	abort_with_payload_internal(cur_proc, args->reason_namespace,
	    args->reason_code, args->payload, args->payload_size,
	    args->reason_string, args->reason_flags, 0);
1442 | |
1443 | return 0; |
1444 | } |
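
/*
 * For reference, userspace reaches this through the corresponding libsystem
 * wrapper; a minimal sketch, assuming the private os/reason interface (the
 * namespace, code and payload here are illustrative, not prescribed):
 *
 *	abort_with_payload(OS_REASON_TEST, 42, payload_buf, payload_len,
 *	    "something went wrong", 0);
 *
 * The SIGABRT delivered above normally terminates the caller (a debugger
 * may intercept it), which is why the wrapper discards the internal
 * return value.
 */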
1445 | |
1446 | int |
1447 | os_fault_with_payload(struct proc *cur_proc, |
1448 | struct os_fault_with_payload_args *args, __unused int *retval) |
1449 | { |
	return abort_with_payload_internal(cur_proc, args->reason_namespace,
	    args->reason_code, args->payload, args->payload_size,
	    args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT);
1453 | } |
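
/*
 * Unlike abort_with_payload(), the fault flavor reports a simulated EXC_GUARD
 * via task_violated_guard() and then returns, so the calling process keeps
 * running; the per-process counter above caps this telemetry at
 * OS_REASON_TOTAL_USER_FAULTS_PER_PROC events for the life of the proc.
 */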
1454 | |
1455 | |
1456 | /* |
1457 | * exit -- |
1458 | * Death of process. |
1459 | */ |
1460 | __attribute__((noreturn)) |
1461 | void |
1462 | exit(proc_t p, struct exit_args *uap, int *retval) |
1463 | { |
1464 | p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24; |
1465 | exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval); |
1466 | |
1467 | thread_exception_return(); |
1468 | /* NOTREACHED */ |
1469 | while (TRUE) { |
1470 | thread_block(THREAD_CONTINUE_NULL); |
1471 | } |
1472 | /* NOTREACHED */ |
1473 | } |
1474 | |
1475 | /* |
1476 | * Exit: deallocate address space and other resources, change proc state |
1477 | * to zombie, and unlink proc from allproc and parent's lists. Save exit |
1478 | * status and rusage for wait(). Check for child processes and orphan them. |
1479 | */ |
1480 | int |
1481 | exit1(proc_t p, int rv, int *retval) |
1482 | { |
1483 | return exit1_internal(p, rv, retval, FALSE, TRUE, 0); |
1484 | } |
1485 | |
1486 | int |
1487 | exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify, |
1488 | int jetsam_flags) |
1489 | { |
1490 | return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL); |
1491 | } |
1492 | |
1493 | /* |
1494 | * NOTE: exit_with_reason drops a reference on the passed exit_reason |
1495 | */ |
1496 | int |
1497 | exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify, |
1498 | int jetsam_flags, struct os_reason *exit_reason) |
1499 | { |
1500 | thread_t self = current_thread(); |
1501 | struct task *task = proc_task(p); |
1502 | struct uthread *ut; |
1503 | int error = 0; |
1504 | bool proc_exiting = false; |
1505 | |
1506 | #if DEVELOPMENT || DEBUG |
1507 | /* |
1508 | * Debug boot-arg: panic here if matching process is exiting with non-zero code. |
1509 | * Example usage: panic_on_error_exit=launchd,logd,watchdogd |
1510 | */ |
	if (rv && strnstr(panic_on_eexit_pcomms, p->p_comm, sizeof(panic_on_eexit_pcomms))) {
		panic("%s: Process %s with pid %d exited on error with code 0x%x.",
		    __FUNCTION__, p->p_comm, proc_getpid(p), rv);
	}
1515 | #endif |
1516 | |
1517 | /* |
1518 | * If a thread in this task has already |
1519 | * called exit(), then halt any others |
1520 | * right here. |
1521 | */ |
1522 | |
1523 | ut = get_bsdthread_info(self); |
1524 | (void)retval; |
1525 | |
1526 | /* |
1527 | * The parameter list of audit_syscall_exit() was augmented to |
1528 | * take the Darwin syscall number as the first parameter, |
1529 | * which is currently required by mac_audit_postselect(). |
1530 | */ |
1531 | |
1532 | /* |
1533 | * The BSM token contains two components: an exit status as passed |
1534 | * to exit(), and a return value to indicate what sort of exit it |
1535 | * was. The exit status is WEXITSTATUS(rv), but it's not clear |
1536 | * what the return value is. |
1537 | */ |
1538 | AUDIT_ARG(exit, WEXITSTATUS(rv), 0); |
1539 | /* |
1540 | * TODO: what to audit here when jetsam calls exit and the uthread, |
1541 | * 'ut' does not belong to the proc, 'p'. |
1542 | */ |
	AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
1544 | |
1545 | DTRACE_PROC1(exit, int, CLD_EXITED); |
1546 | |
1547 | /* mark process is going to exit and pull out of DBG/disk throttle */ |
1548 | /* TODO: This should be done after becoming exit thread */ |
	proc_set_task_policy(proc_task(p), TASK_POLICY_ATTRIBUTE,
	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
1551 | |
1552 | proc_lock(p); |
	error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0));
1554 | if (error == EDEADLK) { |
1555 | /* |
1556 | * If proc_transstart() returns EDEADLK, then another thread |
1557 | * is either exec'ing or exiting. Return an error and allow |
1558 | * the other thread to continue. |
1559 | */ |
1560 | proc_unlock(p); |
		os_reason_free(exit_reason);
		if (current_proc() == p) {
			if (p->exit_thread == self) {
				panic("exit_thread failed to exit");
1565 | } |
1566 | |
1567 | if (thread_can_terminate) { |
1568 | thread_exception_return(); |
1569 | } |
1570 | } |
1571 | |
1572 | return error; |
1573 | } |
1574 | |
1575 | proc_exiting = !!(p->p_lflag & P_LEXIT); |
1576 | |
1577 | while (proc_exiting || p->exit_thread != self) { |
1578 | if (proc_exiting || sig_try_locked(p) <= 0) { |
			proc_transend(p, 1);
			os_reason_free(exit_reason);
1581 | |
1582 | if (get_threadtask(self) != task) { |
1583 | proc_unlock(p); |
1584 | return 0; |
1585 | } |
1586 | proc_unlock(p); |
1587 | |
			thread_terminate(self);
1589 | if (!thread_can_terminate) { |
1590 | return 0; |
1591 | } |
1592 | |
1593 | thread_exception_return(); |
1594 | /* NOTREACHED */ |
1595 | } |
1596 | sig_lock_to_exit(p); |
1597 | } |
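	/*
	 * Past this point, this thread won the election in sig_try_locked()
	 * and is the designated exit thread (p->exit_thread == self); any
	 * other thread racing into exit takes the branch above and
	 * terminates itself instead.
	 */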
1598 | |
1599 | if (exit_reason != OS_REASON_NULL) { |
1600 | KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE, |
1601 | proc_getpid(p), exit_reason->osr_namespace, |
1602 | exit_reason->osr_code, 0, 0); |
1603 | } |
1604 | |
1605 | assert(p->p_exit_reason == OS_REASON_NULL); |
1606 | p->p_exit_reason = exit_reason; |
1607 | |
1608 | p->p_lflag |= P_LEXIT; |
1609 | p->p_xstat = rv; |
1610 | p->p_lflag |= jetsam_flags; |
1611 | |
	proc_transend(p, 1);
1613 | proc_unlock(p); |
1614 | |
1615 | proc_prepareexit(p, rv, perf_notify); |
1616 | |
1617 | /* Last thread to terminate will call proc_exit() */ |
1618 | task_terminate_internal(task); |
1619 | |
1620 | return 0; |
1621 | } |
1622 | |
1623 | #if CONFIG_MEMORYSTATUS |
1624 | /* |
1625 | * Remove this process from jetsam bands for freezing or exiting. Note this will block, if the process |
1626 | * is currently being frozen. |
1627 | * The proc_list_lock is held by the caller. |
1628 | * NB: If the process should be ineligible for future freezing or jetsaming the caller should first set |
1629 | * the p_refcount P_REF_DEAD bit. |
1630 | */ |
1631 | static void |
1632 | proc_memorystatus_remove(proc_t p) |
1633 | { |
1634 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
	while (memorystatus_remove(p) == EAGAIN) {
		os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", proc_getpid(p));
		msleep(&p->p_memstat_state, &proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
	}
1639 | } |
1640 | #endif |
1641 | |
1642 | #if DEVELOPMENT |
1643 | boolean_t crash_behavior_test_mode = FALSE; |
1644 | boolean_t crash_behavior_test_would_panic = FALSE; |
SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_mode, CTLFLAG_RW, &crash_behavior_test_mode, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_would_panic, CTLFLAG_RW, &crash_behavior_test_would_panic, 0, "");
1647 | #endif /* DEVELOPMENT */ |
1648 | |
1649 | static bool |
1650 | _proc_is_crashing_signal(int sig) |
1651 | { |
1652 | bool result = false; |
1653 | switch (sig) { |
1654 | case SIGILL: |
1655 | case SIGABRT: |
1656 | case SIGFPE: |
1657 | case SIGBUS: |
1658 | case SIGSEGV: |
1659 | case SIGSYS: |
1660 | /* |
1661 | * If SIGTRAP is the terminating signal, then we can safely assume the |
1662 | * process crashed. (On iOS, SIGTRAP will be the terminating signal when |
1663 | * a process calls __builtin_trap(), which will abort.) |
1664 | */ |
1665 | case SIGTRAP: |
1666 | result = true; |
1667 | } |
1668 | |
1669 | return result; |
1670 | } |
1671 | |
1672 | static bool |
1673 | _proc_is_fatal_reason(os_reason_t reason) |
1674 | { |
1675 | if ((reason->osr_flags & OS_REASON_FLAG_ABORT) != 0) { |
1676 | /* Abort is always fatal even if there is no crash report generated */ |
1677 | return true; |
1678 | } |
1679 | if ((reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT) != 0) { |
1680 | /* |
1681 | * No crash report means this reason shouldn't be considered fatal |
1682 | * unless we are in test mode |
1683 | */ |
1684 | #if DEVELOPMENT |
1685 | if (crash_behavior_test_mode) { |
1686 | return true; |
1687 | } |
1688 | #endif /* DEVELOPMENT */ |
1689 | return false; |
1690 | } |
1691 | // By default all OS_REASON are fatal |
1692 | return true; |
1693 | } |
1694 | |
static TUNABLE(bool, panic_on_crash_disabled, "panic_on_crash_disabled", false);
1696 | |
1697 | static bool |
1698 | proc_should_trigger_panic(proc_t p, int rv) |
1699 | { |
1700 | if (p == initproc) { |
1701 | /* Always panic for launchd */ |
1702 | return true; |
1703 | } |
1704 | |
	if (panic_on_crash_disabled) {
		printf("panic-on-crash disabled via boot-arg\n");
		return false;
	}
1709 | |
1710 | if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_EXIT) != 0) { |
1711 | return true; |
1712 | } |
1713 | |
1714 | if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_SPAWN_FAIL) != 0) { |
1715 | return true; |
1716 | } |
1717 | |
1718 | if (p->p_posix_spawn_failed) { |
1719 | /* posix_spawn failures normally don't qualify for panics */ |
1720 | return false; |
1721 | } |
1722 | |
1723 | bool deadline_expired = (mach_continuous_time() > p->p_crash_behavior_deadline); |
1724 | if (p->p_crash_behavior_deadline != 0 && deadline_expired) { |
1725 | return false; |
1726 | } |
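	/*
	 * Note: the deadline is compared against mach_continuous_time(), which
	 * keeps advancing across sleep, so an armed p_crash_behavior_deadline
	 * bounds the panic-on-crash window in wall-clock terms rather than
	 * awake time.
	 */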
1727 | |
1728 | if (WIFEXITED(rv)) { |
1729 | int code = WEXITSTATUS(rv); |
1730 | |
1731 | if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT) != 0) { |
1732 | if (code == 0) { |
1733 | /* No panic if we exit 0 */ |
1734 | return false; |
1735 | } else { |
1736 | /* Panic on non-zero exit */ |
1737 | return true; |
1738 | } |
1739 | } else { |
1740 | /* No panic on normal exit if the process doesn't have the non-zero flag set */ |
1741 | return false; |
1742 | } |
1743 | } else if (WIFSIGNALED(rv)) { |
1744 | int signal = WTERMSIG(rv); |
1745 | /* This is a crash (non-normal exit) */ |
1746 | if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_CRASH) != 0) { |
1747 | os_reason_t reason = p->p_exit_reason; |
1748 | if (reason != OS_REASON_NULL) { |
1749 | if (!_proc_is_fatal_reason(reason)) { |
1750 | // Skip non-fatal terminate_with_reason |
1751 | return false; |
1752 | } |
				if (reason->osr_namespace == OS_REASON_SIGNAL) {
					/*
					 * OS_REASON_SIGNAL delivers as a SIGKILL with the actual signal
					 * in osr_code, so we should check that signal here.
					 */
					return _proc_is_crashing_signal((int)reason->osr_code);
				} else {
					/*
					 * This branch covers the case of terminate_with_reason, which
					 * delivers a SIGTERM; that is still considered a crash even
					 * though the signal itself is not a crashing signal.
					 */
					return true;
				}
			}
			return _proc_is_crashing_signal(signal);
1769 | } else { |
1770 | return false; |
1771 | } |
1772 | } else { |
1773 | /* |
1774 | * This branch implies that we didn't exit normally nor did we receive |
1775 | * a signal. This should be unreachable. |
1776 | */ |
1777 | return true; |
1778 | } |
1779 | } |
1780 | |
1781 | static void |
1782 | proc_crash_coredump(proc_t p) |
1783 | { |
1784 | (void)p; |
1785 | #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP |
1786 | /* |
1787 | * For debugging purposes, generate a core file of initproc before |
1788 | * panicking. Leave at least 300 MB free on the root volume, and ignore |
1789 | * the process's corefile ulimit. fsync() the file to ensure it lands on disk |
1790 | * before the panic hits. |
1791 | */ |
1792 | |
1793 | int err; |
1794 | uint64_t coredump_start = mach_absolute_time(); |
1795 | uint64_t coredump_end; |
1796 | clock_sec_t tv_sec; |
1797 | clock_usec_t tv_usec; |
1798 | uint32_t tv_msec; |
1799 | |
1800 | |
1801 | err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC); |
1802 | |
1803 | coredump_end = mach_absolute_time(); |
1804 | |
1805 | absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec); |
1806 | |
1807 | tv_msec = tv_usec / 1000; |
1808 | |
1809 | if (err != 0) { |
1810 | printf("Failed to generate core file for pid: %d: error %d, took %d.%03d seconds\n" , |
1811 | proc_getpid(p), err, (uint32_t)tv_sec, tv_msec); |
1812 | } else { |
1813 | printf("Generated core file for pid: %d in %d.%03d seconds\n" , |
1814 | proc_getpid(p), (uint32_t)tv_sec, tv_msec); |
1815 | } |
1816 | #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */ |
1817 | } |
1818 | |
1819 | static void |
1820 | proc_handle_critical_exit(proc_t p, int rv) |
1821 | { |
1822 | if (!proc_should_trigger_panic(p, rv)) { |
1823 | // No panic, bail out |
1824 | return; |
1825 | } |
1826 | |
1827 | #if DEVELOPMENT |
1828 | if (crash_behavior_test_mode) { |
1829 | crash_behavior_test_would_panic = TRUE; |
1830 | // Force test mode off after hitting a panic |
1831 | crash_behavior_test_mode = FALSE; |
1832 | return; |
1833 | } |
1834 | #endif /* DEVELOPMENT */ |
1835 | |
	char *exit_reason_desc = exit_reason_get_string_desc(p->p_exit_reason);

	if (p->p_exit_reason == OS_REASON_NULL) {
		printf("pid %d exited -- no exit reason available -- (signal %d, exit %d)\n",
		    proc_getpid(p), WTERMSIG(rv), WEXITSTATUS(rv));
	} else {
		printf("pid %d exited -- exit reason namespace %d subcode 0x%llx, description %s\n", proc_getpid(p),
		    p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, exit_reason_desc ?
		    exit_reason_desc : "none");
	}
1846 | |
1847 | const char *prefix_str; |
1848 | char prefix_str_buf[128]; |
1849 | |
	if (p == initproc) {
		if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
			prefix_str = "LTE preinit process exited";
		} else if (initproc_spawned) {
			prefix_str = "initproc exited";
		} else {
			prefix_str = "initproc failed to start";
		}
	} else {
		/* For processes that aren't launchd, just use the process name and pid */
		snprintf(prefix_str_buf, sizeof(prefix_str_buf), "%s[%d] exited", p->p_name, proc_getpid(p));
		prefix_str = prefix_str_buf;
	}
1863 | |
1864 | proc_crash_coredump(p); |
1865 | |
1866 | sync(p, (void *)NULL, (int *)NULL); |
1867 | |
	if (p->p_exit_reason == OS_REASON_NULL) {
		panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
		    prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""));
	} else {
		panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
		    ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""),
		    prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
		    exit_reason_desc ? exit_reason_desc : "none");
	}
1877 | } |
1878 | |
1879 | void |
1880 | proc_prepareexit(proc_t p, int rv, boolean_t perf_notify) |
1881 | { |
1882 | mach_exception_data_type_t code = 0, subcode = 0; |
1883 | exception_type_t etype; |
1884 | |
1885 | struct uthread *ut; |
1886 | thread_t self = current_thread(); |
1887 | ut = get_bsdthread_info(self); |
1888 | struct rusage_superset *rup; |
1889 | int kr = 0; |
1890 | int create_corpse = FALSE; |
1891 | bool corpse_source = false; |
1892 | task_t task = proc_task(p); |
1893 | |
1894 | |
1895 | if (p->p_crash_behavior != 0 || p == initproc) { |
1896 | proc_handle_critical_exit(p, rv); |
1897 | } |
1898 | |
1899 | if (task) { |
1900 | corpse_source = vm_map_is_corpse_source(map: get_task_map(task)); |
1901 | } |
1902 | |
1903 | /* |
1904 | * Generate a corefile/crashlog if: |
1905 | * The process doesn't have an exit reason that indicates no crash report should be created |
1906 | * AND any of the following are true: |
1907 | * - The process was terminated due to a fatal signal that generates a core |
1908 | * - The process was killed due to a code signing violation |
1909 | * - The process has an exit reason that indicates we should generate a crash report |
1910 | * |
1911 | * The first condition is necessary because abort_with_reason()/payload() use SIGABRT |
1912 | * (which normally triggers a core) but may indicate that no crash report should be created. |
1913 | */ |
1914 | if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) && |
1915 | (hassigprop(WTERMSIG(rv), SA_CORE) || ((proc_getcsflags(p) & CS_KILLED) != 0) || |
1916 | (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & |
1917 | OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) { |
1918 | /* |
1919 | * Workaround for processes checking up on PT_DENY_ATTACH: |
1920 | * should be backed out post-Leopard (details in 5431025). |
1921 | */ |
1922 | if ((SIGSEGV == WTERMSIG(rv)) && |
1923 | (p->p_pptr->p_lflag & P_LNOATTACH)) { |
1924 | goto skipcheck; |
1925 | } |
1926 | |
1927 | /* |
1928 | * Crash Reporter looks for the signal value, original exception |
1929 | * type, and low 20 bits of the original code in code[0] |
1930 | * (8, 4, and 20 bits respectively). code[1] is unmodified. |
1931 | */ |
1932 | code = ((WTERMSIG(rv) & 0xff) << 24) | |
1933 | ((ut->uu_exception & 0x0f) << 20) | |
1934 | ((int)ut->uu_code & 0xfffff); |
1935 | subcode = ut->uu_subcode; |
1936 | etype = ut->uu_exception; |
1937 | |
		/* Default to EXC_CRASH if the exception is not an EXC_RESOURCE or EXC_GUARD */
		if (etype != EXC_RESOURCE && etype != EXC_GUARD) {
			etype = EXC_CRASH;
		}
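		/*
		 * Worked example of the code[0] packing above, assuming a
		 * SIGSEGV (11) from EXC_BAD_ACCESS (1) with a low-order code
		 * of 0x2: (11 << 24) | (1 << 20) | 0x2 == 0x0b100002.
		 */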
1942 | |
1943 | #if (DEVELOPMENT || DEBUG) |
1944 | if (p->p_pid <= exception_log_max_pid) { |
1945 | char *proc_name = proc_best_name(p); |
		if (PROC_HAS_EXITREASON(p)) {
			record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
			    "pid: %d -- process name: %s -- exit reason namespace: %d -- subcode: 0x%llx -- description: %s",
			    proc_getpid(p), proc_name, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
			    exit_reason_get_string_desc(p->p_exit_reason));
		} else {
			record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
			    "pid: %d -- process name: %s -- exit status %d",
			    proc_getpid(p), proc_name, WEXITSTATUS(rv));
		}
1956 | } |
1957 | #endif |
1958 | const bool fatal = false; |
1959 | kr = task_exception_notify(EXC_CRASH, code, subcode, fatal); |
1960 | /* Nobody handled EXC_CRASH?? remember to make corpse */ |
1961 | if ((kr != 0 || corpse_source) && p == current_proc()) { |
1962 | /* |
1963 | * Do not create corpse when exit is called from jetsam thread. |
1964 | * Corpse creation code requires that proc_prepareexit is |
1965 | * called by the exiting proc and not the kernel_proc. |
1966 | */ |
1967 | create_corpse = TRUE; |
1968 | } |
1969 | |
1970 | /* |
1971 | * Revalidate the code signing of the text pages around current PC. |
1972 | * This is an attempt to detect and repair faults due to memory |
1973 | * corruption of text pages. |
1974 | * |
1975 | * The goal here is to fixup infrequent memory corruptions due to |
1976 | * things like aging RAM bit flips. So the approach is to only expect |
1977 | * to have to fixup one thing per crash. This also limits the amount |
1978 | * of extra work we cause in case this is a development kernel with an |
1979 | * active memory stomp happening. |
1980 | */ |
1981 | uintptr_t bt[2]; |
1982 | struct backtrace_user_info btinfo = BTUINFO_INIT; |
1983 | unsigned int frame_count = backtrace_user(bt, btlen: 2, NULL, info_out: &btinfo); |
1984 | int bt_err = btinfo.btui_error; |
1985 | if (bt_err == 0 && frame_count >= 1) { |
1986 | /* |
1987 | * First check at the page containing the current PC. |
1988 | * This passes if the page code signs -or- if we can't figure out |
1989 | * what is at that address. The latter action is so we continue checking |
1990 | * previous pages which may be corrupt and caused a wild branch. |
1991 | */ |
1992 | kr = revalidate_text_page(task, bt[0]); |
1993 | |
1994 | /* No corruption found, check the previous sequential page */ |
1995 | if (kr == KERN_SUCCESS) { |
1996 | kr = revalidate_text_page(task, bt[0] - get_task_page_size(task)); |
1997 | } |
1998 | |
1999 | /* Still no corruption found, check the current function's caller */ |
2000 | if (kr == KERN_SUCCESS) { |
2001 | if (frame_count > 1 && |
2002 | atop(bt[0]) != atop(bt[1]) && /* don't recheck PC page */ |
2003 | atop(bt[0]) - 1 != atop(bt[1])) { /* don't recheck page before */ |
2004 | kr = revalidate_text_page(task, (vm_map_offset_t)bt[1]); |
2005 | } |
2006 | } |
2007 | |
2008 | /* |
2009 | * Log that we found a corruption. |
2010 | */ |
			if (kr != KERN_SUCCESS) {
				os_log(OS_LOG_DEFAULT,
				    "Text page corruption detected in dying process %d\n", proc_getpid(p));
			}
2015 | } |
2016 | } |
2017 | |
2018 | skipcheck: |
2019 | if (task_is_driver(task) && PROC_HAS_EXITREASON(p)) { |
2020 | IOUserServerRecordExitReason(task, reason: p->p_exit_reason); |
2021 | } |
2022 | |
2023 | /* Notify the perf server? */ |
2024 | if (perf_notify) { |
2025 | (void)sys_perf_notify(thread: self, pid: proc_getpid(p)); |
2026 | } |
2027 | |
2028 | |
	/* stash the usage into corpse data if create_corpse == TRUE */
	if (create_corpse == TRUE) {
		kr = task_mark_corpse(task);
		if (kr != KERN_SUCCESS) {
			if (kr == KERN_NO_SPACE) {
				printf("Process[%d] has no vm space for corpse info.\n", proc_getpid(p));
			} else if (kr == KERN_NOT_SUPPORTED) {
				printf("Process[%d] was destined to be a corpse, but corpses are disabled by config.\n", proc_getpid(p));
			} else if (kr == KERN_TERMINATED) {
				printf("Process[%d] has been terminated before it could be converted to a corpse.\n", proc_getpid(p));
			} else {
				printf("Process[%d] crashed: %s. Too many corpses being created.\n", proc_getpid(p), p->p_comm);
			}
			create_corpse = FALSE;
		}
	}
2045 | |
2046 | if (corpse_source && !create_corpse) { |
2047 | /* vm_map was marked for corpse, but we decided to not create one, unmark the vmmap */ |
2048 | vm_map_unset_corpse_source(map: get_task_map(task)); |
2049 | } |
2050 | |
2051 | if (!proc_is_shadow(p)) { |
2052 | /* |
2053 | * Before this process becomes a zombie, stash resource usage |
2054 | * stats in the proc for external observers to query |
2055 | * via proc_pid_rusage(). |
2056 | * |
2057 | * If the zombie allocation fails, just punt the stats. |
2058 | */ |
2059 | rup = zalloc(kt_view: zombie_zone); |
2060 | gather_rusage_info(p, ru: &rup->ri, RUSAGE_INFO_CURRENT); |
2061 | rup->ri.ri_phys_footprint = 0; |
2062 | rup->ri.ri_proc_exit_abstime = mach_absolute_time(); |
2063 | /* |
2064 | * Make the rusage_info visible to external observers |
2065 | * only after it has been completely filled in. |
2066 | */ |
2067 | p->p_ru = rup; |
2068 | } |
2069 | |
2070 | if (create_corpse) { |
2071 | int est_knotes = 0, num_knotes = 0; |
2072 | uint64_t *buffer = NULL; |
2073 | uint32_t buf_size = 0; |
2074 | |
2075 | /* Get all the udata pointers from kqueue */ |
2076 | est_knotes = kevent_proc_copy_uptrs(proc: p, NULL, bufsize: 0); |
2077 | if (est_knotes > 0) { |
2078 | buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t)); |
2079 | buffer = kalloc_data(buf_size, Z_WAITOK); |
2080 | if (buffer) { |
2081 | num_knotes = kevent_proc_copy_uptrs(proc: p, buf: buffer, bufsize: buf_size); |
2082 | if (num_knotes > est_knotes + 32) { |
2083 | num_knotes = est_knotes + 32; |
2084 | } |
2085 | } |
2086 | } |
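		/*
		 * The 32-entry slack on top of the estimate absorbs knotes
		 * registered between the sizing call and the copying call; if
		 * even that races, num_knotes is clamped so the copy below
		 * never overstates the buffer contents.
		 */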
2087 | |
2088 | /* Update the code, subcode based on exit reason */ |
2089 | proc_update_corpse_exception_codes(p, code: &code, subcode: &subcode); |
2090 | populate_corpse_crashinfo(p, corpse_task: task, rup, |
2091 | code, subcode, udata_buffer: buffer, num_udata: num_knotes, NULL, etype); |
2092 | kfree_data(buffer, buf_size); |
2093 | } |
	/*
	 * Remove proc from allproc queue and from pidhash chain.
	 * Need to do this before we do anything that can block.
	 * Not doing so causes things like mount() to find this on allproc
	 * in a partially cleaned state.
	 */
2100 | |
2101 | proc_list_lock(); |
2102 | |
2103 | #if CONFIG_MEMORYSTATUS |
2104 | proc_memorystatus_remove(p); |
2105 | #endif |
2106 | |
2107 | LIST_REMOVE(p, p_list); |
2108 | LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */ |
2109 | /* will not be visible via proc_find */ |
2110 | os_atomic_or(&p->p_refcount, P_REF_DEAD, relaxed); |
2111 | |
2112 | proc_list_unlock(); |
2113 | |
2114 | /* |
2115 | * If parent is waiting for us to exit or exec, |
2116 | * P_LPPWAIT is set; we will wakeup the parent below. |
2117 | */ |
2118 | proc_lock(p); |
2119 | p->p_lflag &= ~(P_LTRACED | P_LPPWAIT); |
2120 | p->p_sigignore = ~(sigcantmask); |
2121 | |
2122 | /* |
2123 | * If a thread is already waiting for us in proc_exit, |
2124 | * P_LTERM is set, wakeup the thread. |
2125 | */ |
2126 | if (p->p_lflag & P_LTERM) { |
2127 | wakeup(chan: &p->exit_thread); |
2128 | } else { |
2129 | p->p_lflag |= P_LTERM; |
2130 | } |
2131 | |
2132 | /* If current proc is exiting, ignore signals on the exit thread */ |
2133 | if (p == current_proc()) { |
2134 | ut->uu_siglist = 0; |
2135 | } |
2136 | proc_unlock(p); |
2137 | } |
2138 | |
2139 | void |
2140 | proc_exit(proc_t p) |
2141 | { |
2142 | proc_t q; |
2143 | proc_t pp; |
2144 | struct task *task = proc_task(p); |
2145 | vnode_t tvp = NULLVP; |
2146 | struct pgrp * pg; |
2147 | struct session *sessp; |
2148 | struct uthread * uth; |
2149 | pid_t pid; |
2150 | int exitval; |
2151 | int knote_hint; |
2152 | |
2153 | uth = current_uthread(); |
2154 | |
2155 | proc_lock(p); |
2156 | proc_transstart(p, locked: 1, non_blocking: 0); |
2157 | if (!(p->p_lflag & P_LEXIT)) { |
2158 | /* |
2159 | * This can happen if a thread_terminate() occurs |
2160 | * in a single-threaded process. |
2161 | */ |
2162 | p->p_lflag |= P_LEXIT; |
2163 | proc_transend(p, locked: 1); |
2164 | proc_unlock(p); |
2165 | proc_prepareexit(p, rv: 0, TRUE); |
2166 | (void) task_terminate_internal(task); |
2167 | proc_lock(p); |
2168 | } else if (!(p->p_lflag & P_LTERM)) { |
2169 | proc_transend(p, locked: 1); |
2170 | /* Jetsam is in middle of calling proc_prepareexit, wait for it */ |
2171 | p->p_lflag |= P_LTERM; |
		msleep(&p->exit_thread, &p->p_mlock, PWAIT, "proc_prepareexit_wait", NULL);
2173 | } else { |
2174 | proc_transend(p, locked: 1); |
2175 | } |
2176 | |
2177 | p->p_lflag |= P_LPEXIT; |
2178 | |
2179 | /* |
2180 | * Other kernel threads may be in the middle of signalling this process. |
2181 | * Wait for those threads to wrap it up before making the process |
2182 | * disappear on them. |
2183 | */ |
2184 | if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) { |
2185 | p->p_sigwaitcnt++; |
2186 | while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) { |
			msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
2188 | } |
2189 | p->p_sigwaitcnt--; |
2190 | } |
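	/*
	 * The handshake above lets in-flight signal delivery finish:
	 * P_LINSIGNAL marks a signaller currently operating on the proc, and
	 * p_sigwaitcnt counts threads blocked here, so this thread sleeps
	 * until no delivery is in progress and it is the last waiter standing.
	 */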
2191 | |
2192 | proc_unlock(p); |
2193 | pid = proc_getpid(p); |
2194 | exitval = p->p_xstat; |
2195 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, |
2196 | BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START, |
2197 | pid, exitval, 0, 0, 0); |
2198 | |
2199 | #if DEVELOPMENT || DEBUG |
2200 | proc_exit_lpexit_check(pid, PELS_POS_START); |
2201 | #endif |
2202 | |
2203 | #if CONFIG_DTRACE |
2204 | dtrace_proc_exit(p); |
2205 | #endif |
2206 | |
2207 | proc_refdrain(p); |
2208 | /* We now have unique ref to the proc */ |
2209 | |
2210 | /* if any pending cpu limits action, clear it */ |
2211 | task_clear_cpuusage(task: proc_task(p), TRUE); |
2212 | |
2213 | workq_mark_exiting(p); |
2214 | |
2215 | /* |
2216 | * need to cancel async IO requests that can be cancelled and wait for those |
2217 | * already active. MAY BLOCK! |
2218 | */ |
2219 | _aio_exit( p ); |
2220 | |
2221 | /* |
2222 | * Close open files and release open-file table. |
2223 | * This may block! |
2224 | */ |
2225 | fdt_invalidate(p); |
2226 | |
2227 | /* |
2228 | * Once all the knotes, kqueues & workloops are destroyed, get rid of the |
2229 | * workqueue. |
2230 | */ |
2231 | workq_exit(p); |
2232 | |
	if (uth->uu_lowpri_window) {
		/*
		 * The task is marked as a low priority I/O type
		 * and the I/O we issued while flushing files on close
		 * collided with normal I/O operations...
		 * There is no need to throttle this thread since it's going away,
		 * but we do need to update our bookkeeping w.r.t. throttled threads.
		 */
		throttle_lowpri_io(0);
	}
2243 | |
2244 | if (p->p_lflag & P_LNSPACE_RESOLVER) { |
2245 | /* |
2246 | * The namespace resolver is exiting; there may be |
2247 | * outstanding materialization requests to clean up. |
2248 | */ |
2249 | nspace_resolver_exited(p); |
2250 | } |
2251 | |
2252 | #if SYSV_SHM |
2253 | /* Close ref SYSV Shared memory*/ |
2254 | if (p->vm_shm) { |
2255 | shmexit(p); |
2256 | } |
2257 | #endif |
2258 | #if SYSV_SEM |
2259 | /* Release SYSV semaphores */ |
2260 | semexit(p); |
2261 | #endif |
2262 | |
2263 | #if PSYNCH |
2264 | pth_proc_hashdelete(p); |
2265 | #endif /* PSYNCH */ |
2266 | |
2267 | pg = proc_pgrp(p, &sessp); |
2268 | if (SESS_LEADER(p, sessp)) { |
2269 | if (sessp->s_ttyvp != NULLVP) { |
2270 | struct vnode *ttyvp; |
2271 | int ttyvid; |
2272 | int cttyflag = 0; |
2273 | struct vfs_context context; |
2274 | struct tty *tp; |
2275 | struct pgrp *tpgrp = PGRP_NULL; |
2276 | |
2277 | /* |
2278 | * Controlling process. |
2279 | * Signal foreground pgrp, |
2280 | * drain controlling terminal |
2281 | * and revoke access to controlling terminal. |
2282 | */ |
2283 | |
2284 | proc_list_lock(); /* prevent any t_pgrp from changing */ |
2285 | session_lock(sess: sessp); |
2286 | if (sessp->s_ttyp && sessp->s_ttyp->t_session == sessp) { |
2287 | tpgrp = tty_pgrp_locked(tp: sessp->s_ttyp); |
2288 | } |
2289 | proc_list_unlock(); |
2290 | |
2291 | if (tpgrp != PGRP_NULL) { |
2292 | session_unlock(sess: sessp); |
2293 | pgsignal(pgrp: tpgrp, SIGHUP, checkctty: 1); |
2294 | pgrp_rele(pgrp: tpgrp); |
2295 | session_lock(sess: sessp); |
2296 | } |
2297 | |
2298 | cttyflag = (os_atomic_andnot_orig(&sessp->s_refcount, |
2299 | S_CTTYREF, relaxed) & S_CTTYREF); |
2300 | ttyvp = sessp->s_ttyvp; |
2301 | ttyvid = sessp->s_ttyvid; |
2302 | tp = session_clear_tty_locked(sess: sessp); |
2303 | if (ttyvp) { |
2304 | vnode_hold(vp: ttyvp); |
2305 | } |
2306 | session_unlock(sess: sessp); |
2307 | |
2308 | if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) { |
2309 | if (tp != TTY_NULL) { |
2310 | tty_lock(tp); |
2311 | (void) ttywait(tp); |
2312 | tty_unlock(tp); |
2313 | } |
2314 | |
2315 | context.vc_thread = NULL; |
2316 | context.vc_ucred = kauth_cred_proc_ref(procp: p); |
2317 | VNOP_REVOKE(ttyvp, REVOKEALL, &context); |
2318 | if (cttyflag) { |
2319 | /* |
2320 | * Release the extra usecount taken in cttyopen. |
2321 | * usecount should be released after VNOP_REVOKE is called. |
2322 | * This usecount was taken to ensure that |
2323 | * the VNOP_REVOKE results in a close to |
2324 | * the tty since cttyclose is a no-op. |
2325 | */ |
2326 | vnode_rele(vp: ttyvp); |
2327 | } |
2328 | vnode_put(vp: ttyvp); |
2329 | kauth_cred_unref(&context.vc_ucred); |
2330 | vnode_drop(vp: ttyvp); |
2331 | ttyvp = NULLVP; |
2332 | } |
2333 | if (ttyvp) { |
2334 | vnode_drop(vp: ttyvp); |
2335 | } |
2336 | if (tp) { |
2337 | ttyfree(tp); |
2338 | } |
2339 | } |
2340 | session_lock(sess: sessp); |
2341 | sessp->s_leader = NULL; |
2342 | session_unlock(sess: sessp); |
2343 | } |
2344 | |
2345 | if (!proc_is_shadow(p)) { |
2346 | fixjobc(p, pgrp: pg, entering: 0); |
2347 | } |
2348 | pgrp_rele(pgrp: pg); |
2349 | |
2350 | /* |
2351 | * Change RLIMIT_FSIZE for accounting/debugging. |
2352 | */ |
2353 | proc_limitsetcur_fsize(p, RLIM_INFINITY); |
2354 | |
2355 | (void)acct_process(p); |
2356 | |
2357 | proc_list_lock(); |
2358 | |
2359 | if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) { |
2360 | p->p_listflag &= ~P_LIST_EXITCOUNT; |
2361 | proc_shutdown_exitcount--; |
2362 | if (proc_shutdown_exitcount == 0) { |
2363 | wakeup(chan: &proc_shutdown_exitcount); |
2364 | } |
2365 | } |
2366 | |
2367 | /* wait till parentrefs are dropped and grant no more */ |
2368 | proc_childdrainstart(p); |
2369 | while ((q = p->p_children.lh_first) != NULL) { |
2370 | if (q->p_stat == SZOMB) { |
2371 | if (p != q->p_pptr) { |
				panic("parent child linkage broken");
2373 | } |
2374 | /* check for sysctl zomb lookup */ |
2375 | while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { |
				msleep(&q->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2377 | } |
2378 | q->p_listflag |= P_LIST_WAITING; |
			/*
			 * This is a named reference and it is not granted
			 * if the reap is already in progress. So we get
			 * the reference here exclusively and there can be
			 * no waiters. So there is no need for a wakeup
			 * after we are done. Also the reap frees the structure
			 * and the proc struct cannot be used for wakeups as well.
			 * It is safe to use q here as this is a system reap.
			 */
2388 | reap_flags_t reparent_flags = (q->p_listflag & P_LIST_DEADPARENT) ? |
2389 | REAP_REPARENTED_TO_INIT : 0; |
2390 | reap_child_locked(parent: p, child: q, |
2391 | flags: REAP_DEAD_PARENT | REAP_LOCKED | reparent_flags); |
2392 | } else { |
2393 | /* |
2394 | * Traced processes are killed |
2395 | * since their existence means someone is messing up. |
2396 | */ |
2397 | if (q->p_lflag & P_LTRACED) { |
2398 | struct proc *opp; |
2399 | |
2400 | /* |
2401 | * Take a reference on the child process to |
2402 | * ensure it doesn't exit and disappear between |
2403 | * the time we drop the list_lock and attempt |
2404 | * to acquire its proc_lock. |
2405 | */ |
2406 | if (proc_ref(p: q, true) != q) { |
2407 | continue; |
2408 | } |
2409 | |
2410 | proc_list_unlock(); |
2411 | |
2412 | opp = proc_find(pid: q->p_oppid); |
2413 | if (opp != PROC_NULL) { |
2414 | proc_list_lock(); |
2415 | q->p_oppid = 0; |
2416 | proc_list_unlock(); |
2417 | proc_reparentlocked(child: q, newparent: opp, cansignal: 0, locked: 0); |
2418 | proc_rele(p: opp); |
2419 | } else { |
2420 | /* original parent exited while traced */ |
2421 | proc_list_lock(); |
2422 | q->p_listflag |= P_LIST_DEADPARENT; |
2423 | q->p_oppid = 0; |
2424 | proc_list_unlock(); |
2425 | proc_reparentlocked(child: q, newparent: initproc, cansignal: 0, locked: 0); |
2426 | } |
2427 | |
2428 | proc_lock(q); |
2429 | q->p_lflag &= ~P_LTRACED; |
2430 | |
2431 | if (q->sigwait_thread) { |
2432 | thread_t thread = q->sigwait_thread; |
2433 | |
2434 | proc_unlock(q); |
					/*
					 * The sigwait_thread could be stopped at a
					 * breakpoint. Wake it up to kill it.
					 * Need to do this as it could be a thread which is not
					 * the first thread in the task. So any attempt to kill
					 * the process would result in a deadlock on q->sigwait.
					 */
2442 | thread_resume(target_act: thread); |
2443 | clear_wait(thread, THREAD_INTERRUPTED); |
2444 | threadsignal(sig_actthread: thread, SIGKILL, code: 0, TRUE); |
2445 | } else { |
2446 | proc_unlock(q); |
2447 | } |
2448 | |
2449 | psignal(p: q, SIGKILL); |
2450 | proc_list_lock(); |
2451 | proc_rele(p: q); |
2452 | } else { |
2453 | q->p_listflag |= P_LIST_DEADPARENT; |
2454 | proc_reparentlocked(child: q, newparent: initproc, cansignal: 0, locked: 1); |
2455 | } |
2456 | } |
2457 | } |
2458 | |
2459 | proc_childdrainend(p); |
2460 | proc_list_unlock(); |
2461 | |
2462 | #if CONFIG_MACF |
2463 | if (!proc_is_shadow(p)) { |
2464 | /* |
2465 | * Notify MAC policies that proc is dead. |
2466 | * This should be replaced with proper label management |
2467 | * (rdar://problem/32126399). |
2468 | */ |
2469 | mac_proc_notify_exit(proc: p); |
2470 | } |
2471 | #endif |
2472 | |
2473 | /* |
2474 | * Release reference to text vnode |
2475 | */ |
2476 | tvp = p->p_textvp; |
2477 | p->p_textvp = NULL; |
2478 | if (tvp != NULLVP) { |
2479 | vnode_rele(vp: tvp); |
2480 | } |
2481 | |
2482 | /* |
2483 | * Save exit status and final rusage info, adding in child rusage |
2484 | * info and self times. If we were unable to allocate a zombie |
2485 | * structure, this information is lost. |
2486 | */ |
2487 | if (p->p_ru != NULL) { |
2488 | calcru(p, up: &p->p_stats->p_ru.ru_utime, sp: &p->p_stats->p_ru.ru_stime, NULL); |
2489 | p->p_ru->ru = p->p_stats->p_ru; |
2490 | |
2491 | ruadd(ru: &(p->p_ru->ru), ru2: &p->p_stats->p_cru); |
2492 | } |
2493 | |
2494 | /* |
2495 | * Free up profiling buffers. |
2496 | */ |
2497 | { |
2498 | struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn; |
2499 | |
2500 | p1 = p0->pr_next; |
2501 | p0->pr_next = NULL; |
2502 | p0->pr_scale = 0; |
2503 | |
2504 | for (; p1 != NULL; p1 = pn) { |
2505 | pn = p1->pr_next; |
2506 | kfree_type(struct uprof, p1); |
2507 | } |
2508 | } |
2509 | |
2510 | proc_free_realitimer(proc: p); |
2511 | |
2512 | /* |
2513 | * Other substructures are freed from wait(). |
2514 | */ |
2515 | zfree(proc_stats_zone, p->p_stats); |
2516 | p->p_stats = NULL; |
2517 | |
2518 | if (p->p_subsystem_root_path) { |
2519 | zfree(ZV_NAMEI, p->p_subsystem_root_path); |
2520 | p->p_subsystem_root_path = NULL; |
2521 | } |
2522 | |
2523 | proc_limitdrop(p); |
2524 | |
2525 | #if DEVELOPMENT || DEBUG |
2526 | proc_exit_lpexit_check(pid, PELS_POS_PRE_TASK_DETACH); |
2527 | #endif |
2528 | |
2529 | /* |
2530 | * Finish up by terminating the task |
2531 | * and halt this thread (only if a |
2532 | * member of the task exiting). |
2533 | */ |
2534 | proc_set_task(p, TASK_NULL); |
2535 | set_bsdtask_info(task, NULL); |
2536 | clear_thread_ro_proc(get_machthread(uth)); |
2537 | |
2538 | #if DEVELOPMENT || DEBUG |
2539 | proc_exit_lpexit_check(pid, PELS_POS_POST_TASK_DETACH); |
2540 | #endif |
2541 | |
2542 | knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff); |
2543 | proc_knote(p, hint: knote_hint); |
2544 | |
2545 | /* mark the thread as the one that is doing proc_exit |
2546 | * no need to hold proc lock in uthread_free |
2547 | */ |
2548 | uth->uu_flag |= UT_PROCEXIT; |
2549 | /* |
2550 | * Notify parent that we're gone. |
2551 | */ |
2552 | pp = proc_parent(p); |
2553 | if (proc_is_shadow(p)) { |
2554 | /* kernel can reap this one, no need to move it to launchd */ |
2555 | proc_list_lock(); |
2556 | p->p_listflag |= P_LIST_DEADPARENT; |
2557 | proc_list_unlock(); |
2558 | } else if (pp->p_flag & P_NOCLDWAIT) { |
2559 | if (p->p_ru != NULL) { |
2560 | proc_lock(pp); |
2561 | #if 3839178 |
2562 | /* |
2563 | * If the parent is ignoring SIGCHLD, then POSIX requires |
2564 | * us to not add the resource usage to the parent process - |
2565 | * we are only going to hand it off to init to get reaped. |
2566 | * We should contest the standard in this case on the basis |
2567 | * of RLIMIT_CPU. |
2568 | */ |
2569 | #else /* !3839178 */ |
2570 | /* |
2571 | * Add child resource usage to parent before giving |
2572 | * zombie to init. If we were unable to allocate a |
2573 | * zombie structure, this information is lost. |
2574 | */ |
2575 | ruadd(&pp->p_stats->p_cru, &p->p_ru->ru); |
2576 | #endif /* !3839178 */ |
2577 | update_rusage_info_child(ru: &pp->p_stats->ri_child, ru_current: &p->p_ru->ri); |
2578 | proc_unlock(pp); |
2579 | } |
2580 | |
2581 | /* kernel can reap this one, no need to move it to launchd */ |
2582 | proc_list_lock(); |
2583 | p->p_listflag |= P_LIST_DEADPARENT; |
2584 | proc_list_unlock(); |
2585 | } |
2586 | if (!proc_is_shadow(p) && |
2587 | ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid)) { |
2588 | if (pp != initproc) { |
2589 | proc_lock(pp); |
2590 | pp->si_pid = proc_getpid(p); |
2591 | pp->p_xhighbits = p->p_xhighbits; |
2592 | p->p_xhighbits = 0; |
2593 | pp->si_status = p->p_xstat; |
2594 | pp->si_code = CLD_EXITED; |
2595 | /* |
2596 | * p_ucred usage is safe as it is an exiting process |
2597 | * and reference is dropped in reap |
2598 | */ |
2599 | pp->si_uid = kauth_cred_getruid(cred: proc_ucred_unsafe(p)); |
2600 | proc_unlock(pp); |
2601 | } |
2602 | /* mark as a zombie */ |
2603 | /* No need to take proc lock as all refs are drained and |
2604 | * no one except parent (reaping ) can look at this. |
2605 | * The write is to an int and is coherent. Also parent is |
2606 | * keyed off of list lock for reaping |
2607 | */ |
2608 | DTRACE_PROC2(exited, proc_t, p, int, exitval); |
2609 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, |
2610 | BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END, |
2611 | pid, exitval, 0, 0, 0); |
2612 | p->p_stat = SZOMB; |
2613 | /* |
2614 | * The current process can be reaped so, no one |
2615 | * can depend on this |
2616 | */ |
2617 | |
2618 | psignal(p: pp, SIGCHLD); |
2619 | |
2620 | /* and now wakeup the parent */ |
2621 | proc_list_lock(); |
2622 | wakeup(chan: (caddr_t)pp); |
2623 | proc_list_unlock(); |
2624 | } else { |
2625 | /* should be fine as parent proc would be initproc */ |
2626 | /* mark as a zombie */ |
2627 | /* No need to take proc lock as all refs are drained and |
2628 | * no one except parent (reaping ) can look at this. |
2629 | * The write is to an int and is coherent. Also parent is |
2630 | * keyed off of list lock for reaping |
2631 | */ |
2632 | DTRACE_PROC2(exited, proc_t, p, int, exitval); |
2633 | proc_list_lock(); |
2634 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, |
2635 | BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END, |
2636 | pid, exitval, 0, 0, 0); |
2637 | /* check for sysctl zomb lookup */ |
2638 | while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { |
			msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2640 | } |
2641 | /* safe to use p as this is a system reap */ |
2642 | p->p_stat = SZOMB; |
2643 | p->p_listflag |= P_LIST_WAITING; |
2644 | |
		/*
		 * This is a named reference and it is not granted
		 * if the reap is already in progress. So we get
		 * the reference here exclusively and there can be
		 * no waiters. So there is no need for a wakeup
		 * after we are done. Also the reap frees the structure
		 * and the proc struct cannot be used for wakeups as well.
		 * It is safe to use p here as this is a system reap.
		 */
reap_child_locked(pp, p,
REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
2656 | } |
2657 | if (uth->uu_lowpri_window) { |
/*
 * The task is marked as a low priority I/O type and we've
 * somehow picked up another throttle during exit processing...
 * no need to throttle this thread since it's going away,
 * but we do need to update our bookkeeping with respect to
 * throttled threads.
 */
throttle_lowpri_io(0);
2665 | } |
2666 | |
proc_rele(pp);
2668 | #if DEVELOPMENT || DEBUG |
2669 | proc_exit_lpexit_check(pid, PELS_POS_END); |
2670 | #endif |
2671 | } |
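/*
 * Hedged userspace sketch (illustrative only, not part of the kernel
 * build): the caller-visible effect of the P_NOCLDWAIT path above. A
 * parent that ignores SIGCHLD asks the kernel to auto-reap its children,
 * so wait() ultimately fails with ECHILD instead of returning a zombie's
 * status.
 */
#if 0 /* example only */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
    signal(SIGCHLD, SIG_IGN);   /* request no-zombie semantics (POSIX) */
    if (fork() == 0) {
        _exit(0);               /* child: reaped by the kernel, not us */
    }
    /* POSIX: wait() blocks until all children are gone, then fails */
    assert(wait(NULL) == -1 && errno == ECHILD);
    return 0;
}
#endif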
2672 | |
2673 | |
2674 | /* |
2675 | * reap_child_locked |
2676 | * |
2677 | * Finalize a child exit once its status has been saved. |
2678 | * |
2679 | * If ptrace has attached, detach it and return it to its real parent. Free any |
2680 | * remaining resources. |
2681 | * |
2682 | * Parameters: |
2683 | * - proc_t parent Parent of process being reaped |
2684 | * - proc_t child Process to reap |
2685 | * - reap_flags_t flags Control locking and re-parenting behavior |
2686 | */ |
2687 | static void |
2688 | reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags) |
2689 | { |
2690 | struct pgrp *pg; |
boolean_t shadow_proc = proc_is_shadow(child);
2692 | |
2693 | if (flags & REAP_LOCKED) { |
2694 | proc_list_unlock(); |
2695 | } |
2696 | |
/*
 * Under ptrace, the child should now be re-parented back to its original
 * parent, unless that parent is initproc and the child came to initproc
 * through re-parenting.
 */
2702 | bool child_ptraced = child->p_oppid != 0; |
2703 | if (!shadow_proc && child_ptraced) { |
2704 | int knote_hint; |
2705 | pid_t orig_ppid = 0; |
2706 | proc_t orig_parent = PROC_NULL; |
2707 | |
2708 | proc_lock(child); |
2709 | orig_ppid = child->p_oppid; |
2710 | child->p_oppid = 0; |
2711 | knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff); |
2712 | proc_unlock(child); |
2713 | |
orig_parent = proc_find(orig_ppid);
2715 | if (orig_parent) { |
/*
 * Only re-parent the process if its original parent was not
 * initproc, or if it did not come to initproc from re-parenting.
 */
2720 | bool reparenting = orig_parent != initproc || |
2721 | (flags & REAP_REPARENTED_TO_INIT) == 0; |
2722 | if (reparenting) { |
2723 | if (orig_parent != initproc) { |
2724 | /* |
2725 | * Internal fields should be safe to access here because the |
2726 | * child is exited and not reaped or re-parented yet. |
2727 | */ |
2728 | proc_lock(orig_parent); |
2729 | orig_parent->si_pid = proc_getpid(child); |
2730 | orig_parent->si_status = child->p_xstat; |
2731 | orig_parent->si_code = CLD_CONTINUED; |
orig_parent->si_uid = kauth_cred_getruid(proc_ucred_unsafe(child));
2733 | proc_unlock(orig_parent); |
2734 | } |
proc_reparentlocked(child, orig_parent, 1, 0);
2736 | |
2737 | /* |
2738 | * After re-parenting, re-send the child's NOTE_EXIT to the |
2739 | * original parent. |
2740 | */ |
proc_knote(child, knote_hint);
psignal(orig_parent, SIGCHLD);
2743 | |
2744 | proc_list_lock(); |
wakeup((caddr_t)orig_parent);
child->p_listflag &= ~P_LIST_WAITING;
wakeup(&child->p_stat);
2748 | proc_list_unlock(); |
2749 | |
proc_rele(orig_parent);
2751 | if ((flags & REAP_LOCKED) && !(flags & REAP_DROP_LOCK)) { |
2752 | proc_list_lock(); |
2753 | } |
2754 | return; |
2755 | } else { |
2756 | /* |
2757 | * Satisfy the knote lifecycle because ptraced processes don't |
2758 | * broadcast NOTE_EXIT during initial child termination. |
2759 | */ |
proc_knote(child, knote_hint);
proc_rele(orig_parent);
2762 | } |
2763 | } |
2764 | } |
2765 | |
2766 | #pragma clang diagnostic push |
2767 | #pragma clang diagnostic ignored "-Wdeprecated-declarations" |
proc_knote(child, NOTE_REAP);
2769 | #pragma clang diagnostic pop |
2770 | |
proc_knote_drain(child);
2772 | |
2773 | child->p_xstat = 0; |
2774 | if (!shadow_proc && child->p_ru) { |
2775 | /* |
2776 | * Roll up the rusage statistics to the parent, unless the parent is |
2777 | * ignoring SIGCHLD. POSIX requires the children's resources of such a |
2778 | * parent to not be included in the parent's usage (seems odd given |
2779 | * RLIMIT_CPU, though). |
2780 | */ |
2781 | proc_lock(parent); |
2782 | bool rollup_child = (parent->p_flag & P_NOCLDWAIT) == 0; |
2783 | if (rollup_child) { |
ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
2785 | } |
update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
2787 | proc_unlock(parent); |
2788 | zfree(zombie_zone, child->p_ru); |
2789 | child->p_ru = NULL; |
2790 | } else if (!shadow_proc) { |
2791 | printf("Warning : lost p_ru for %s\n" , child->p_comm); |
2792 | } else { |
2793 | assert(child->p_ru == NULL); |
2794 | } |
2795 | |
2796 | AUDIT_SESSION_PROCEXIT(child); |
2797 | |
2798 | #if CONFIG_PERSONAS |
persona_proc_drop(child);
2800 | #endif /* CONFIG_PERSONAS */ |
2801 | /* proc_ucred_unsafe is safe, because child is not running */ |
(void)chgproccnt(kauth_cred_getruid(proc_ucred_unsafe(child)), -1);
2803 | |
os_reason_free(child->p_exit_reason);
2805 | |
2806 | proc_list_lock(); |
2807 | |
pg = pgrp_leave_locked(child);
2809 | LIST_REMOVE(child, p_list); |
2810 | parent->p_childrencnt--; |
2811 | LIST_REMOVE(child, p_sibling); |
2812 | bool no_more_children = (flags & REAP_DEAD_PARENT) && |
2813 | LIST_EMPTY(&parent->p_children); |
2814 | if (no_more_children) { |
wakeup((caddr_t)parent);
2816 | } |
2817 | child->p_listflag &= ~P_LIST_WAITING; |
wakeup(&child->p_stat);
2819 | |
2820 | /* Take it out of process hash */ |
2821 | if (!shadow_proc) { |
2822 | phash_remove_locked(child); |
2823 | } |
2824 | proc_checkdeadrefs(child); |
2825 | nprocs--; |
2826 | if (flags & REAP_DEAD_PARENT) { |
2827 | child->p_listflag |= P_LIST_DEADPARENT; |
2828 | } |
2829 | |
2830 | proc_list_unlock(); |
2831 | |
pgrp_rele(pg);
fdt_destroy(child);
lck_mtx_destroy(&child->p_mlock, &proc_mlock_grp);
lck_mtx_destroy(&child->p_ucred_mlock, &proc_ucred_mlock_grp);
#if CONFIG_AUDIT
lck_mtx_destroy(&child->p_audit_mlock, &proc_ucred_mlock_grp);
#endif /* CONFIG_AUDIT */
#if CONFIG_DTRACE
lck_mtx_destroy(&child->p_dtrace_sprlock, &proc_lck_grp);
#endif
lck_spin_destroy(&child->p_slock, &proc_slock_grp);
proc_wait_release(child);
2844 | |
2845 | if ((flags & REAP_LOCKED) && (flags & REAP_DROP_LOCK) == 0) { |
2846 | proc_list_lock(); |
2847 | } |
2848 | } |
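/*
 * Locking-contract sketch for reap_child_locked() (illustrative fragment,
 * mirroring the REAP_LOCKED | REAP_DROP_LOCK caller in the exit path
 * above): the flags encode whether the proc list lock is held on entry
 * and whether it should still be held on return.
 */
#if 0 /* example only */
    proc_list_lock();
    /* ... child p already marked SZOMB and P_LIST_WAITING ... */
    reap_child_locked(pp, p, REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
    /* REAP_DROP_LOCK was passed: the proc list lock is no longer held */
#endif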
2849 | |
2850 | int |
2851 | wait1continue(int result) |
2852 | { |
2853 | proc_t p; |
2854 | thread_t thread; |
2855 | uthread_t uth; |
2856 | struct _wait4_data *wait4_data; |
2857 | struct wait4_nocancel_args *uap; |
2858 | int *retval; |
2859 | |
2860 | if (result) { |
2861 | return result; |
2862 | } |
2863 | |
2864 | p = current_proc(); |
2865 | thread = current_thread(); |
2866 | uth = (struct uthread *)get_bsdthread_info(thread); |
2867 | |
2868 | wait4_data = &uth->uu_save.uus_wait4_data; |
2869 | uap = wait4_data->args; |
2870 | retval = wait4_data->retval; |
2871 | return wait4_nocancel(p, uap, retval); |
2872 | } |
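/*
 * Continuation-pattern sketch (illustrative fragment): when a wait*()
 * thread blocks, its kernel stack may be discarded, so the syscall
 * arguments are stashed in the uthread before sleeping and the
 * continuation re-enters the syscall with them, exactly as
 * wait1continue() does above.
 */
#if 0 /* example only */
    uth = current_uthread();
    uth->uu_save.uus_wait4_data.args = uap;      /* survives the sleep */
    uth->uu_save.uus_wait4_data.retval = retval;
    error = msleep0((caddr_t)q, &proc_list_mlock,
        PWAIT | PCATCH | PDROP, "wait", 0, wait1continue);
    /* on wakeup, wait1continue() restarts wait4_nocancel(); control does
     * not come back here along the blocking path */
#endif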
2873 | |
2874 | int |
2875 | wait4(proc_t q, struct wait4_args *uap, int32_t *retval) |
2876 | { |
__pthread_testcancel(1);
2878 | return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval); |
2879 | } |
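/*
 * Hedged userspace sketch (illustrative only): a typical caller of the
 * wait4() syscall implemented below, polling for any exited child and
 * collecting its resource usage.
 */
#if 0 /* example only */
#include <stdio.h>
#include <sys/resource.h>
#include <sys/wait.h>

static void
reap_children(void)
{
    int status;
    struct rusage ru;
    pid_t pid;

    /* -1 selects any child; WNOHANG returns 0 instead of blocking */
    while ((pid = wait4(-1, &status, WNOHANG, &ru)) > 0) {
        if (WIFEXITED(status)) {
            printf("child %d exited with %d\n", pid, WEXITSTATUS(status));
        } else if (WIFSIGNALED(status)) {
            printf("child %d killed by signal %d\n", pid, WTERMSIG(status));
        }
    }
}
#endif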
2880 | |
2881 | int |
2882 | wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval) |
2883 | { |
2884 | int nfound; |
2885 | int sibling_count; |
2886 | proc_t p; |
2887 | int status, error; |
2888 | uthread_t uth; |
2889 | struct _wait4_data *wait4_data; |
2890 | |
2891 | AUDIT_ARG(pid, uap->pid); |
2892 | |
2893 | if (uap->pid == 0) { |
2894 | uap->pid = -q->p_pgrpid; |
2895 | } |
2896 | |
2897 | if (uap->pid == INT_MIN) { |
2898 | return EINVAL; |
2899 | } |
2900 | |
2901 | loop: |
2902 | proc_list_lock(); |
2903 | loop1: |
2904 | nfound = 0; |
2905 | sibling_count = 0; |
2906 | |
2907 | PCHILDREN_FOREACH(q, p) { |
2908 | if (p->p_sibling.le_next != 0) { |
2909 | sibling_count++; |
2910 | } |
2911 | if (uap->pid != WAIT_ANY && |
2912 | proc_getpid(p) != uap->pid && |
2913 | p->p_pgrpid != -(uap->pid)) { |
2914 | continue; |
2915 | } |
2916 | |
2917 | if (proc_is_shadow(p)) { |
2918 | continue; |
2919 | } |
2920 | |
2921 | nfound++; |
2922 | |
2923 | /* XXX This is racy because we don't get the lock!!!! */ |
2924 | |
2925 | if (p->p_listflag & P_LIST_WAITING) { |
2926 | /* we're not using a continuation here but we still need to stash |
2927 | * the args for stackshot. */ |
2928 | uth = current_uthread(); |
2929 | wait4_data = &uth->uu_save.uus_wait4_data; |
2930 | wait4_data->args = uap; |
thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);

(void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2934 | goto loop1; |
2935 | } |
2936 | p->p_listflag |= P_LIST_WAITING; /* only allow single thread to wait() */ |
2937 | |
2938 | |
2939 | if (p->p_stat == SZOMB) { |
2940 | reap_flags_t reap_flags = (p->p_listflag & P_LIST_DEADPARENT) ? |
2941 | REAP_REPARENTED_TO_INIT : 0; |
2942 | |
2943 | proc_list_unlock(); |
2944 | #if CONFIG_MACF |
if ((error = mac_proc_check_wait(q, p)) != 0) {
2946 | goto out; |
2947 | } |
2948 | #endif |
2949 | retval[0] = proc_getpid(p); |
2950 | if (uap->status) { |
2951 | /* Legacy apps expect only 8 bits of status */ |
2952 | status = 0xffff & p->p_xstat; /* convert to int */ |
2953 | error = copyout((caddr_t)&status, |
2954 | uap->status, |
2955 | sizeof(status)); |
2956 | if (error) { |
2957 | goto out; |
2958 | } |
2959 | } |
2960 | if (uap->rusage) { |
2961 | if (p->p_ru == NULL) { |
2962 | error = ENOMEM; |
2963 | } else { |
2964 | if (IS_64BIT_PROCESS(q)) { |
2965 | struct user64_rusage my_rusage = {}; |
munge_user64_rusage(&p->p_ru->ru, &my_rusage);
2967 | error = copyout((caddr_t)&my_rusage, |
2968 | uap->rusage, |
2969 | sizeof(my_rusage)); |
2970 | } else { |
2971 | struct user32_rusage my_rusage = {}; |
munge_user32_rusage(&p->p_ru->ru, &my_rusage);
2973 | error = copyout((caddr_t)&my_rusage, |
2974 | uap->rusage, |
2975 | sizeof(my_rusage)); |
2976 | } |
2977 | } |
2978 | /* information unavailable? */ |
2979 | if (error) { |
2980 | goto out; |
2981 | } |
2982 | } |
2983 | |
/* Conformance change for 6577252.
 * When SIGCHLD is blocked and wait() returns because the status
 * of a child process is available and there are no other
 * child processes, then any pending SIGCHLD signal is cleared.
 */
2989 | if (sibling_count == 0) { |
2990 | int mask = sigmask(SIGCHLD); |
2991 | uth = current_uthread(); |
2992 | |
2993 | if ((uth->uu_sigmask & mask) != 0) { |
2994 | /* we are blocking SIGCHLD signals. clear any pending SIGCHLD. |
2995 | * This locking looks funny but it is protecting access to the |
2996 | * thread via p_uthlist. |
2997 | */ |
2998 | proc_lock(q); |
2999 | uth->uu_siglist &= ~mask; /* clear pending signal */ |
3000 | proc_unlock(q); |
3001 | } |
3002 | } |
3003 | |
3004 | /* Clean up */ |
(void)reap_child_locked(q, p, reap_flags);
3006 | |
3007 | return 0; |
3008 | } |
3009 | if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 && |
3010 | (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) { |
3011 | proc_list_unlock(); |
3012 | #if CONFIG_MACF |
if ((error = mac_proc_check_wait(q, p)) != 0) {
3014 | goto out; |
3015 | } |
3016 | #endif |
3017 | proc_lock(p); |
3018 | p->p_lflag |= P_LWAITED; |
3019 | proc_unlock(p); |
3020 | retval[0] = proc_getpid(p); |
3021 | if (uap->status) { |
3022 | status = W_STOPCODE(p->p_xstat); |
3023 | error = copyout((caddr_t)&status, |
3024 | uap->status, |
3025 | sizeof(status)); |
3026 | } else { |
3027 | error = 0; |
3028 | } |
3029 | goto out; |
3030 | } |
/*
 * If we are waiting for continued processes, and this
 * process was continued
 */
3035 | if ((uap->options & WCONTINUED) && |
3036 | (p->p_flag & P_CONTINUED)) { |
3037 | proc_list_unlock(); |
3038 | #if CONFIG_MACF |
if ((error = mac_proc_check_wait(q, p)) != 0) {
3040 | goto out; |
3041 | } |
3042 | #endif |
3043 | |
/* Prevent other processes from waiting for this event */
3045 | OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag); |
3046 | retval[0] = proc_getpid(p); |
3047 | if (uap->status) { |
3048 | status = W_STOPCODE(SIGCONT); |
3049 | error = copyout((caddr_t)&status, |
3050 | uap->status, |
3051 | sizeof(status)); |
3052 | } else { |
3053 | error = 0; |
3054 | } |
3055 | goto out; |
3056 | } |
3057 | p->p_listflag &= ~P_LIST_WAITING; |
wakeup(&p->p_stat);
3059 | } |
3060 | /* list lock is held when we get here any which way */ |
3061 | if (nfound == 0) { |
3062 | proc_list_unlock(); |
3063 | return ECHILD; |
3064 | } |
3065 | |
3066 | if (uap->options & WNOHANG) { |
3067 | retval[0] = 0; |
3068 | proc_list_unlock(); |
3069 | return 0; |
3070 | } |
3071 | |
3072 | /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */ |
3073 | uth = current_uthread(); |
3074 | wait4_data = &uth->uu_save.uus_wait4_data; |
3075 | wait4_data->args = uap; |
3076 | wait4_data->retval = retval; |
3077 | |
thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
if ((error = msleep0((caddr_t)q, &proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) {
3080 | return error; |
3081 | } |
3082 | |
3083 | goto loop; |
3084 | out: |
3085 | proc_list_lock(); |
3086 | p->p_listflag &= ~P_LIST_WAITING; |
wakeup(&p->p_stat);
3088 | proc_list_unlock(); |
3089 | return error; |
3090 | } |
3091 | |
3092 | #if DEBUG |
3093 | #define ASSERT_LCK_MTX_OWNED(lock) \ |
3094 | lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED) |
3095 | #else |
3096 | #define ASSERT_LCK_MTX_OWNED(lock) /* nothing */ |
3097 | #endif |
3098 | |
3099 | int |
3100 | waitidcontinue(int result) |
3101 | { |
3102 | proc_t p; |
3103 | thread_t thread; |
3104 | uthread_t uth; |
3105 | struct _waitid_data *waitid_data; |
3106 | struct waitid_nocancel_args *uap; |
3107 | int *retval; |
3108 | |
3109 | if (result) { |
3110 | return result; |
3111 | } |
3112 | |
3113 | p = current_proc(); |
3114 | thread = current_thread(); |
3115 | uth = (struct uthread *)get_bsdthread_info(thread); |
3116 | |
3117 | waitid_data = &uth->uu_save.uus_waitid_data; |
3118 | uap = waitid_data->args; |
3119 | retval = waitid_data->retval; |
3120 | return waitid_nocancel(p, uap, retval); |
3121 | } |
3122 | |
3123 | /* |
3124 | * Description: Suspend the calling thread until one child of the process |
3125 | * containing the calling thread changes state. |
3126 | * |
3127 | * Parameters: uap->idtype one of P_PID, P_PGID, P_ALL |
3128 | * uap->id pid_t or gid_t or ignored |
3129 | * uap->infop Address of siginfo_t struct in |
3130 | * user space into which to return status |
3131 | * uap->options flag values |
3132 | * |
3133 | * Returns: 0 Success |
3134 | * !0 Error returning status to user space |
3135 | */ |
3136 | int |
3137 | waitid(proc_t q, struct waitid_args *uap, int32_t *retval) |
3138 | { |
__pthread_testcancel(1);
3140 | return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval); |
3141 | } |
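/*
 * Hedged userspace sketch (illustrative only): driving waitid() with
 * WNOHANG. Note the si_pid idiom called out in the comment inside
 * waitid_nocancel() below: on a WNOHANG return with no matching child the
 * siginfo is left untouched here, so pre-zeroing si_pid is the portable
 * "nothing was found" test.
 */
#if 0 /* example only */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>

static void
poll_children(void)
{
    siginfo_t si;

    si.si_pid = 0;  /* portable WNOHANG "no child" sentinel */
    if (waitid(P_ALL, 0, &si, WEXITED | WNOHANG) == 0 && si.si_pid != 0) {
        printf("child %d: si_code %d, si_status %d\n",
            (int)si.si_pid, si.si_code, si.si_status);
    }
}
#endif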
3142 | |
3143 | int |
3144 | waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap, |
3145 | __unused int32_t *retval) |
3146 | { |
3147 | user_siginfo_t siginfo; /* siginfo data to return to caller */ |
3148 | boolean_t caller64 = IS_64BIT_PROCESS(q); |
3149 | int nfound; |
3150 | proc_t p; |
3151 | int error; |
3152 | uthread_t uth; |
3153 | struct _waitid_data *waitid_data; |
3154 | |
3155 | if (uap->options == 0 || |
3156 | (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) { |
3157 | return EINVAL; /* bits set that aren't recognized */ |
3158 | } |
3159 | switch (uap->idtype) { |
3160 | case P_PID: /* child with process ID equal to... */ |
3161 | case P_PGID: /* child with process group ID equal to... */ |
3162 | if (((int)uap->id) < 0) { |
3163 | return EINVAL; |
3164 | } |
3165 | break; |
3166 | case P_ALL: /* any child */ |
3167 | break; |
3168 | } |
3169 | |
3170 | loop: |
3171 | proc_list_lock(); |
3172 | loop1: |
3173 | nfound = 0; |
3174 | |
3175 | PCHILDREN_FOREACH(q, p) { |
3176 | switch (uap->idtype) { |
3177 | case P_PID: /* child with process ID equal to... */ |
3178 | if (proc_getpid(p) != (pid_t)uap->id) { |
3179 | continue; |
3180 | } |
3181 | break; |
3182 | case P_PGID: /* child with process group ID equal to... */ |
3183 | if (p->p_pgrpid != (pid_t)uap->id) { |
3184 | continue; |
3185 | } |
3186 | break; |
3187 | case P_ALL: /* any child */ |
3188 | break; |
3189 | } |
3190 | |
3191 | if (proc_is_shadow(p)) { |
3192 | continue; |
3193 | } |
3194 | /* XXX This is racy because we don't get the lock!!!! */ |
3195 | |
/*
 * Wait collision; go to sleep and restart; used to maintain
 * the single-return-per-waited-process guarantee.
 */
3200 | if (p->p_listflag & P_LIST_WAITING) { |
(void) msleep(&p->p_stat, &proc_list_mlock,
PWAIT, "waitidcoll", 0);
3203 | goto loop1; |
3204 | } |
3205 | p->p_listflag |= P_LIST_WAITING; /* mark busy */ |
3206 | |
3207 | nfound++; |
3208 | |
bzero(&siginfo, sizeof(siginfo));
3210 | |
3211 | switch (p->p_stat) { |
3212 | case SZOMB: /* Exited */ |
3213 | if (!(uap->options & WEXITED)) { |
3214 | break; |
3215 | } |
3216 | proc_list_unlock(); |
3217 | #if CONFIG_MACF |
if ((error = mac_proc_check_wait(q, p)) != 0) {
3219 | goto out; |
3220 | } |
3221 | #endif |
3222 | siginfo.si_signo = SIGCHLD; |
3223 | siginfo.si_pid = proc_getpid(p); |
3224 | |
3225 | /* If the child terminated abnormally due to a signal, the signum |
3226 | * needs to be preserved in the exit status. |
3227 | */ |
3228 | if (WIFSIGNALED(p->p_xstat)) { |
3229 | siginfo.si_code = WCOREDUMP(p->p_xstat) ? |
3230 | CLD_DUMPED : CLD_KILLED; |
3231 | siginfo.si_status = WTERMSIG(p->p_xstat); |
3232 | } else { |
3233 | siginfo.si_code = CLD_EXITED; |
3234 | siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF; |
3235 | } |
3236 | siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000); |
3237 | p->p_xhighbits = 0; |
3238 | |
if ((error = copyoutsiginfo(&siginfo,
caller64, uap->infop)) != 0) {
3241 | goto out; |
3242 | } |
3243 | |
/* Prevent other processes from waiting for this event? */
3245 | if (!(uap->options & WNOWAIT)) { |
3246 | reap_child_locked(parent: q, child: p, flags: 0); |
3247 | return 0; |
3248 | } |
3249 | goto out; |
3250 | |
3251 | case SSTOP: /* Stopped */ |
3252 | /* |
3253 | * If we are not interested in stopped processes, then |
3254 | * ignore this one. |
3255 | */ |
3256 | if (!(uap->options & WSTOPPED)) { |
3257 | break; |
3258 | } |
3259 | |
3260 | /* |
3261 | * If someone has already waited it, we lost a race |
3262 | * to be the one to return status. |
3263 | */ |
3264 | if ((p->p_lflag & P_LWAITED) != 0) { |
3265 | break; |
3266 | } |
3267 | proc_list_unlock(); |
3268 | #if CONFIG_MACF |
if ((error = mac_proc_check_wait(q, p)) != 0) {
3270 | goto out; |
3271 | } |
3272 | #endif |
3273 | siginfo.si_signo = SIGCHLD; |
3274 | siginfo.si_pid = proc_getpid(p); |
3275 | siginfo.si_status = p->p_xstat; /* signal number */ |
3276 | siginfo.si_code = CLD_STOPPED; |
3277 | |
if ((error = copyoutsiginfo(&siginfo,
caller64, uap->infop)) != 0) {
3280 | goto out; |
3281 | } |
3282 | |
/* Prevent other processes from waiting for this event? */
3284 | if (!(uap->options & WNOWAIT)) { |
3285 | proc_lock(p); |
3286 | p->p_lflag |= P_LWAITED; |
3287 | proc_unlock(p); |
3288 | } |
3289 | goto out; |
3290 | |
3291 | default: /* All other states => Continued */ |
3292 | if (!(uap->options & WCONTINUED)) { |
3293 | break; |
3294 | } |
3295 | |
3296 | /* |
3297 | * If the flag isn't set, then this process has not |
3298 | * been stopped and continued, or the status has |
3299 | * already been reaped by another caller of waitid(). |
3300 | */ |
3301 | if ((p->p_flag & P_CONTINUED) == 0) { |
3302 | break; |
3303 | } |
3304 | proc_list_unlock(); |
3305 | #if CONFIG_MACF |
if ((error = mac_proc_check_wait(q, p)) != 0) {
3307 | goto out; |
3308 | } |
3309 | #endif |
3310 | siginfo.si_signo = SIGCHLD; |
3311 | siginfo.si_code = CLD_CONTINUED; |
3312 | proc_lock(p); |
3313 | siginfo.si_pid = p->p_contproc; |
3314 | siginfo.si_status = p->p_xstat; |
3315 | proc_unlock(p); |
3316 | |
if ((error = copyoutsiginfo(&siginfo,
caller64, uap->infop)) != 0) {
3319 | goto out; |
3320 | } |
3321 | |
/* Prevent other processes from waiting for this event? */
3323 | if (!(uap->options & WNOWAIT)) { |
3324 | OSBitAndAtomic(~((uint32_t)P_CONTINUED), |
3325 | &p->p_flag); |
3326 | } |
3327 | goto out; |
3328 | } |
3329 | ASSERT_LCK_MTX_OWNED(&proc_list_mlock); |
3330 | |
3331 | /* Not a process we are interested in; go on to next child */ |
3332 | |
3333 | p->p_listflag &= ~P_LIST_WAITING; |
wakeup(&p->p_stat);
3335 | } |
3336 | ASSERT_LCK_MTX_OWNED(&proc_list_mlock); |
3337 | |
3338 | /* No child processes that could possibly satisfy the request? */ |
3339 | |
3340 | if (nfound == 0) { |
3341 | proc_list_unlock(); |
3342 | return ECHILD; |
3343 | } |
3344 | |
3345 | if (uap->options & WNOHANG) { |
3346 | proc_list_unlock(); |
3347 | #if CONFIG_MACF |
if ((error = mac_proc_check_wait(q, p)) != 0) {
3349 | return error; |
3350 | } |
3351 | #endif |
3352 | /* |
3353 | * The state of the siginfo structure in this case |
3354 | * is undefined. Some implementations bzero it, some |
3355 | * (like here) leave it untouched for efficiency. |
3356 | * |
3357 | * Thus the most portable check for "no matching pid with |
3358 | * WNOHANG" is to store a zero into si_pid before |
3359 | * invocation, then check for a non-zero value afterwards. |
3360 | */ |
3361 | return 0; |
3362 | } |
3363 | |
3364 | /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */ |
3365 | uth = current_uthread(); |
3366 | waitid_data = &uth->uu_save.uus_waitid_data; |
3367 | waitid_data->args = uap; |
3368 | waitid_data->retval = retval; |
3369 | |
if ((error = msleep0(q, &proc_list_mlock,
PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) {
3372 | return error; |
3373 | } |
3374 | |
3375 | goto loop; |
3376 | out: |
3377 | proc_list_lock(); |
3378 | p->p_listflag &= ~P_LIST_WAITING; |
wakeup(&p->p_stat);
3380 | proc_list_unlock(); |
3381 | return error; |
3382 | } |
3383 | |
3384 | /* |
3385 | * make process 'parent' the new parent of process 'child'. |
3386 | */ |
3387 | void |
3388 | proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked) |
3389 | { |
3390 | proc_t oldparent = PROC_NULL; |
3391 | |
3392 | if (child->p_pptr == parent) { |
3393 | return; |
3394 | } |
3395 | |
3396 | if (locked == 0) { |
3397 | proc_list_lock(); |
3398 | } |
3399 | |
3400 | oldparent = child->p_pptr; |
3401 | #if __PROC_INTERNAL_DEBUG |
3402 | if (oldparent == PROC_NULL) { |
3403 | panic("proc_reparent: process %p does not have a parent" , child); |
3404 | } |
3405 | #endif |
3406 | |
3407 | LIST_REMOVE(child, p_sibling); |
3408 | #if __PROC_INTERNAL_DEBUG |
3409 | if (oldparent->p_childrencnt == 0) { |
3410 | panic("process children count already 0" ); |
3411 | } |
3412 | #endif |
3413 | oldparent->p_childrencnt--; |
3414 | #if __PROC_INTERNAL_DEBUG |
3415 | if (oldparent->p_childrencnt < 0) { |
3416 | panic("process children count -ve" ); |
3417 | } |
3418 | #endif |
3419 | LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); |
3420 | parent->p_childrencnt++; |
3421 | child->p_pptr = parent; |
3422 | child->p_ppid = proc_getpid(parent); |
3423 | |
3424 | proc_list_unlock(); |
3425 | |
3426 | if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) { |
psignal(initproc, SIGCHLD);
3428 | } |
3429 | if (locked == 1) { |
3430 | proc_list_lock(); |
3431 | } |
3432 | } |
3433 | |
3434 | /* |
3435 | * Exit: deallocate address space and other resources, change proc state |
3436 | * to zombie, and unlink proc from allproc and parent's lists. Save exit |
3437 | * status and rusage for wait(). Check for child processes and orphan them. |
3438 | */ |
3439 | |
3440 | |
3441 | /* |
3442 | * munge_rusage |
3443 | * LP64 support - long is 64 bits if we are dealing with a 64 bit user |
3444 | * process. We munge the kernel version of rusage into the |
3445 | * 64 bit version. |
3446 | */ |
3447 | __private_extern__ void |
3448 | munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p) |
3449 | { |
3450 | /* Zero-out struct so that padding is cleared */ |
bzero(a_user_rusage_p, sizeof(struct user64_rusage));
3452 | |
3453 | /* timeval changes size, so utime and stime need special handling */ |
3454 | a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec; |
3455 | a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec; |
3456 | a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec; |
3457 | a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec; |
/*
 * everything else can be a direct assign, since there is no loss
 * of precision implied going 32->64.
 */
3462 | a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss; |
3463 | a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss; |
3464 | a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss; |
3465 | a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss; |
3466 | a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt; |
3467 | a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt; |
3468 | a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap; |
3469 | a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock; |
3470 | a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock; |
3471 | a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd; |
3472 | a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv; |
3473 | a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals; |
3474 | a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw; |
3475 | a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw; |
3476 | } |
3477 | |
3478 | /* For a 64-bit kernel and 32-bit userspace, munging may be needed */ |
3479 | __private_extern__ void |
3480 | munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p) |
3481 | { |
bzero(a_user_rusage_p, sizeof(struct user32_rusage));
3483 | |
3484 | /* timeval changes size, so utime and stime need special handling */ |
3485 | a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec; |
3486 | a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec; |
3487 | a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec; |
3488 | a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec; |
3489 | /* |
3490 | * everything else can be a direct assign. We currently ignore |
3491 | * the loss of precision |
3492 | */ |
3493 | a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss; |
3494 | a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss; |
3495 | a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss; |
3496 | a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss; |
3497 | a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt; |
3498 | a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt; |
3499 | a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap; |
3500 | a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock; |
3501 | a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock; |
3502 | a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd; |
3503 | a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv; |
3504 | a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals; |
3505 | a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw; |
3506 | a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw; |
3507 | } |
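/*
 * Illustrative sketch (hypothetical struct names, not the kernel's): why
 * utime/stime get special handling in the two munge routines above.
 * tv_sec is 64-bit in the kernel's timeval but 32-bit in a 32-bit
 * process's view, so the struct sizes differ and a blind copy would
 * misalign every field after ru_utime; each field is assigned (and, going
 * 64->32, truncated) individually.
 */
#if 0 /* example only */
#include <assert.h>
#include <stdint.h>

struct k_timeval   { int64_t tv_sec; int32_t tv_usec; }; /* kernel-side view */
struct u32_timeval { int32_t tv_sec; int32_t tv_usec; }; /* 32-bit user view */

int
main(void)
{
    struct k_timeval kt = { .tv_sec = 1700000000, .tv_usec = 42 };
    struct u32_timeval ut = {
        .tv_sec = (int32_t)kt.tv_sec,   /* explicit, possibly lossy */
        .tv_usec = kt.tv_usec,
    };

    assert(sizeof(kt) != sizeof(ut));   /* layouts differ: no direct assign */
    assert(ut.tv_usec == 42);
    return 0;
}
#endif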
3508 | |
3509 | void |
3510 | kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo) |
3511 | { |
3512 | assert(thread != NULL); |
3513 | assert(waitinfo != NULL); |
3514 | |
3515 | struct uthread *ut = get_bsdthread_info(thread); |
3516 | waitinfo->context = 0; |
3517 | // ensure wmesg is consistent with a thread waiting in wait4 |
assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
3519 | struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args; |
3520 | // May not actually contain a pid; this is just the argument to wait4. |
3521 | // See man wait4 for other valid wait4 arguments. |
3522 | waitinfo->owner = args->pid; |
3523 | } |
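/*
 * Illustrative (see wait4(2)): the value stored in waitinfo->owner above
 * is the raw wait4 "pid" selector, which encodes more than a literal
 * process ID.
 */
#if 0 /* example only */
    int status;

    wait4(123, &status, 0, NULL);   /* the specific child with pid 123 */
    wait4(0, &status, 0, NULL);     /* any child in the caller's process group */
    wait4(-1, &status, 0, NULL);    /* any child at all (WAIT_ANY) */
    wait4(-42, &status, 0, NULL);   /* any child in process group 42 */
#endif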
3524 | |
3525 | int |
3526 | exit_with_guard_exception( |
3527 | proc_t p, |
3528 | mach_exception_data_type_t code, |
3529 | mach_exception_data_type_t subcode) |
3530 | { |
os_reason_t reason = os_reason_create(OS_REASON_GUARD, (uint64_t)code);
3532 | assert(reason != OS_REASON_NULL); |
3533 | |
3534 | return exit_with_mach_exception(p, reason, EXC_GUARD, code, subcode); |
3535 | } |
3536 | |
3537 | #if __has_feature(ptrauth_calls) |
3538 | int |
3539 | exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code, |
3540 | mach_exception_subcode_t subcode) |
3541 | { |
3542 | os_reason_t reason = os_reason_create(OS_REASON_PAC_EXCEPTION, (uint64_t)code); |
3543 | assert(reason != OS_REASON_NULL); |
3544 | |
3545 | return exit_with_mach_exception(p, reason, exception, code, subcode); |
3546 | } |
3547 | #endif /* __has_feature(ptrauth_calls) */ |
3548 | |
3549 | int |
3550 | exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code, |
3551 | mach_exception_data_type_t subcode) |
3552 | { |
os_reason_t reason = os_reason_create(OS_REASON_PORT_SPACE, (uint64_t)code);
3554 | assert(reason != OS_REASON_NULL); |
3555 | |
3556 | return exit_with_mach_exception(p, reason, EXC_RESOURCE, code, subcode); |
3557 | } |
3558 | |
3559 | static int |
3560 | exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception, mach_exception_code_t code, |
3561 | mach_exception_subcode_t subcode) |
3562 | { |
3563 | thread_t self = current_thread(); |
3564 | struct uthread *ut = get_bsdthread_info(self); |
3565 | |
3566 | ut->uu_exception = exception; |
3567 | ut->uu_code = code; |
3568 | ut->uu_subcode = subcode; |
3569 | |
3570 | reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT; |
return exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL,
FALSE, FALSE, 0, reason);
3573 | } |
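/*
 * Illustrative: W_EXITCODE(0, SIGKILL), used by the exit_with_* helpers
 * here, packs the signal number into the low status bits, so the parent's
 * wait*() observes a SIGKILL termination rather than a normal exit.
 */
#if 0 /* example only */
    int status = W_EXITCODE(0, SIGKILL);

    assert(WIFSIGNALED(status));
    assert(WTERMSIG(status) == SIGKILL);
    assert(!WIFEXITED(status));
#endif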
3574 | |
3575 | #if CONFIG_EXCLAVES |
3576 | int |
3577 | exit_with_exclave_exception(proc_t p) |
3578 | { |
3579 | /* Using OS_REASON_GUARD for now */ |
3580 | os_reason_t reason = os_reason_create(OS_REASON_GUARD, (uint64_t)GUARD_REASON_EXCLAVES); |
3581 | assert(reason != OS_REASON_NULL); |
3582 | reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT; |
3583 | |
3584 | return exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL, FALSE, FALSE, |
3585 | 0, reason); |
3586 | } |
3587 | #endif /* CONFIG_EXCLAVES */ |
3588 | |
3589 | int |
3590 | exit_with_jit_exception(proc_t p) |
3591 | { |
os_reason_t reason = os_reason_create(OS_REASON_GUARD, (uint64_t)GUARD_REASON_JIT);
3593 | assert(reason != OS_REASON_NULL); |
3594 | reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT; |
3595 | |
return exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL, FALSE, FALSE,
0, reason);
3598 | } |
3599 | |