1 | /* |
2 | * Copyright (c) 2000-2016 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ |
29 | /* |
30 | * Copyright (c) 1982, 1986, 1989, 1991, 1993 |
31 | * The Regents of the University of California. All rights reserved. |
32 | * (c) UNIX System Laboratories, Inc. |
33 | * All or some portions of this file are derived from material licensed |
34 | * to the University of California by American Telephone and Telegraph |
35 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
36 | * the permission of UNIX System Laboratories, Inc. |
37 | * |
38 | * Redistribution and use in source and binary forms, with or without |
39 | * modification, are permitted provided that the following conditions |
40 | * are met: |
41 | * 1. Redistributions of source code must retain the above copyright |
42 | * notice, this list of conditions and the following disclaimer. |
43 | * 2. Redistributions in binary form must reproduce the above copyright |
44 | * notice, this list of conditions and the following disclaimer in the |
45 | * documentation and/or other materials provided with the distribution. |
46 | * 3. All advertising materials mentioning features or use of this software |
47 | * must display the following acknowledgement: |
48 | * This product includes software developed by the University of |
49 | * California, Berkeley and its contributors. |
50 | * 4. Neither the name of the University nor the names of its contributors |
51 | * may be used to endorse or promote products derived from this software |
52 | * without specific prior written permission. |
53 | * |
54 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
56 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
57 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
58 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
59 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
60 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
61 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
62 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
63 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
64 | * SUCH DAMAGE. |
65 | * |
66 | * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 |
67 | */ |
68 | /* |
69 | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce |
70 | * support for mandatory and extensible security protections. This notice |
71 | * is included in support of clause 2.2 (b) of the Apple Public License, |
72 | * Version 2.0. |
73 | */ |
74 | |
75 | #include <machine/reg.h> |
76 | #include <machine/psl.h> |
77 | #include <stdatomic.h> |
78 | |
79 | #include "compat_43.h" |
80 | |
81 | #include <sys/param.h> |
82 | #include <sys/systm.h> |
83 | #include <sys/ioctl.h> |
84 | #include <sys/proc_internal.h> |
85 | #include <sys/proc.h> |
86 | #include <sys/kauth.h> |
87 | #include <sys/tty.h> |
88 | #include <sys/time.h> |
89 | #include <sys/resource.h> |
90 | #include <sys/kernel.h> |
91 | #include <sys/wait.h> |
92 | #include <sys/file_internal.h> |
93 | #include <sys/vnode_internal.h> |
94 | #include <sys/syslog.h> |
95 | #include <sys/malloc.h> |
96 | #include <sys/resourcevar.h> |
97 | #include <sys/ptrace.h> |
98 | #include <sys/proc_info.h> |
99 | #include <sys/reason.h> |
100 | #include <sys/_types/_timeval64.h> |
101 | #include <sys/user.h> |
102 | #include <sys/aio_kern.h> |
103 | #include <sys/sysproto.h> |
104 | #include <sys/signalvar.h> |
105 | #include <sys/kdebug.h> |
106 | #include <sys/filedesc.h> /* fdfree */ |
107 | #include <sys/acct.h> /* acct_process */ |
108 | #include <sys/codesign.h> |
109 | #include <sys/event.h> /* kevent_proc_copy_uptrs */ |
110 | #include <sys/sdt.h> |
111 | |
112 | #include <security/audit/audit.h> |
113 | #include <bsm/audit_kevents.h> |
114 | |
115 | #include <mach/mach_types.h> |
116 | #include <mach/task.h> |
117 | #include <mach/thread_act.h> |
118 | |
119 | #include <kern/exc_resource.h> |
120 | #include <kern/kern_types.h> |
121 | #include <kern/kalloc.h> |
122 | #include <kern/task.h> |
123 | #include <corpses/task_corpse.h> |
124 | #include <kern/thread.h> |
125 | #include <kern/thread_call.h> |
126 | #include <kern/sched_prim.h> |
127 | #include <kern/assert.h> |
128 | #include <kern/policy_internal.h> |
129 | #include <kern/exc_guard.h> |
130 | |
131 | #include <vm/vm_protos.h> |
132 | |
133 | #include <pexpert/pexpert.h> |
134 | |
135 | #if SYSV_SHM |
136 | #include <sys/shm_internal.h> /* shmexit */ |
137 | #endif /* SYSV_SHM */ |
138 | #if CONFIG_PERSONAS |
139 | #include <sys/persona.h> |
140 | #endif /* CONFIG_PERSONAS */ |
141 | #if CONFIG_MEMORYSTATUS |
142 | #include <sys/kern_memorystatus.h> |
143 | #endif /* CONFIG_MEMORYSTATUS */ |
144 | #if CONFIG_DTRACE |
145 | /* Do not include dtrace.h, it redefines kmem_[alloc/free] */ |
146 | void dtrace_proc_exit(proc_t p); |
147 | #include <sys/dtrace_ptss.h> |
148 | #endif /* CONFIG_DTRACE */ |
149 | #if CONFIG_MACF |
150 | #include <security/mac_framework.h> |
151 | #include <security/mac_mach_internal.h> |
152 | #include <sys/syscall.h> |
153 | #endif /* CONFIG_MACF */ |
154 | |
155 | void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify); |
156 | void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task, |
157 | mach_exception_data_type_t code, mach_exception_data_type_t subcode, |
158 | uint64_t *udata_buffer, int num_udata, void *reason); |
159 | mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p); |
160 | void vfork_exit(proc_t p, int rv); |
161 | __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p); |
162 | __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p); |
163 | static int reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoinit, int locked, int droplock); |
164 | static void populate_corpse_crashinfo(proc_t p, task_t corpse_task, |
165 | struct rusage_superset *rup, mach_exception_data_type_t code, |
166 | mach_exception_data_type_t subcode, uint64_t *udata_buffer, |
167 | int num_udata, os_reason_t reason); |
168 | static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode); |
169 | extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval); |
170 | static __attribute__((noinline)) void launchd_crashed_panic(proc_t p, int rv); |
171 | extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo); |
172 | extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]); |
extern uint64_t get_task_phys_footprint_limit(task_t);
174 | int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size); |
175 | extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task); |
176 | |
177 | |
178 | /* |
179 | * Things which should have prototypes in headers, but don't |
180 | */ |
181 | void proc_exit(proc_t p); |
182 | int wait1continue(int result); |
183 | int waitidcontinue(int result); |
184 | kern_return_t sys_perf_notify(thread_t thread, int pid); |
185 | kern_return_t task_exception_notify(exception_type_t exception, |
186 | mach_exception_data_type_t code, mach_exception_data_type_t subcode); |
187 | kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *); |
188 | void delay(int); |
189 | void gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor); |
190 | |
191 | /* |
192 | * NOTE: Source and target may *NOT* overlap! |
193 | * XXX Should share code with bsd/dev/ppc/unix_signal.c |
194 | */ |
195 | void |
196 | siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out) |
197 | { |
198 | out->si_signo = in->si_signo; |
199 | out->si_errno = in->si_errno; |
200 | out->si_code = in->si_code; |
201 | out->si_pid = in->si_pid; |
202 | out->si_uid = in->si_uid; |
203 | out->si_status = in->si_status; |
204 | out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_addr); |
205 | /* following cast works for sival_int because of padding */ |
206 | out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_value.sival_ptr); |
207 | out->si_band = in->si_band; /* range reduction */ |
208 | } |
209 | |
210 | void |
211 | siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out) |
212 | { |
213 | out->si_signo = in->si_signo; |
214 | out->si_errno = in->si_errno; |
215 | out->si_code = in->si_code; |
216 | out->si_pid = in->si_pid; |
217 | out->si_uid = in->si_uid; |
218 | out->si_status = in->si_status; |
219 | out->si_addr = in->si_addr; |
220 | /* following cast works for sival_int because of padding */ |
221 | out->si_value.sival_ptr = in->si_value.sival_ptr; |
222 | out->si_band = in->si_band; /* range reduction */ |
223 | } |
224 | |
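/*
 * Copy a native siginfo out to the given user address, using the
 * 32- or 64-bit layout that matches the target process's ABI.
 */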
225 | static int |
226 | copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr) |
227 | { |
228 | if (is64) { |
229 | user64_siginfo_t sinfo64; |
230 | |
231 | bzero(&sinfo64, sizeof (sinfo64)); |
232 | siginfo_user_to_user64(native, &sinfo64); |
233 | return (copyout(&sinfo64, uaddr, sizeof (sinfo64))); |
234 | } else { |
235 | user32_siginfo_t sinfo32; |
236 | |
237 | bzero(&sinfo32, sizeof (sinfo32)); |
238 | siginfo_user_to_user32(native, &sinfo32); |
239 | return (copyout(&sinfo32, uaddr, sizeof (sinfo32))); |
240 | } |
241 | } |
242 | |
243 | void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task, |
244 | mach_exception_data_type_t code, mach_exception_data_type_t subcode, |
245 | uint64_t *udata_buffer, int num_udata, void *reason) |
246 | { |
247 | struct rusage_superset rup; |
248 | |
249 | gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT); |
250 | rup.ri.ri_phys_footprint = 0; |
251 | populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode, |
252 | udata_buffer, num_udata, reason); |
253 | } |
254 | |
255 | static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode) |
256 | { |
257 | mach_exception_data_type_t code_update = *code; |
258 | mach_exception_data_type_t subcode_update = *subcode; |
259 | if (p->p_exit_reason == OS_REASON_NULL) { |
260 | return; |
261 | } |
262 | |
263 | switch (p->p_exit_reason->osr_namespace) { |
264 | case OS_REASON_JETSAM: |
265 | if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) { |
266 | /* Update the code with EXC_RESOURCE code for high memory watermark */ |
267 | EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY); |
268 | EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK); |
269 | EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(p->task)) >> 20)); |
270 | subcode_update = 0; |
271 | break; |
272 | } |
273 | |
274 | break; |
275 | default: |
276 | break; |
277 | } |
278 | |
279 | *code = code_update; |
280 | *subcode = subcode_update; |
281 | return; |
282 | } |
283 | |
284 | mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p) |
285 | { |
286 | uint64_t subcode = 0; |
287 | |
288 | if (p->p_exit_reason == OS_REASON_NULL) { |
289 | return 0; |
290 | } |
291 | |
292 | /* Embed first 32 bits of osr_namespace and osr_code in exception code */ |
293 | ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace); |
294 | ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code); |
295 | return (mach_exception_data_type_t)subcode; |
296 | } |
297 | |
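/*
 * Fill the corpse's crash-info kcdata buffer with BSD-level state:
 * exception codes, process identity, rusage, ledger balances, the
 * exit reason, and any kqueue udata pointers collected by the caller.
 */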
298 | static void |
299 | populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup, |
300 | mach_exception_data_type_t code, mach_exception_data_type_t subcode, |
301 | uint64_t *udata_buffer, int num_udata, os_reason_t reason) |
302 | { |
303 | mach_vm_address_t uaddr = 0; |
304 | mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX]; |
305 | exc_codes[0] = code; |
306 | exc_codes[1] = subcode; |
307 | cpu_type_t cputype; |
308 | struct proc_uniqidentifierinfo p_uniqidinfo; |
309 | struct proc_workqueueinfo pwqinfo; |
310 | int retval = 0; |
311 | uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task); |
312 | unsigned int pflags = 0; |
uint64_t max_footprint_mb;
uint64_t max_footprint;
315 | |
316 | uint64_t ledger_internal; |
317 | uint64_t ledger_internal_compressed; |
318 | uint64_t ledger_iokit_mapped; |
319 | uint64_t ledger_alternate_accounting; |
320 | uint64_t ledger_alternate_accounting_compressed; |
321 | uint64_t ledger_purgeable_nonvolatile; |
322 | uint64_t ledger_purgeable_nonvolatile_compressed; |
323 | uint64_t ledger_page_table; |
uint64_t ledger_phys_footprint;
uint64_t ledger_phys_footprint_lifetime_max;
326 | uint64_t ledger_network_nonvolatile; |
327 | uint64_t ledger_network_nonvolatile_compressed; |
328 | uint64_t ledger_wired_mem; |
329 | |
330 | void *crash_info_ptr = task_get_corpseinfo(corpse_task); |
331 | |
332 | #if CONFIG_MEMORYSTATUS |
333 | int memstat_dirty_flags = 0; |
334 | #endif |
335 | |
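/*
 * Each item below follows the same kcdata pattern: reserve a typed
 * slot in the corpse buffer, then copy the payload in only if the
 * reservation succeeded.
 */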
336 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) { |
337 | kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes)); |
338 | } |
339 | |
340 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(p->p_pid), &uaddr)) { |
341 | kcdata_memcpy(crash_info_ptr, uaddr, &p->p_pid, sizeof(p->p_pid)); |
342 | } |
343 | |
344 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) { |
345 | kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid)); |
346 | } |
347 | |
348 | /* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */ |
349 | if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) { |
350 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) { |
351 | kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t)); |
352 | } |
353 | } |
354 | |
355 | static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo)); |
356 | if (KERN_SUCCESS == |
357 | kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) { |
358 | proc_piduniqidentifierinfo(p, &p_uniqidinfo); |
359 | kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo)); |
360 | } |
361 | |
362 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) { |
363 | kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current)); |
364 | } |
365 | |
366 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(p->p_csflags), &uaddr)) { |
367 | kcdata_memcpy(crash_info_ptr, uaddr, &p->p_csflags, sizeof(p->p_csflags)); |
368 | } |
369 | |
370 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) { |
371 | kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm)); |
372 | } |
373 | |
374 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) { |
375 | struct timeval64 t64; |
376 | t64.tv_sec = (int64_t)p->p_start.tv_sec; |
377 | t64.tv_usec = (int64_t)p->p_start.tv_usec; |
378 | kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64)); |
379 | } |
380 | |
381 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) { |
382 | kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack)); |
383 | } |
384 | |
385 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) { |
386 | kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen)); |
387 | } |
388 | |
389 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) { |
390 | kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc)); |
391 | } |
392 | |
393 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) { |
394 | char *buf = (char *) kalloc(MAXPATHLEN); |
395 | if (buf != NULL) { |
396 | bzero(buf, MAXPATHLEN); |
397 | proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval); |
398 | kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN); |
399 | kfree(buf, MAXPATHLEN); |
400 | } |
401 | } |
402 | |
403 | pflags = p->p_flag & (P_LP64 | P_SUGID); |
404 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) { |
405 | kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags)); |
406 | } |
407 | |
408 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) { |
409 | kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid)); |
410 | } |
411 | |
412 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) { |
413 | kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid)); |
414 | } |
415 | |
416 | cputype = cpu_type() & ~CPU_ARCH_MASK; |
417 | if (IS_64BIT_PROCESS(p)) |
418 | cputype |= CPU_ARCH_ABI64; |
419 | |
420 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) { |
421 | kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t)); |
422 | } |
423 | |
424 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) { |
425 | max_footprint = get_task_phys_footprint_limit(p->task); |
426 | max_footprint_mb = max_footprint >> 20; |
427 | kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb)); |
428 | } |
429 | |
430 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) { |
431 | ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(p->task); |
432 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max)); |
433 | } |
434 | |
435 | // In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency |
436 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) { |
437 | ledger_internal = get_task_internal(corpse_task); |
438 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal)); |
439 | } |
440 | |
441 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) { |
442 | ledger_internal_compressed = get_task_internal_compressed(corpse_task); |
443 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed)); |
444 | } |
445 | |
446 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) { |
447 | ledger_iokit_mapped = get_task_iokit_mapped(corpse_task); |
448 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped)); |
449 | } |
450 | |
451 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) { |
452 | ledger_alternate_accounting = get_task_alternate_accounting(corpse_task); |
453 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting)); |
454 | } |
455 | |
456 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) { |
457 | ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task); |
458 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed)); |
459 | } |
460 | |
461 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) { |
462 | ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task); |
463 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile)); |
464 | } |
465 | |
466 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) { |
467 | ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task); |
468 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed)); |
469 | } |
470 | |
471 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) { |
472 | ledger_page_table = get_task_page_table(corpse_task); |
473 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table)); |
474 | } |
475 | |
476 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) { |
477 | ledger_phys_footprint = get_task_phys_footprint(corpse_task); |
478 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint)); |
479 | } |
480 | |
481 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) { |
482 | ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task); |
483 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile)); |
484 | } |
485 | |
486 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) { |
487 | ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task); |
488 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed)); |
489 | } |
490 | |
491 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) { |
492 | ledger_wired_mem = get_task_wired_mem(corpse_task); |
493 | kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem)); |
494 | } |
495 | |
496 | bzero(&pwqinfo, sizeof(struct proc_workqueueinfo)); |
497 | retval = fill_procworkqueue(p, &pwqinfo); |
498 | if (retval == 0) { |
499 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) { |
500 | kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo)); |
501 | } |
502 | } |
503 | |
504 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) { |
505 | kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid)); |
506 | } |
507 | |
508 | #if CONFIG_COALITIONS |
509 | if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) { |
510 | uint64_t coalition_ids[COALITION_NUM_TYPES]; |
511 | task_coalition_ids(p->task, coalition_ids); |
512 | kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids)); |
513 | } |
514 | #endif /* CONFIG_COALITIONS */ |
515 | |
516 | #if CONFIG_MEMORYSTATUS |
517 | memstat_dirty_flags = memorystatus_dirty_get(p); |
518 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) { |
519 | kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags)); |
520 | } |
521 | #endif |
522 | |
523 | if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) { |
524 | reason = p->p_exit_reason; |
525 | } |
526 | if (reason != OS_REASON_NULL) { |
527 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) { |
528 | struct exit_reason_snapshot ers = { |
529 | .ers_namespace = reason->osr_namespace, |
530 | .ers_code = reason->osr_code, |
531 | .ers_flags = reason->osr_flags |
532 | }; |
533 | |
534 | kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers)); |
535 | } |
536 | |
537 | if (reason->osr_kcd_buf != 0) { |
538 | uint32_t reason_buf_size = kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor); |
539 | assert(reason_buf_size != 0); |
540 | |
541 | if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) { |
542 | kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size); |
543 | } |
544 | } |
545 | } |
546 | |
547 | if (num_udata > 0) { |
548 | if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS, |
549 | sizeof(uint64_t), num_udata, &uaddr)) { |
550 | kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata); |
551 | } |
552 | } |
553 | } |
554 | |
555 | /* |
556 | * We only parse exit reason kcdata blobs for launchd when it dies |
557 | * and we're going to panic. |
558 | * |
559 | * Meant to be called immediately before panicking. |
560 | */ |
561 | char * |
562 | launchd_exit_reason_get_string_desc(os_reason_t exit_reason) |
563 | { |
564 | kcdata_iter_t iter; |
565 | |
566 | if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL || |
567 | exit_reason->osr_bufsize == 0) { |
568 | return NULL; |
569 | } |
570 | |
571 | iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize); |
572 | if (!kcdata_iter_valid(iter)) { |
573 | #if DEBUG || DEVELOPMENT |
574 | printf("launchd exit reason has invalid exit reason buffer\n" ); |
575 | #endif |
576 | return NULL; |
577 | } |
578 | |
579 | if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) { |
580 | #if DEBUG || DEVELOPMENT |
581 | printf("launchd exit reason buffer type mismatch, expected %d got %d\n" , |
582 | KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter)); |
583 | #endif |
584 | return NULL; |
585 | } |
586 | |
587 | iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC); |
588 | if (!kcdata_iter_valid(iter)) { |
589 | return NULL; |
590 | } |
591 | |
592 | return (char *)kcdata_iter_payload(iter); |
593 | } |
594 | |
595 | static __attribute__((noinline)) void |
596 | launchd_crashed_panic(proc_t p, int rv) |
597 | { |
598 | char *launchd_exit_reason_desc = launchd_exit_reason_get_string_desc(p->p_exit_reason); |
599 | |
600 | if (p->p_exit_reason == OS_REASON_NULL) { |
601 | printf("pid 1 exited -- no exit reason available -- (signal %d, exit %d)\n" , |
602 | WTERMSIG(rv), WEXITSTATUS(rv)); |
603 | } else { |
604 | printf("pid 1 exited -- exit reason namespace %d subcode 0x%llx, description %s\n" , |
605 | p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, launchd_exit_reason_desc ? |
606 | launchd_exit_reason_desc : "none" ); |
607 | } |
608 | |
609 | const char *launchd_crashed_prefix_str; |
610 | |
if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
launchd_crashed_prefix_str = "LTE preinit process exited";
} else {
launchd_crashed_prefix_str = "initproc exited";
615 | } |
616 | |
617 | #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP |
618 | /* |
619 | * For debugging purposes, generate a core file of initproc before |
620 | * panicking. Leave at least 300 MB free on the root volume, and ignore |
621 | * the process's corefile ulimit. fsync() the file to ensure it lands on disk |
622 | * before the panic hits. |
623 | */ |
624 | |
625 | int err; |
626 | uint64_t coredump_start = mach_absolute_time(); |
627 | uint64_t coredump_end; |
628 | clock_sec_t tv_sec; |
629 | clock_usec_t tv_usec; |
630 | uint32_t tv_msec; |
631 | |
632 | |
633 | err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC); |
634 | |
635 | coredump_end = mach_absolute_time(); |
636 | |
637 | absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec); |
638 | |
639 | tv_msec = tv_usec / 1000; |
640 | |
641 | if (err != 0) { |
642 | printf("Failed to generate initproc core file: error %d, took %d.%03d seconds\n" , |
643 | err, (uint32_t)tv_sec, tv_msec); |
644 | } else { |
645 | printf("Generated initproc core file in %d.%03d seconds\n" , |
646 | (uint32_t)tv_sec, tv_msec); |
647 | } |
648 | #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */ |
649 | |
650 | sync(p, (void *)NULL, (int *)NULL); |
651 | |
652 | if (p->p_exit_reason == OS_REASON_NULL) { |
panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
launchd_crashed_prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((p->p_csflags & CS_KILLED) ? "CS_KILLED" : ""));
655 | } else { |
panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
((p->p_csflags & CS_KILLED) ? "CS_KILLED" : ""),
launchd_crashed_prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
launchd_exit_reason_desc ? launchd_exit_reason_desc : "none");
660 | } |
661 | } |
662 | |
663 | #define OS_REASON_IFLAG_USER_FAULT 0x1 |
664 | |
665 | #define OS_REASON_TOTAL_USER_FAULTS_PER_PROC 5 |
666 | |
667 | static int |
668 | abort_with_payload_internal(proc_t p, |
669 | uint32_t reason_namespace, uint64_t reason_code, |
670 | user_addr_t payload, uint32_t payload_size, |
671 | user_addr_t reason_string, uint64_t reason_flags, |
672 | uint32_t internal_flags) |
673 | { |
674 | os_reason_t exit_reason = OS_REASON_NULL; |
675 | kern_return_t kr = KERN_SUCCESS; |
676 | |
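/*
 * Simulated user faults are rate-limited: atomically bump
 * p_user_faults and bail out with EQFULL once the per-process cap
 * (OS_REASON_TOTAL_USER_FAULTS_PER_PROC) has been reached.
 */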
677 | if (internal_flags & OS_REASON_IFLAG_USER_FAULT) { |
678 | uint32_t old_value = atomic_load_explicit(&p->p_user_faults, |
679 | memory_order_relaxed); |
680 | for (;;) { |
681 | if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) { |
682 | return EQFULL; |
683 | } |
684 | // this reloads the value in old_value |
685 | if (atomic_compare_exchange_strong_explicit(&p->p_user_faults, |
686 | &old_value, old_value + 1, memory_order_relaxed, |
687 | memory_order_relaxed)) { |
688 | break; |
689 | } |
690 | } |
691 | } |
692 | |
693 | KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, |
694 | p->p_pid, reason_namespace, |
695 | reason_code, 0, 0); |
696 | |
697 | exit_reason = build_userspace_exit_reason(reason_namespace, reason_code, |
698 | payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT); |
699 | |
700 | if (internal_flags & OS_REASON_IFLAG_USER_FAULT) { |
701 | mach_exception_code_t code = 0; |
702 | |
703 | EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */ |
704 | EXC_GUARD_ENCODE_FLAVOR(code, 0); |
705 | EXC_GUARD_ENCODE_TARGET(code, reason_namespace); |
706 | |
707 | if (exit_reason == OS_REASON_NULL) { |
708 | kr = KERN_RESOURCE_SHORTAGE; |
709 | } else { |
710 | kr = task_violated_guard(code, reason_code, exit_reason); |
711 | } |
712 | os_reason_free(exit_reason); |
713 | } else { |
714 | /* |
715 | * We use SIGABRT (rather than calling exit directly from here) so that |
716 | * the debugger can catch abort_with_{reason,payload} calls. |
717 | */ |
718 | psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason); |
719 | } |
720 | |
721 | switch (kr) { |
722 | case KERN_SUCCESS: |
723 | return 0; |
724 | case KERN_NOT_SUPPORTED: |
725 | return ENOTSUP; |
726 | case KERN_INVALID_ARGUMENT: |
727 | return EINVAL; |
728 | case KERN_RESOURCE_SHORTAGE: |
729 | default: |
730 | return EBUSY; |
731 | } |
732 | } |
733 | |
734 | int |
735 | abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args, |
736 | __unused void *retval) |
737 | { |
738 | abort_with_payload_internal(cur_proc, args->reason_namespace, |
739 | args->reason_code, args->payload, args->payload_size, |
740 | args->reason_string, args->reason_flags, 0); |
741 | |
742 | return 0; |
743 | } |
744 | |
745 | int |
746 | os_fault_with_payload(struct proc *cur_proc, |
747 | struct os_fault_with_payload_args *args, __unused int *retval) |
748 | { |
749 | return abort_with_payload_internal(cur_proc, args->reason_namespace, |
750 | args->reason_code, args->payload, args->payload_size, |
751 | args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT); |
752 | } |
753 | |
754 | |
755 | /* |
756 | * exit -- |
757 | * Death of process. |
758 | */ |
759 | __attribute__((noreturn)) |
760 | void |
761 | exit(proc_t p, struct exit_args *uap, int *retval) |
762 | { |
763 | p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24; |
764 | exit1(p, W_EXITCODE(uap->rval, 0), retval); |
765 | |
766 | thread_exception_return(); |
767 | /* NOTREACHED */ |
768 | while (TRUE) |
769 | thread_block(THREAD_CONTINUE_NULL); |
770 | /* NOTREACHED */ |
771 | } |
772 | |
773 | /* |
774 | * Exit: deallocate address space and other resources, change proc state |
775 | * to zombie, and unlink proc from allproc and parent's lists. Save exit |
776 | * status and rusage for wait(). Check for child processes and orphan them. |
777 | */ |
778 | int |
779 | exit1(proc_t p, int rv, int *retval) |
780 | { |
781 | return exit1_internal(p, rv, retval, TRUE, TRUE, 0); |
782 | } |
783 | |
784 | int |
785 | exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify, |
786 | int jetsam_flags) |
787 | { |
788 | return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL); |
789 | } |
790 | |
791 | /* |
792 | * NOTE: exit_with_reason drops a reference on the passed exit_reason |
793 | */ |
794 | int |
795 | exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify, |
796 | int jetsam_flags, struct os_reason *exit_reason) |
797 | { |
798 | thread_t self = current_thread(); |
799 | struct task *task = p->task; |
800 | struct uthread *ut; |
801 | int error = 0; |
802 | |
803 | /* |
804 | * If a thread in this task has already |
805 | * called exit(), then halt any others |
806 | * right here. |
807 | */ |
808 | |
809 | ut = get_bsdthread_info(self); |
810 | if ((p == current_proc()) && |
811 | (ut->uu_flag & UT_VFORK)) { |
812 | os_reason_free(exit_reason); |
813 | if (!thread_can_terminate) { |
814 | return EINVAL; |
815 | } |
816 | |
817 | vfork_exit(p, rv); |
vfork_return(p, retval, p->p_pid);
819 | unix_syscall_return(0); |
820 | /* NOT REACHED */ |
821 | } |
822 | |
823 | /* |
824 | * The parameter list of audit_syscall_exit() was augmented to |
825 | * take the Darwin syscall number as the first parameter, |
826 | * which is currently required by mac_audit_postselect(). |
827 | */ |
828 | |
829 | /* |
830 | * The BSM token contains two components: an exit status as passed |
831 | * to exit(), and a return value to indicate what sort of exit it |
832 | * was. The exit status is WEXITSTATUS(rv), but it's not clear |
833 | * what the return value is. |
834 | */ |
835 | AUDIT_ARG(exit, WEXITSTATUS(rv), 0); |
836 | /* |
 * TODO: what to audit here when jetsam calls exit and the uthread
 * 'ut' does not belong to the proc 'p'.
839 | */ |
AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
841 | |
842 | DTRACE_PROC1(exit, int, CLD_EXITED); |
843 | |
844 | /* mark process is going to exit and pull out of DBG/disk throttle */ |
845 | /* TODO: This should be done after becoming exit thread */ |
846 | proc_set_task_policy(p->task, TASK_POLICY_ATTRIBUTE, |
847 | TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE); |
848 | |
849 | proc_lock(p); |
850 | error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0)); |
851 | if (error == EDEADLK) { |
852 | /* |
853 | * If proc_transstart() returns EDEADLK, then another thread |
854 | * is either exec'ing or exiting. Return an error and allow |
855 | * the other thread to continue. |
856 | */ |
857 | proc_unlock(p); |
858 | os_reason_free(exit_reason); |
if (current_proc() == p) {
860 | if (p->exit_thread == self) { |
861 | printf("exit_thread failed to exit, leaving process %s[%d] in unkillable limbo\n" , |
862 | p->p_comm, p->p_pid); |
863 | } |
864 | |
865 | if (thread_can_terminate) { |
866 | thread_exception_return(); |
867 | } |
868 | } |
869 | |
870 | return error; |
871 | } |
872 | |
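/*
 * Elect a single exit thread: any other thread arriving here either
 * returns to its caller or terminates itself, so only the winner
 * proceeds with the teardown below.
 */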
873 | while (p->exit_thread != self) { |
874 | if (sig_try_locked(p) <= 0) { |
875 | proc_transend(p, 1); |
876 | os_reason_free(exit_reason); |
877 | |
878 | if (get_threadtask(self) != task) { |
879 | proc_unlock(p); |
880 | return(0); |
881 | } |
882 | proc_unlock(p); |
883 | |
884 | thread_terminate(self); |
885 | if (!thread_can_terminate) { |
886 | return 0; |
887 | } |
888 | |
889 | thread_exception_return(); |
890 | /* NOTREACHED */ |
891 | } |
892 | sig_lock_to_exit(p); |
893 | } |
894 | |
895 | if (exit_reason != OS_REASON_NULL) { |
896 | KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE, |
897 | p->p_pid, exit_reason->osr_namespace, |
898 | exit_reason->osr_code, 0, 0); |
899 | } |
900 | |
901 | assert(p->p_exit_reason == OS_REASON_NULL); |
902 | p->p_exit_reason = exit_reason; |
903 | |
904 | p->p_lflag |= P_LEXIT; |
905 | p->p_xstat = rv; |
906 | p->p_lflag |= jetsam_flags; |
907 | |
908 | proc_transend(p, 1); |
909 | proc_unlock(p); |
910 | |
911 | proc_prepareexit(p, rv, perf_notify); |
912 | |
913 | /* Last thread to terminate will call proc_exit() */ |
914 | task_terminate_internal(task); |
915 | |
916 | return(0); |
917 | } |
918 | |
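/*
 * First phase of exit, run before the task is terminated: panic if
 * launchd is the one dying, notify crash handlers (marking the task
 * as a corpse if nobody handles EXC_CRASH), stash final rusage for
 * proc_pid_rusage() observers, and move the proc onto the zombie list.
 */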
919 | void |
920 | proc_prepareexit(proc_t p, int rv, boolean_t perf_notify) |
921 | { |
922 | mach_exception_data_type_t code = 0, subcode = 0; |
923 | |
924 | struct uthread *ut; |
925 | thread_t self = current_thread(); |
926 | ut = get_bsdthread_info(self); |
927 | struct rusage_superset *rup; |
928 | int kr = 0; |
929 | int create_corpse = FALSE; |
930 | |
931 | if (p == initproc) { |
932 | launchd_crashed_panic(p, rv); |
933 | /* NOTREACHED */ |
934 | } |
935 | |
936 | /* |
937 | * Generate a corefile/crashlog if: |
938 | * The process doesn't have an exit reason that indicates no crash report should be created |
939 | * AND any of the following are true: |
940 | * - The process was terminated due to a fatal signal that generates a core |
941 | * - The process was killed due to a code signing violation |
942 | * - The process has an exit reason that indicates we should generate a crash report |
943 | * |
944 | * The first condition is necessary because abort_with_reason()/payload() use SIGABRT |
945 | * (which normally triggers a core) but may indicate that no crash report should be created. |
946 | */ |
947 | if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) && |
948 | (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0) || |
949 | (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & |
950 | OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) { |
951 | /* |
952 | * Workaround for processes checking up on PT_DENY_ATTACH: |
953 | * should be backed out post-Leopard (details in 5431025). |
954 | */ |
955 | if ((SIGSEGV == WTERMSIG(rv)) && |
956 | (p->p_pptr->p_lflag & P_LNOATTACH)) { |
957 | goto skipcheck; |
958 | } |
959 | |
960 | /* |
961 | * Crash Reporter looks for the signal value, original exception |
962 | * type, and low 20 bits of the original code in code[0] |
963 | * (8, 4, and 20 bits respectively). code[1] is unmodified. |
964 | */ |
965 | code = ((WTERMSIG(rv) & 0xff) << 24) | |
966 | ((ut->uu_exception & 0x0f) << 20) | |
967 | ((int)ut->uu_code & 0xfffff); |
968 | subcode = ut->uu_subcode; |
969 | |
970 | kr = task_exception_notify(EXC_CRASH, code, subcode); |
971 | |
972 | /* Nobody handled EXC_CRASH?? remember to make corpse */ |
973 | if (kr != 0) { |
974 | create_corpse = TRUE; |
975 | } |
976 | } |
977 | |
978 | skipcheck: |
979 | /* Notify the perf server? */ |
980 | if (perf_notify) { |
981 | (void)sys_perf_notify(self, p->p_pid); |
982 | } |
983 | |
984 | |
/* Stash the usage into corpse data if create_corpse == TRUE */
986 | if (create_corpse == TRUE) { |
987 | kr = task_mark_corpse(p->task); |
988 | if (kr != KERN_SUCCESS) { |
989 | if (kr == KERN_NO_SPACE) { |
990 | printf("Process[%d] has no vm space for corpse info.\n" , p->p_pid); |
991 | } else if (kr == KERN_NOT_SUPPORTED) { |
992 | printf("Process[%d] was destined to be corpse. But corpse is disabled by config.\n" , p->p_pid); |
993 | } else { |
994 | printf("Process[%d] crashed: %s. Too many corpses being created.\n" , p->p_pid, p->p_comm); |
995 | } |
996 | create_corpse = FALSE; |
997 | } |
998 | } |
999 | |
1000 | /* |
1001 | * Before this process becomes a zombie, stash resource usage |
1002 | * stats in the proc for external observers to query |
1003 | * via proc_pid_rusage(). |
1004 | * |
1005 | * If the zombie allocation fails, just punt the stats. |
1006 | */ |
1007 | MALLOC_ZONE(rup, struct rusage_superset *, |
1008 | sizeof (*rup), M_ZOMBIE, M_WAITOK); |
1009 | if (rup != NULL) { |
1010 | gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT); |
1011 | rup->ri.ri_phys_footprint = 0; |
1012 | rup->ri.ri_proc_exit_abstime = mach_absolute_time(); |
1013 | |
1014 | /* |
1015 | * Make the rusage_info visible to external observers |
1016 | * only after it has been completely filled in. |
1017 | */ |
1018 | p->p_ru = rup; |
1019 | } |
1020 | if (create_corpse) { |
1021 | int est_knotes = 0, num_knotes = 0; |
1022 | uint64_t *buffer = NULL; |
1023 | int buf_size = 0; |
1024 | |
1025 | /* Get all the udata pointers from kqueue */ |
1026 | est_knotes = kevent_proc_copy_uptrs(p, NULL, 0); |
1027 | if (est_knotes > 0) { |
1028 | buf_size = (est_knotes + 32) * sizeof(uint64_t); |
1029 | buffer = (uint64_t *) kalloc(buf_size); |
1030 | num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size); |
1031 | if (num_knotes > est_knotes + 32) { |
1032 | num_knotes = est_knotes + 32; |
1033 | } |
1034 | } |
1035 | |
1036 | /* Update the code, subcode based on exit reason */ |
1037 | proc_update_corpse_exception_codes(p, &code, &subcode); |
1038 | populate_corpse_crashinfo(p, p->task, rup, |
1039 | code, subcode, buffer, num_knotes, NULL); |
1040 | if (buffer != NULL) { |
1041 | kfree(buffer, buf_size); |
1042 | } |
1043 | } |
1044 | /* |
1045 | * Remove proc from allproc queue and from pidhash chain. |
1046 | * Need to do this before we do anything that can block. |
 * Not doing so causes things like mount() to find this proc
 * on allproc in a partially cleaned state.
1049 | */ |
1050 | |
1051 | proc_list_lock(); |
1052 | |
1053 | #if CONFIG_MEMORYSTATUS |
1054 | memorystatus_remove(p, TRUE); |
1055 | #endif |
1056 | |
1057 | LIST_REMOVE(p, p_list); |
1058 | LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */ |
1059 | /* will not be visible via proc_find */ |
1060 | p->p_listflag |= P_LIST_EXITED; |
1061 | |
1062 | proc_list_unlock(); |
1063 | |
1064 | |
1065 | #ifdef PGINPROF |
1066 | vmsizmon(); |
1067 | #endif |
1068 | /* |
1069 | * If parent is waiting for us to exit or exec, |
1070 | * P_LPPWAIT is set; we will wakeup the parent below. |
1071 | */ |
1072 | proc_lock(p); |
1073 | p->p_lflag &= ~(P_LTRACED | P_LPPWAIT); |
1074 | p->p_sigignore = ~(sigcantmask); |
1075 | ut->uu_siglist = 0; |
1076 | proc_unlock(p); |
1077 | } |
1078 | |
1079 | void |
1080 | proc_exit(proc_t p) |
1081 | { |
1082 | proc_t q; |
1083 | proc_t pp; |
1084 | struct task *task = p->task; |
1085 | vnode_t tvp = NULLVP; |
1086 | struct pgrp * pg; |
1087 | struct session *sessp; |
1088 | struct uthread * uth; |
1089 | pid_t pid; |
1090 | int exitval; |
1091 | int knote_hint; |
1092 | |
1093 | uth = current_uthread(); |
1094 | |
1095 | proc_lock(p); |
1096 | proc_transstart(p, 1, 0); |
if (!(p->p_lflag & P_LEXIT)) {
1098 | /* |
1099 | * This can happen if a thread_terminate() occurs |
1100 | * in a single-threaded process. |
1101 | */ |
1102 | p->p_lflag |= P_LEXIT; |
1103 | proc_transend(p, 1); |
1104 | proc_unlock(p); |
1105 | proc_prepareexit(p, 0, TRUE); |
1106 | (void) task_terminate_internal(task); |
1107 | proc_lock(p); |
1108 | } else { |
1109 | proc_transend(p, 1); |
1110 | } |
1111 | |
1112 | p->p_lflag |= P_LPEXIT; |
1113 | |
1114 | /* |
1115 | * Other kernel threads may be in the middle of signalling this process. |
1116 | * Wait for those threads to wrap it up before making the process |
1117 | * disappear on them. |
1118 | */ |
1119 | if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) { |
1120 | p->p_sigwaitcnt++; |
1121 | while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) |
1122 | msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain" , NULL); |
1123 | p->p_sigwaitcnt--; |
1124 | } |
1125 | |
1126 | proc_unlock(p); |
1127 | pid = p->p_pid; |
1128 | exitval = p->p_xstat; |
1129 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, |
1130 | BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START, |
1131 | pid, exitval, 0, 0, 0); |
1132 | |
1133 | #if CONFIG_DTRACE |
1134 | dtrace_proc_exit(p); |
1135 | #endif |
1136 | |
1137 | nspace_proc_exit(p); |
1138 | |
1139 | /* |
1140 | * need to cancel async IO requests that can be cancelled and wait for those |
1141 | * already active. MAY BLOCK! |
1142 | */ |
1143 | |
1144 | proc_refdrain(p); |
1145 | |
1146 | /* if any pending cpu limits action, clear it */ |
1147 | task_clear_cpuusage(p->task, TRUE); |
1148 | |
1149 | workq_mark_exiting(p); |
1150 | |
1151 | _aio_exit( p ); |
1152 | |
1153 | /* |
1154 | * Close open files and release open-file table. |
1155 | * This may block! |
1156 | */ |
1157 | fdfree(p); |
1158 | |
1159 | /* |
1160 | * Once all the knotes, kqueues & workloops are destroyed, get rid of the |
1161 | * workqueue. |
1162 | */ |
1163 | workq_exit(p); |
1164 | |
1165 | if (uth->uu_lowpri_window) { |
1166 | /* |
1167 | * task is marked as a low priority I/O type |
 * and the I/O we issued while flushing files on close
 * collided with normal I/O operations...
 * no need to throttle this thread since it's going away,
 * but we do need to update our bookkeeping w.r.t. throttled threads
1172 | */ |
1173 | throttle_lowpri_io(0); |
1174 | } |
1175 | |
1176 | #if SYSV_SHM |
/* Close ref SYSV shared memory */
1178 | if (p->vm_shm) |
1179 | shmexit(p); |
1180 | #endif |
1181 | #if SYSV_SEM |
1182 | /* Release SYSV semaphores */ |
1183 | semexit(p); |
1184 | #endif |
1185 | |
1186 | #if PSYNCH |
1187 | pth_proc_hashdelete(p); |
1188 | #endif /* PSYNCH */ |
1189 | |
1190 | sessp = proc_session(p); |
1191 | if (SESS_LEADER(p, sessp)) { |
1192 | |
1193 | if (sessp->s_ttyvp != NULLVP) { |
1194 | struct vnode *ttyvp; |
1195 | int ttyvid; |
1196 | int cttyflag = 0; |
1197 | struct vfs_context context; |
1198 | struct tty *tp; |
1199 | |
1200 | /* |
1201 | * Controlling process. |
1202 | * Signal foreground pgrp, |
1203 | * drain controlling terminal |
1204 | * and revoke access to controlling terminal. |
1205 | */ |
1206 | session_lock(sessp); |
1207 | tp = SESSION_TP(sessp); |
1208 | if ((tp != TTY_NULL) && (tp->t_session == sessp)) { |
1209 | session_unlock(sessp); |
1210 | |
1211 | /* |
1212 | * We're going to SIGHUP the foreground process |
1213 | * group. It can't change from this point on |
1214 | * until the revoke is complete. |
1215 | * The process group changes under both the tty |
 * lock and proc_list_lock but we need only one of them.
1217 | */ |
1218 | tty_lock(tp); |
1219 | ttysetpgrphup(tp); |
1220 | tty_unlock(tp); |
1221 | |
1222 | tty_pgsignal(tp, SIGHUP, 1); |
1223 | |
1224 | session_lock(sessp); |
1225 | tp = SESSION_TP(sessp); |
1226 | } |
1227 | cttyflag = sessp->s_flags & S_CTTYREF; |
1228 | sessp->s_flags &= ~S_CTTYREF; |
1229 | ttyvp = sessp->s_ttyvp; |
1230 | ttyvid = sessp->s_ttyvid; |
1231 | sessp->s_ttyvp = NULLVP; |
1232 | sessp->s_ttyvid = 0; |
1233 | sessp->s_ttyp = TTY_NULL; |
1234 | sessp->s_ttypgrpid = NO_PID; |
1235 | session_unlock(sessp); |
1236 | |
1237 | if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) { |
1238 | if (tp != TTY_NULL) { |
1239 | tty_lock(tp); |
1240 | (void) ttywait(tp); |
1241 | tty_unlock(tp); |
1242 | } |
1243 | context.vc_thread = proc_thread(p); /* XXX */ |
1244 | context.vc_ucred = kauth_cred_proc_ref(p); |
1245 | VNOP_REVOKE(ttyvp, REVOKEALL, &context); |
1246 | if (cttyflag) { |
1247 | /* |
1248 | * Release the extra usecount taken in cttyopen. |
1249 | * usecount should be released after VNOP_REVOKE is called. |
1250 | * This usecount was taken to ensure that |
1251 | * the VNOP_REVOKE results in a close to |
1252 | * the tty since cttyclose is a no-op. |
1253 | */ |
1254 | vnode_rele(ttyvp); |
1255 | } |
1256 | vnode_put(ttyvp); |
1257 | kauth_cred_unref(&context.vc_ucred); |
1258 | ttyvp = NULLVP; |
1259 | } |
1260 | if (tp) { |
1261 | /* |
1262 | * This is cleared even if not set. This is also done in |
1263 | * spec_close to ensure that the flag is cleared. |
1264 | */ |
1265 | tty_lock(tp); |
1266 | ttyclrpgrphup(tp); |
1267 | tty_unlock(tp); |
1268 | |
1269 | ttyfree(tp); |
1270 | } |
1271 | } |
1272 | session_lock(sessp); |
1273 | sessp->s_leader = NULL; |
1274 | session_unlock(sessp); |
1275 | } |
1276 | session_rele(sessp); |
1277 | |
1278 | pg = proc_pgrp(p); |
1279 | fixjobc(p, pg, 0); |
1280 | pg_rele(pg); |
1281 | |
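/*
 * Lift the file-size limit so the accounting record can be written
 * out regardless of the process's previous RLIMIT_FSIZE setting.
 */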
1282 | p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; |
1283 | (void)acct_process(p); |
1284 | |
1285 | proc_list_lock(); |
1286 | |
1287 | if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) { |
1288 | p->p_listflag &= ~P_LIST_EXITCOUNT; |
1289 | proc_shutdown_exitcount--; |
1290 | if (proc_shutdown_exitcount == 0) |
1291 | wakeup(&proc_shutdown_exitcount); |
1292 | } |
1293 | |
1294 | /* wait till parentrefs are dropped and grant no more */ |
1295 | proc_childdrainstart(p); |
1296 | while ((q = p->p_children.lh_first) != NULL) { |
1297 | int reparentedtoinit = (q->p_listflag & P_LIST_DEADPARENT) ? 1 : 0; |
1298 | if (q->p_stat == SZOMB) { |
1299 | if (p != q->p_pptr) |
1300 | panic("parent child linkage broken" ); |
1301 | /* check for sysctl zomb lookup */ |
1302 | while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { |
1303 | msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll" , 0); |
1304 | } |
1305 | q->p_listflag |= P_LIST_WAITING; |
1306 | /* |
1307 | * This is a named reference and it is not granted |
1308 | * if the reap is already in progress. So we get |
1309 | * the reference here exclusively and their can be |
1310 | * no waiters. So there is no need for a wakeup |
1311 | * after we are done. Also the reap frees the structure |
1312 | * and the proc struct cannot be used for wakeups as well. |
1313 | * It is safe to use q here as this is system reap |
1314 | */ |
1315 | (void)reap_child_locked(p, q, 1, reparentedtoinit, 1, 0); |
1316 | } else { |
1317 | /* |
1318 | * Traced processes are killed |
1319 | * since their existence means someone is messing up. |
1320 | */ |
1321 | if (q->p_lflag & P_LTRACED) { |
1322 | struct proc *opp; |
1323 | |
1324 | /* |
1325 | * Take a reference on the child process to |
1326 | * ensure it doesn't exit and disappear between |
1327 | * the time we drop the list_lock and attempt |
1328 | * to acquire its proc_lock. |
1329 | */ |
1330 | if (proc_ref_locked(q) != q) |
1331 | continue; |
1332 | |
1333 | proc_list_unlock(); |
1334 | |
1335 | opp = proc_find(q->p_oppid); |
1336 | if (opp != PROC_NULL) { |
1337 | proc_list_lock(); |
1338 | q->p_oppid = 0; |
1339 | proc_list_unlock(); |
1340 | proc_reparentlocked(q, opp, 0, 0); |
1341 | proc_rele(opp); |
1342 | } else { |
1343 | /* original parent exited while traced */ |
1344 | proc_list_lock(); |
1345 | q->p_listflag |= P_LIST_DEADPARENT; |
1346 | q->p_oppid = 0; |
1347 | proc_list_unlock(); |
1348 | proc_reparentlocked(q, initproc, 0, 0); |
1349 | } |
1350 | |
1351 | proc_lock(q); |
1352 | q->p_lflag &= ~P_LTRACED; |
1353 | |
1354 | if (q->sigwait_thread) { |
1355 | thread_t thread = q->sigwait_thread; |
1356 | |
1357 | proc_unlock(q); |
1358 | /* |
1359 | * The sigwait_thread could be stopped at a |
1360 | * breakpoint. Wake it up to kill. |
1361 | * Need to do this as it could be a thread which is not |
1362 | * the first thread in the task. So any attempts to kill |
 * the process would result in a deadlock on q->sigwait.
1364 | */ |
1365 | thread_resume(thread); |
1366 | clear_wait(thread, THREAD_INTERRUPTED); |
1367 | threadsignal(thread, SIGKILL, 0, TRUE); |
1368 | } else { |
1369 | proc_unlock(q); |
1370 | } |
1371 | |
1372 | psignal(q, SIGKILL); |
1373 | proc_list_lock(); |
1374 | proc_rele_locked(q); |
1375 | } else { |
1376 | q->p_listflag |= P_LIST_DEADPARENT; |
1377 | proc_reparentlocked(q, initproc, 0, 1); |
1378 | } |
1379 | } |
1380 | } |
1381 | |
1382 | proc_childdrainend(p); |
1383 | proc_list_unlock(); |
1384 | |
1385 | #if CONFIG_MACF |
1386 | /* |
1387 | * Notify MAC policies that proc is dead. |
1388 | * This should be replaced with proper label management |
1389 | * (rdar://problem/32126399). |
1390 | */ |
1391 | mac_proc_notify_exit(p); |
1392 | #endif |
1393 | |
1394 | /* |
1395 | * Release reference to text vnode |
1396 | */ |
1397 | tvp = p->p_textvp; |
1398 | p->p_textvp = NULL; |
1399 | if (tvp != NULLVP) { |
1400 | vnode_rele(tvp); |
1401 | } |
1402 | |
1403 | /* |
1404 | * Save exit status and final rusage info, adding in child rusage |
1405 | * info and self times. If we were unable to allocate a zombie |
1406 | * structure, this information is lost. |
1407 | */ |
1408 | if (p->p_ru != NULL) { |
1409 | calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL); |
1410 | p->p_ru->ru = p->p_stats->p_ru; |
1411 | |
1412 | ruadd(&(p->p_ru->ru), &p->p_stats->p_cru); |
1413 | } |
1414 | |
1415 | /* |
1416 | * Free up profiling buffers. |
1417 | */ |
1418 | { |
1419 | struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn; |
1420 | |
1421 | p1 = p0->pr_next; |
1422 | p0->pr_next = NULL; |
1423 | p0->pr_scale = 0; |
1424 | |
1425 | for (; p1 != NULL; p1 = pn) { |
1426 | pn = p1->pr_next; |
1427 | kfree(p1, sizeof *p1); |
1428 | } |
1429 | } |
1430 | |
1431 | proc_free_realitimer(p); |
1432 | |
1433 | /* |
1434 | * Other substructures are freed from wait(). |
1435 | */ |
1436 | FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS); |
1437 | p->p_stats = NULL; |
1438 | |
1439 | FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS); |
1440 | p->p_sigacts = NULL; |
1441 | |
1442 | proc_limitdrop(p, 1); |
1443 | p->p_limit = NULL; |
1444 | |
1445 | /* |
1446 | * Finish up by terminating the task |
1447 | * and halt this thread (only if a |
1448 | * member of the task exiting). |
1449 | */ |
1450 | p->task = TASK_NULL; |
1451 | set_bsdtask_info(task, NULL); |
1452 | |
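/*
 * Publish NOTE_EXIT to any kqueue EVFILT_PROC watchers; the low
 * 16 bits of the hint carry the wait() status word.
 */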
1453 | knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff); |
1454 | proc_knote(p, knote_hint); |
1455 | |
/*
 * Mark the thread as the one that is doing proc_exit;
 * no need to hold the proc lock in uthread_free.
 */
1459 | uth->uu_flag |= UT_PROCEXIT; |
1460 | /* |
1461 | * Notify parent that we're gone. |
1462 | */ |
1463 | pp = proc_parent(p); |
1464 | if (pp->p_flag & P_NOCLDWAIT) { |
1465 | |
1466 | if (p->p_ru != NULL) { |
1467 | proc_lock(pp); |
1468 | #if 3839178 |
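	/*
	 * 3839178 is a bug number; since it evaluates nonzero, the
	 * POSIX-conformant branch below is compiled in and the ruadd()
	 * in the #else branch is compiled out.
	 */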
1469 | /* |
1470 | * If the parent is ignoring SIGCHLD, then POSIX requires |
1471 | * us to not add the resource usage to the parent process - |
1472 | * we are only going to hand it off to init to get reaped. |
1473 | * We should contest the standard in this case on the basis |
1474 | * of RLIMIT_CPU. |
1475 | */ |
1476 | #else /* !3839178 */ |
1477 | /* |
1478 | * Add child resource usage to parent before giving |
1479 | * zombie to init. If we were unable to allocate a |
1480 | * zombie structure, this information is lost. |
1481 | */ |
1482 | ruadd(&pp->p_stats->p_cru, &p->p_ru->ru); |
1483 | #endif /* !3839178 */ |
1484 | update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri); |
1485 | proc_unlock(pp); |
1486 | } |
1487 | |
1488 | /* kernel can reap this one, no need to move it to launchd */ |
1489 | proc_list_lock(); |
1490 | p->p_listflag |= P_LIST_DEADPARENT; |
1491 | proc_list_unlock(); |
1492 | } |
1493 | if ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid) { |
1494 | if (pp != initproc) { |
1495 | proc_lock(pp); |
1496 | pp->si_pid = p->p_pid; |
1497 | pp->p_xhighbits = p->p_xhighbits; |
1498 | p->p_xhighbits = 0; |
1499 | pp->si_status = p->p_xstat; |
1500 | pp->si_code = CLD_EXITED; |
1501 | /* |
1502 | * p_ucred usage is safe as it is an exiting process |
1503 | * and reference is dropped in reap |
1504 | */ |
1505 | pp->si_uid = kauth_cred_getruid(p->p_ucred); |
1506 | proc_unlock(pp); |
1507 | } |
/* mark as a zombie */
/*
 * No need to take the proc lock as all refs are drained and
 * no one except the parent (for reaping) can look at this.
 * The write is to an int and is coherent. Also the parent is
 * keyed off the list lock for reaping.
 */
1514 | DTRACE_PROC2(exited, proc_t, p, int, exitval); |
1515 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, |
1516 | BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END, |
1517 | pid, exitval, 0, 0, 0); |
1518 | p->p_stat = SZOMB; |
1519 | /* |
1520 | * The current process can be reaped so, no one |
1521 | * can depend on this |
1522 | */ |
1523 | |
1524 | psignal(pp, SIGCHLD); |
1525 | |
1526 | /* and now wakeup the parent */ |
1527 | proc_list_lock(); |
1528 | wakeup((caddr_t)pp); |
1529 | proc_list_unlock(); |
1530 | } else { |
1531 | /* should be fine as parent proc would be initproc */ |
/* mark as a zombie */
/*
 * No need to take the proc lock as all refs are drained and
 * no one except the parent (for reaping) can look at this.
 * The write is to an int and is coherent. Also the parent is
 * keyed off the list lock for reaping.
 */
1538 | DTRACE_PROC2(exited, proc_t, p, int, exitval); |
1539 | proc_list_lock(); |
1540 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, |
1541 | BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END, |
1542 | pid, exitval, 0, 0, 0); |
1543 | /* check for sysctl zomb lookup */ |
1544 | while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { |
msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
1546 | } |
1547 | /* safe to use p as this is a system reap */ |
1548 | p->p_stat = SZOMB; |
1549 | p->p_listflag |= P_LIST_WAITING; |
1550 | |
1551 | /* |
1552 | * This is a named reference and it is not granted |
1553 | * if the reap is already in progress. So we get |
1554 | * the reference here exclusively and their can be |
1555 | * no waiters. So there is no need for a wakeup |
1556 | * after we are done. AlsO the reap frees the structure |
1557 | * and the proc struct cannot be used for wakeups as well. |
1558 | * It is safe to use p here as this is system reap |
1559 | */ |
1560 | (void)reap_child_locked(pp, p, 1, 0, 1, 1); |
1561 | /* list lock dropped by reap_child_locked */ |
1562 | } |
1563 | if (uth->uu_lowpri_window) { |
1564 | /* |
1565 | * task is marked as a low priority I/O type and we've |
1566 | * somehow picked up another throttle during exit processing... |
1567 | * no need to throttle this thread since its going away |
1568 | * but we do need to update our bookeeping w/r to throttled threads |
1569 | */ |
1570 | throttle_lowpri_io(0); |
1571 | } |
1572 | |
1573 | proc_rele(pp); |
1574 | |
1575 | } |
1576 | |
1577 | |
1578 | /* |
1579 | * reap_child_locked |
1580 | * |
1581 | * Description: Given a process from which all status information needed |
1582 | * has already been extracted, if the process is a ptrace |
1583 | * attach process, detach it and give it back to its real |
1584 | * parent, else recover all resources remaining associated |
1585 | * with it. |
1586 | * |
 * Parameters:	proc_t parent		Parent of process being reaped
 *		proc_t child		Process to reap
 *		int deadparent		Nonzero if reaping because the
 *					parent itself is exiting
 *		int reparentedtoinit	Nonzero if the child had been
 *					reparented to initproc before
 *					being ptraced
 *		int locked		Nonzero if the caller holds the
 *					proc list lock on entry
 *		int droplock		Nonzero to leave the proc list
 *					lock dropped on return
 *
 * Returns:	0			Process was not reaped because it
 *					came from an attach
 *		1			Process was reaped
1593 | */ |
1594 | static int |
1595 | reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoinit, int locked, int droplock) |
1596 | { |
1597 | proc_t trace_parent = PROC_NULL; /* Traced parent process, if tracing */ |
1598 | |
1599 | if (locked == 1) |
1600 | proc_list_unlock(); |
1601 | |
1602 | /* |
1603 | * If we got the child via a ptrace 'attach', |
1604 | * we need to give it back to the old parent. |
1605 | * |
1606 | * Exception: someone who has been reparented to launchd before being |
1607 | * ptraced can simply be reaped, refer to radar 5677288 |
1608 | * p_oppid -> ptraced |
1609 | * trace_parent == initproc -> away from launchd |
1610 | * reparentedtoinit -> came to launchd by reparenting |
1611 | */ |
1612 | if (child->p_oppid) { |
1613 | int knote_hint; |
1614 | pid_t oppid; |
1615 | |
1616 | proc_lock(child); |
1617 | oppid = child->p_oppid; |
1618 | child->p_oppid = 0; |
1619 | knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff); |
1620 | proc_unlock(child); |
1621 | |
1622 | if ((trace_parent = proc_find(oppid)) |
1623 | && !((trace_parent == initproc) && reparentedtoinit)) { |
1624 | |
1625 | if (trace_parent != initproc) { |
1626 | /* |
1627 | * proc internal fileds and p_ucred usage safe |
1628 | * here as child is dead and is not reaped or |
1629 | * reparented yet |
1630 | */ |
1631 | proc_lock(trace_parent); |
1632 | trace_parent->si_pid = child->p_pid; |
1633 | trace_parent->si_status = child->p_xstat; |
1634 | trace_parent->si_code = CLD_CONTINUED; |
1635 | trace_parent->si_uid = kauth_cred_getruid(child->p_ucred); |
1636 | proc_unlock(trace_parent); |
1637 | } |
1638 | proc_reparentlocked(child, trace_parent, 1, 0); |
1639 | |
1640 | /* resend knote to original parent (and others) after reparenting */ |
1641 | proc_knote(child, knote_hint); |
1642 | |
1643 | psignal(trace_parent, SIGCHLD); |
1644 | proc_list_lock(); |
1645 | wakeup((caddr_t)trace_parent); |
1646 | child->p_listflag &= ~P_LIST_WAITING; |
1647 | wakeup(&child->p_stat); |
1648 | proc_list_unlock(); |
1649 | proc_rele(trace_parent); |
1650 | if ((locked == 1) && (droplock == 0)) |
1651 | proc_list_lock(); |
1652 | return (0); |
1653 | } |
1654 | |
1655 | /* |
1656 | * If we can't reparent (e.g. the original parent exited while child was being debugged, or |
1657 | * original parent is the same as the debugger currently exiting), we still need to satisfy |
1658 | * the knote lifecycle for other observers on the system. While the debugger was attached, |
1659 | * the NOTE_EXIT would not have been broadcast during initial child termination. |
1660 | */ |
1661 | proc_knote(child, knote_hint); |
1662 | |
1663 | if (trace_parent != PROC_NULL) { |
1664 | proc_rele(trace_parent); |
1665 | } |
1666 | } |
1667 | |
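/*
 * NOTE_REAP is deprecated (hence the pragmas below), but it is still
 * posted for any legacy kqueue watchers before the knotes are drained.
 */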
1668 | #pragma clang diagnostic push |
1669 | #pragma clang diagnostic ignored "-Wdeprecated-declarations" |
1670 | proc_knote(child, NOTE_REAP); |
1671 | #pragma clang diagnostic pop |
1672 | |
1673 | proc_knote_drain(child); |
1674 | |
1675 | child->p_xstat = 0; |
1676 | if (child->p_ru) { |
1677 | proc_lock(parent); |
1678 | #if 3839178 |
1679 | /* |
1680 | * If the parent is ignoring SIGCHLD, then POSIX requires |
1681 | * us to not add the resource usage to the parent process - |
1682 | * we are only going to hand it off to init to get reaped. |
1683 | * We should contest the standard in this case on the basis |
1684 | * of RLIMIT_CPU. |
1685 | */ |
1686 | if (!(parent->p_flag & P_NOCLDWAIT)) |
1687 | #endif /* 3839178 */ |
1688 | ruadd(&parent->p_stats->p_cru, &child->p_ru->ru); |
1689 | update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri); |
1690 | proc_unlock(parent); |
1691 | FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE); |
1692 | child->p_ru = NULL; |
1693 | } else { |
1694 | printf("Warning : lost p_ru for %s\n" , child->p_comm); |
1695 | } |
1696 | |
1697 | AUDIT_SESSION_PROCEXIT(child); |
1698 | |
1699 | /* |
1700 | * Decrement the count of procs running with this uid. |
1701 | * p_ucred usage is safe here as it is an exited process. |
1702 | * and refernce is dropped after these calls down below |
1703 | * (locking protection is provided by list lock held in chgproccnt) |
1704 | */ |
1705 | #if CONFIG_PERSONAS |
1706 | /* |
1707 | * persona_proc_drop calls chgproccnt(-1) on the persona uid, |
1708 | * and (+1) on the child->p_ucred uid |
1709 | */ |
1710 | persona_proc_drop(child); |
1711 | #endif |
1712 | (void)chgproccnt(kauth_cred_getruid(child->p_ucred), -1); |
1713 | |
1714 | os_reason_free(child->p_exit_reason); |
1715 | |
1716 | /* |
1717 | * Free up credentials. |
1718 | */ |
1719 | if (IS_VALID_CRED(child->p_ucred)) { |
1720 | kauth_cred_unref(&child->p_ucred); |
1721 | } |
1722 | |
1723 | /* XXXX Note NOT SAFE TO USE p_ucred from this point onwards */ |
1724 | |
1725 | /* |
1726 | * Finally finished with old proc entry. |
1727 | * Unlink it from its process group and free it. |
1728 | */ |
1729 | leavepgrp(child); |
1730 | |
1731 | proc_list_lock(); |
1732 | LIST_REMOVE(child, p_list); /* off zombproc */ |
1733 | parent->p_childrencnt--; |
1734 | LIST_REMOVE(child, p_sibling); |
1735 | /* If there are no more children wakeup parent */ |
1736 | if ((deadparent != 0) && (LIST_EMPTY(&parent->p_children))) |
1737 | wakeup((caddr_t)parent); /* with list lock held */ |
1738 | child->p_listflag &= ~P_LIST_WAITING; |
1739 | wakeup(&child->p_stat); |
1740 | |
1741 | /* Take it out of process hash */ |
1742 | LIST_REMOVE(child, p_hash); |
1743 | child->p_listflag &= ~P_LIST_INHASH; |
1744 | proc_checkdeadrefs(child); |
1745 | nprocs--; |
1746 | |
1747 | if (deadparent) { |
1748 | /* |
1749 | * If a child zombie is being reaped because its parent |
1750 | * is exiting, make sure we update the list flag |
1751 | */ |
1752 | child->p_listflag |= P_LIST_DEADPARENT; |
1753 | } |
1754 | |
1755 | proc_list_unlock(); |
1756 | |
1757 | #if CONFIG_FINE_LOCK_GROUPS |
1758 | lck_mtx_destroy(&child->p_mlock, proc_mlock_grp); |
1759 | lck_mtx_destroy(&child->p_ucred_mlock, proc_ucred_mlock_grp); |
1760 | lck_mtx_destroy(&child->p_fdmlock, proc_fdmlock_grp); |
1761 | #if CONFIG_DTRACE |
1762 | lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp); |
1763 | #endif |
1764 | lck_spin_destroy(&child->p_slock, proc_slock_grp); |
1765 | #else /* CONFIG_FINE_LOCK_GROUPS */ |
1766 | lck_mtx_destroy(&child->p_mlock, proc_lck_grp); |
1767 | lck_mtx_destroy(&child->p_ucred_mlock, proc_lck_grp); |
1768 | lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp); |
1769 | #if CONFIG_DTRACE |
1770 | lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp); |
1771 | #endif |
1772 | lck_spin_destroy(&child->p_slock, proc_lck_grp); |
1773 | #endif /* CONFIG_FINE_LOCK_GROUPS */ |
1774 | |
1775 | FREE_ZONE(child, sizeof *child, M_PROC); |
1776 | if ((locked == 1) && (droplock == 0)) |
1777 | proc_list_lock(); |
1778 | |
1779 | return (1); |
1780 | } |
1781 | |
1782 | |
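/*
 * wait1continue
 *
 * Continuation routine for wait4(). When wait4_nocancel() blocks via
 * msleep0() the kernel stack may be discarded, so on wakeup this
 * function re-fetches the stashed syscall arguments from the uthread
 * save area and restarts wait4_nocancel() from the top.
 */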
1783 | int |
1784 | wait1continue(int result) |
1785 | { |
1786 | proc_t p; |
1787 | thread_t thread; |
1788 | uthread_t uth; |
1789 | struct _wait4_data *wait4_data; |
1790 | struct wait4_nocancel_args *uap; |
1791 | int *retval; |
1792 | |
1793 | if (result) |
1794 | return(result); |
1795 | |
1796 | p = current_proc(); |
1797 | thread = current_thread(); |
1798 | uth = (struct uthread *)get_bsdthread_info(thread); |
1799 | |
1800 | wait4_data = &uth->uu_save.uus_wait4_data; |
1801 | uap = wait4_data->args; |
1802 | retval = wait4_data->retval; |
1803 | return(wait4_nocancel(p, uap, retval)); |
1804 | } |
1805 | |
1806 | int |
1807 | wait4(proc_t q, struct wait4_args *uap, int32_t *retval) |
1808 | { |
1809 | __pthread_testcancel(1); |
1810 | return(wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval)); |
1811 | } |
1812 | |
1813 | int |
1814 | wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval) |
1815 | { |
1816 | int nfound; |
1817 | int sibling_count; |
1818 | proc_t p; |
1819 | int status, error; |
1820 | uthread_t uth; |
1821 | struct _wait4_data *wait4_data; |
1822 | |
1823 | AUDIT_ARG(pid, uap->pid); |
1824 | |
1825 | if (uap->pid == 0) |
1826 | uap->pid = -q->p_pgrpid; |
1827 | |
1828 | loop: |
1829 | proc_list_lock(); |
1830 | loop1: |
1831 | nfound = 0; |
1832 | sibling_count = 0; |
1833 | |
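	/*
	 * sibling_count tracks whether other children remain; it feeds
	 * the conformance check (6577252) below that clears a pending
	 * SIGCHLD when the last child's status is collected.
	 */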
1834 | PCHILDREN_FOREACH(q, p) { |
1835 | if ( p->p_sibling.le_next != 0 ) |
1836 | sibling_count++; |
1837 | if (uap->pid != WAIT_ANY && |
1838 | p->p_pid != uap->pid && |
1839 | p->p_pgrpid != -(uap->pid)) |
1840 | continue; |
1841 | |
1842 | nfound++; |
1843 | |
1844 | /* XXX This is racy because we don't get the lock!!!! */ |
1845 | |
1846 | if (p->p_listflag & P_LIST_WAITING) { |
1847 | |
1848 | /* we're not using a continuation here but we still need to stash |
1849 | * the args for stackshot. */ |
1850 | uth = current_uthread(); |
1851 | wait4_data = &uth->uu_save.uus_wait4_data; |
1852 | wait4_data->args = uap; |
1853 | thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess); |
1854 | |
(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
1856 | goto loop1; |
1857 | } |
1858 | p->p_listflag |= P_LIST_WAITING; /* only allow single thread to wait() */ |
1859 | |
1860 | |
1861 | if (p->p_stat == SZOMB) { |
1862 | int reparentedtoinit = (p->p_listflag & P_LIST_DEADPARENT) ? 1 : 0; |
1863 | |
1864 | proc_list_unlock(); |
1865 | #if CONFIG_MACF |
1866 | if ((error = mac_proc_check_wait(q, p)) != 0) |
1867 | goto out; |
1868 | #endif |
1869 | retval[0] = p->p_pid; |
1870 | if (uap->status) { |
/* Legacy apps expect only the low 16 bits of status */
1872 | status = 0xffff & p->p_xstat; /* convert to int */ |
1873 | error = copyout((caddr_t)&status, |
1874 | uap->status, |
1875 | sizeof(status)); |
1876 | if (error) |
1877 | goto out; |
1878 | } |
1879 | if (uap->rusage) { |
1880 | if (p->p_ru == NULL) { |
1881 | error = ENOMEM; |
1882 | } else { |
1883 | if (IS_64BIT_PROCESS(q)) { |
1884 | struct user64_rusage my_rusage = {}; |
1885 | munge_user64_rusage(&p->p_ru->ru, &my_rusage); |
1886 | error = copyout((caddr_t)&my_rusage, |
1887 | uap->rusage, |
1888 | sizeof (my_rusage)); |
1889 | } |
1890 | else { |
1891 | struct user32_rusage my_rusage = {}; |
1892 | munge_user32_rusage(&p->p_ru->ru, &my_rusage); |
1893 | error = copyout((caddr_t)&my_rusage, |
1894 | uap->rusage, |
1895 | sizeof (my_rusage)); |
1896 | } |
1897 | } |
1898 | /* information unavailable? */ |
1899 | if (error) |
1900 | goto out; |
1901 | } |
1902 | |
/* Conformance change for 6577252.
 * When SIGCHLD is blocked and wait() returns because the status
 * of a child process is available and there are no other
 * child processes, then any pending SIGCHLD signal is cleared.
 */
1908 | if ( sibling_count == 0 ) { |
1909 | int mask = sigmask(SIGCHLD); |
1910 | uth = current_uthread(); |
1911 | |
1912 | if ( (uth->uu_sigmask & mask) != 0 ) { |
/* We are blocking SIGCHLD signals; clear any pending SIGCHLD.
 * This locking looks funny, but it is protecting access to the
 * thread via p_uthlist.
 */
1917 | proc_lock(q); |
1918 | uth->uu_siglist &= ~mask; /* clear pending signal */ |
1919 | proc_unlock(q); |
1920 | } |
1921 | } |
1922 | |
1923 | /* Clean up */ |
1924 | (void)reap_child_locked(q, p, 0, reparentedtoinit, 0, 0); |
1925 | |
1926 | return (0); |
1927 | } |
1928 | if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 && |
1929 | (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) { |
1930 | proc_list_unlock(); |
1931 | #if CONFIG_MACF |
1932 | if ((error = mac_proc_check_wait(q, p)) != 0) |
1933 | goto out; |
1934 | #endif |
1935 | proc_lock(p); |
1936 | p->p_lflag |= P_LWAITED; |
1937 | proc_unlock(p); |
1938 | retval[0] = p->p_pid; |
1939 | if (uap->status) { |
1940 | status = W_STOPCODE(p->p_xstat); |
1941 | error = copyout((caddr_t)&status, |
1942 | uap->status, |
1943 | sizeof(status)); |
1944 | } else |
1945 | error = 0; |
1946 | goto out; |
1947 | } |
1948 | /* |
1949 | * If we are waiting for continued processses, and this |
1950 | * process was continued |
1951 | */ |
1952 | if ((uap->options & WCONTINUED) && |
1953 | (p->p_flag & P_CONTINUED)) { |
1954 | proc_list_unlock(); |
1955 | #if CONFIG_MACF |
1956 | if ((error = mac_proc_check_wait(q, p)) != 0) |
1957 | goto out; |
1958 | #endif |
1959 | |
/* Prevent other processes from waiting for this event */
1961 | OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag); |
1962 | retval[0] = p->p_pid; |
1963 | if (uap->status) { |
1964 | status = W_STOPCODE(SIGCONT); |
1965 | error = copyout((caddr_t)&status, |
1966 | uap->status, |
1967 | sizeof(status)); |
1968 | } else |
1969 | error = 0; |
1970 | goto out; |
1971 | } |
1972 | p->p_listflag &= ~P_LIST_WAITING; |
1973 | wakeup(&p->p_stat); |
1974 | } |
1975 | /* list lock is held when we get here any which way */ |
1976 | if (nfound == 0) { |
1977 | proc_list_unlock(); |
1978 | return (ECHILD); |
1979 | } |
1980 | |
1981 | if (uap->options & WNOHANG) { |
1982 | retval[0] = 0; |
1983 | proc_list_unlock(); |
1984 | return (0); |
1985 | } |
1986 | |
1987 | /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */ |
1988 | uth = current_uthread(); |
1989 | wait4_data = &uth->uu_save.uus_wait4_data; |
1990 | wait4_data->args = uap; |
1991 | wait4_data->retval = retval; |
1992 | |
1993 | thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess); |
if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue)))
1995 | return (error); |
1996 | |
1997 | goto loop; |
1998 | out: |
1999 | proc_list_lock(); |
2000 | p->p_listflag &= ~P_LIST_WAITING; |
2001 | wakeup(&p->p_stat); |
2002 | proc_list_unlock(); |
2003 | return (error); |
2004 | } |
2005 | |
2006 | #if DEBUG |
2007 | #define ASSERT_LCK_MTX_OWNED(lock) \ |
2008 | lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED) |
2009 | #else |
2010 | #define ASSERT_LCK_MTX_OWNED(lock) /* nothing */ |
2011 | #endif |
2012 | |
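/*
 * waitidcontinue
 *
 * Continuation routine for waitid(); restarts waitid_nocancel() with
 * the arguments stashed in the uthread save area, mirroring
 * wait1continue() above.
 */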
2013 | int |
2014 | waitidcontinue(int result) |
2015 | { |
2016 | proc_t p; |
2017 | thread_t thread; |
2018 | uthread_t uth; |
2019 | struct _waitid_data *waitid_data; |
2020 | struct waitid_nocancel_args *uap; |
2021 | int *retval; |
2022 | |
2023 | if (result) |
2024 | return (result); |
2025 | |
2026 | p = current_proc(); |
2027 | thread = current_thread(); |
2028 | uth = (struct uthread *)get_bsdthread_info(thread); |
2029 | |
2030 | waitid_data = &uth->uu_save.uus_waitid_data; |
2031 | uap = waitid_data->args; |
2032 | retval = waitid_data->retval; |
2033 | return(waitid_nocancel(p, uap, retval)); |
2034 | } |
2035 | |
2036 | /* |
2037 | * Description: Suspend the calling thread until one child of the process |
2038 | * containing the calling thread changes state. |
2039 | * |
2040 | * Parameters: uap->idtype one of P_PID, P_PGID, P_ALL |
2041 | * uap->id pid_t or gid_t or ignored |
2042 | * uap->infop Address of siginfo_t struct in |
2043 | * user space into which to return status |
2044 | * uap->options flag values |
2045 | * |
2046 | * Returns: 0 Success |
2047 | * !0 Error returning status to user space |
2048 | */ |
2049 | int |
2050 | waitid(proc_t q, struct waitid_args *uap, int32_t *retval) |
2051 | { |
2052 | __pthread_testcancel(1); |
2053 | return (waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval)); |
2054 | } |
2055 | |
2056 | int |
2057 | waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap, |
2058 | __unused int32_t *retval) |
2059 | { |
2060 | user_siginfo_t siginfo; /* siginfo data to return to caller */ |
2061 | boolean_t caller64 = IS_64BIT_PROCESS(q); |
2062 | int nfound; |
2063 | proc_t p; |
2064 | int error; |
2065 | uthread_t uth; |
2066 | struct _waitid_data *waitid_data; |
2067 | |
2068 | if (uap->options == 0 || |
2069 | (uap->options & ~(WNOHANG|WNOWAIT|WCONTINUED|WSTOPPED|WEXITED))) |
2070 | return (EINVAL); /* bits set that aren't recognized */ |
2071 | |
2072 | switch (uap->idtype) { |
2073 | case P_PID: /* child with process ID equal to... */ |
2074 | case P_PGID: /* child with process group ID equal to... */ |
2075 | if (((int)uap->id) < 0) |
2076 | return (EINVAL); |
2077 | break; |
2078 | case P_ALL: /* any child */ |
2079 | break; |
2080 | } |
2081 | |
2082 | loop: |
2083 | proc_list_lock(); |
2084 | loop1: |
2085 | nfound = 0; |
2086 | |
2087 | PCHILDREN_FOREACH(q, p) { |
2088 | switch (uap->idtype) { |
2089 | case P_PID: /* child with process ID equal to... */ |
2090 | if (p->p_pid != (pid_t)uap->id) |
2091 | continue; |
2092 | break; |
2093 | case P_PGID: /* child with process group ID equal to... */ |
2094 | if (p->p_pgrpid != (pid_t)uap->id) |
2095 | continue; |
2096 | break; |
2097 | case P_ALL: /* any child */ |
2098 | break; |
2099 | } |
2100 | |
2101 | /* XXX This is racy because we don't get the lock!!!! */ |
2102 | |
2103 | /* |
2104 | * Wait collision; go to sleep and restart; used to maintain |
2105 | * the single return for waited process guarantee. |
2106 | */ |
2107 | if (p->p_listflag & P_LIST_WAITING) { |
(void) msleep(&p->p_stat, proc_list_mlock,
    PWAIT, "waitidcoll", 0);
2110 | goto loop1; |
2111 | } |
2112 | p->p_listflag |= P_LIST_WAITING; /* mark busy */ |
2113 | |
2114 | nfound++; |
2115 | |
2116 | bzero(&siginfo, sizeof (siginfo)); |
2117 | |
2118 | switch (p->p_stat) { |
2119 | case SZOMB: /* Exited */ |
2120 | if (!(uap->options & WEXITED)) |
2121 | break; |
2122 | proc_list_unlock(); |
2123 | #if CONFIG_MACF |
2124 | if ((error = mac_proc_check_wait(q, p)) != 0) |
2125 | goto out; |
2126 | #endif |
2127 | siginfo.si_signo = SIGCHLD; |
2128 | siginfo.si_pid = p->p_pid; |
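			/*
			 * WEXITSTATUS() supplies the low bits; p_xhighbits
			 * carries the top 8 bits of a 32-bit exit status.
			 */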
siginfo.si_status = (WEXITSTATUS(p->p_xstat) & 0x00FFFFFF) |
    (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
2130 | p->p_xhighbits = 0; |
2131 | if (WIFSIGNALED(p->p_xstat)) { |
2132 | siginfo.si_code = WCOREDUMP(p->p_xstat) ? |
2133 | CLD_DUMPED : CLD_KILLED; |
2134 | } else |
2135 | siginfo.si_code = CLD_EXITED; |
2136 | |
2137 | if ((error = copyoutsiginfo(&siginfo, |
2138 | caller64, uap->infop)) != 0) |
2139 | goto out; |
2140 | |
/* Unless WNOWAIT is set, consume the event so no other waiter sees it */
2142 | if (!(uap->options & WNOWAIT)) { |
2143 | (void) reap_child_locked(q, p, 0, 0, 0, 0); |
2144 | return (0); |
2145 | } |
2146 | goto out; |
2147 | |
2148 | case SSTOP: /* Stopped */ |
2149 | /* |
2150 | * If we are not interested in stopped processes, then |
2151 | * ignore this one. |
2152 | */ |
2153 | if (!(uap->options & WSTOPPED)) |
2154 | break; |
2155 | |
2156 | /* |
2157 | * If someone has already waited it, we lost a race |
2158 | * to be the one to return status. |
2159 | */ |
2160 | if ((p->p_lflag & P_LWAITED) != 0) |
2161 | break; |
2162 | proc_list_unlock(); |
2163 | #if CONFIG_MACF |
2164 | if ((error = mac_proc_check_wait(q, p)) != 0) |
2165 | goto out; |
2166 | #endif |
2167 | siginfo.si_signo = SIGCHLD; |
2168 | siginfo.si_pid = p->p_pid; |
2169 | siginfo.si_status = p->p_xstat; /* signal number */ |
2170 | siginfo.si_code = CLD_STOPPED; |
2171 | |
2172 | if ((error = copyoutsiginfo(&siginfo, |
2173 | caller64, uap->infop)) != 0) |
2174 | goto out; |
2175 | |
/* Unless WNOWAIT is set, consume the event so no other waiter sees it */
2177 | if (!(uap->options & WNOWAIT)) { |
2178 | proc_lock(p); |
2179 | p->p_lflag |= P_LWAITED; |
2180 | proc_unlock(p); |
2181 | } |
2182 | goto out; |
2183 | |
2184 | default: /* All other states => Continued */ |
2185 | if (!(uap->options & WCONTINUED)) |
2186 | break; |
2187 | |
2188 | /* |
2189 | * If the flag isn't set, then this process has not |
2190 | * been stopped and continued, or the status has |
2191 | * already been reaped by another caller of waitid(). |
2192 | */ |
2193 | if ((p->p_flag & P_CONTINUED) == 0) |
2194 | break; |
2195 | proc_list_unlock(); |
2196 | #if CONFIG_MACF |
2197 | if ((error = mac_proc_check_wait(q, p)) != 0) |
2198 | goto out; |
2199 | #endif |
2200 | siginfo.si_signo = SIGCHLD; |
2201 | siginfo.si_code = CLD_CONTINUED; |
2202 | proc_lock(p); |
2203 | siginfo.si_pid = p->p_contproc; |
2204 | siginfo.si_status = p->p_xstat; |
2205 | proc_unlock(p); |
2206 | |
2207 | if ((error = copyoutsiginfo(&siginfo, |
2208 | caller64, uap->infop)) != 0) |
2209 | goto out; |
2210 | |
/* Unless WNOWAIT is set, consume the event so no other waiter sees it */
2212 | if (!(uap->options & WNOWAIT)) { |
2213 | OSBitAndAtomic(~((uint32_t)P_CONTINUED), |
2214 | &p->p_flag); |
2215 | } |
2216 | goto out; |
2217 | } |
2218 | ASSERT_LCK_MTX_OWNED(proc_list_mlock); |
2219 | |
2220 | /* Not a process we are interested in; go on to next child */ |
2221 | |
2222 | p->p_listflag &= ~P_LIST_WAITING; |
2223 | wakeup(&p->p_stat); |
2224 | } |
2225 | ASSERT_LCK_MTX_OWNED(proc_list_mlock); |
2226 | |
2227 | /* No child processes that could possibly satisfy the request? */ |
2228 | |
2229 | if (nfound == 0) { |
2230 | proc_list_unlock(); |
2231 | return (ECHILD); |
2232 | } |
2233 | |
2234 | if (uap->options & WNOHANG) { |
2235 | proc_list_unlock(); |
2236 | #if CONFIG_MACF |
2237 | if ((error = mac_proc_check_wait(q, p)) != 0) |
2238 | return (error); |
2239 | #endif |
2240 | /* |
2241 | * The state of the siginfo structure in this case |
2242 | * is undefined. Some implementations bzero it, some |
2243 | * (like here) leave it untouched for efficiency. |
2244 | * |
2245 | * Thus the most portable check for "no matching pid with |
2246 | * WNOHANG" is to store a zero into si_pid before |
2247 | * invocation, then check for a non-zero value afterwards. |
2248 | */ |
2249 | return (0); |
2250 | } |
2251 | |
2252 | /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */ |
2253 | uth = current_uthread(); |
2254 | waitid_data = &uth->uu_save.uus_waitid_data; |
2255 | waitid_data->args = uap; |
2256 | waitid_data->retval = retval; |
2257 | |
if ((error = msleep0(q, proc_list_mlock,
    PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0)
2260 | return (error); |
2261 | |
2262 | goto loop; |
2263 | out: |
2264 | proc_list_lock(); |
2265 | p->p_listflag &= ~P_LIST_WAITING; |
2266 | wakeup(&p->p_stat); |
2267 | proc_list_unlock(); |
2268 | return (error); |
2269 | } |
2270 | |
2271 | /* |
2272 | * make process 'parent' the new parent of process 'child'. |
2273 | */ |
2274 | void |
2275 | proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked) |
2276 | { |
2277 | proc_t oldparent = PROC_NULL; |
2278 | |
2279 | if (child->p_pptr == parent) |
2280 | return; |
2281 | |
2282 | if (locked == 0) |
2283 | proc_list_lock(); |
2284 | |
2285 | oldparent = child->p_pptr; |
2286 | #if __PROC_INTERNAL_DEBUG |
2287 | if (oldparent == PROC_NULL) |
2288 | panic("proc_reparent: process %p does not have a parent\n" , child); |
2289 | #endif |
2290 | |
2291 | LIST_REMOVE(child, p_sibling); |
2292 | #if __PROC_INTERNAL_DEBUG |
2293 | if (oldparent->p_childrencnt == 0) |
2294 | panic("process children count already 0\n" ); |
2295 | #endif |
2296 | oldparent->p_childrencnt--; |
2297 | #if __PROC_INTERNAL_DEBUG1 |
2298 | if (oldparent->p_childrencnt < 0) |
2299 | panic("process children count -ve\n" ); |
2300 | #endif |
2301 | LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); |
2302 | parent->p_childrencnt++; |
2303 | child->p_pptr = parent; |
2304 | child->p_ppid = parent->p_pid; |
2305 | |
2306 | proc_list_unlock(); |
2307 | |
2308 | if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) |
2309 | psignal(initproc, SIGCHLD); |
2310 | if (locked == 1) |
2311 | proc_list_lock(); |
2312 | } |
2313 | |
2314 | /* |
2315 | * Exit: deallocate address space and other resources, change proc state |
2316 | * to zombie, and unlink proc from allproc and parent's lists. Save exit |
2317 | * status and rusage for wait(). Check for child processes and orphan them. |
2318 | */ |
2319 | |
2320 | void |
2321 | vfork_exit(proc_t p, int rv) |
2322 | { |
2323 | vfork_exit_internal(p, rv, 0); |
2324 | } |
2325 | |
2326 | void |
2327 | vfork_exit_internal(proc_t p, int rv, int forceexit) |
2328 | { |
2329 | thread_t self = current_thread(); |
2330 | #ifdef FIXME |
2331 | struct task *task = p->task; |
2332 | #endif |
2333 | struct uthread *ut; |
2334 | |
2335 | /* |
2336 | * If a thread in this task has already |
2337 | * called exit(), then halt any others |
2338 | * right here. |
2339 | */ |
2340 | |
2341 | ut = get_bsdthread_info(self); |
2342 | |
2343 | |
2344 | proc_lock(p); |
2345 | if ((p->p_lflag & P_LPEXIT) == P_LPEXIT) { |
2346 | /* |
2347 | * This happens when a parent exits/killed and vfork is in progress |
2348 | * other threads. But shutdown code for ex has already called exit1() |
2349 | */ |
2350 | proc_unlock(p); |
2351 | return; |
2352 | } |
2353 | p->p_lflag |= (P_LEXIT | P_LPEXIT); |
2354 | proc_unlock(p); |
2355 | |
2356 | if (forceexit == 0) { |
2357 | /* |
2358 | * parent of a vfork child has already called exit() and the |
2359 | * thread that has vfork in proress terminates. So there is no |
2360 | * separate address space here and it has already been marked for |
2361 | * termination. This was never covered before and could cause problems |
2362 | * if we block here for outside code. |
2363 | */ |
2364 | /* Notify the perf server */ |
2365 | (void)sys_perf_notify(self, p->p_pid); |
2366 | } |
2367 | |
2368 | /* |
2369 | * Remove proc from allproc queue and from pidhash chain. |
2370 | * Need to do this before we do anything that can block. |
2371 | * Not doing causes things like mount() find this on allproc |
2372 | * in partially cleaned state. |
2373 | */ |
2374 | |
2375 | proc_list_lock(); |
2376 | |
2377 | #if CONFIG_MEMORYSTATUS |
2378 | memorystatus_remove(p, TRUE); |
2379 | #endif |
2380 | |
2381 | LIST_REMOVE(p, p_list); |
2382 | LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */ |
2383 | /* will not be visible via proc_find */ |
2384 | p->p_listflag |= P_LIST_EXITED; |
2385 | |
2386 | proc_list_unlock(); |
2387 | |
2388 | proc_lock(p); |
2389 | p->p_xstat = rv; |
2390 | p->p_lflag &= ~(P_LTRACED | P_LPPWAIT); |
2391 | p->p_sigignore = ~0; |
2392 | proc_unlock(p); |
2393 | |
2394 | ut->uu_siglist = 0; |
2395 | |
2396 | /* begin vproc_exit */ |
2397 | |
2398 | proc_t q; |
2399 | proc_t pp; |
2400 | |
2401 | vnode_t tvp; |
2402 | |
2403 | struct pgrp * pg; |
2404 | struct session *sessp; |
2405 | struct rusage_superset *rup; |
2406 | |
2407 | /* XXX Zombie allocation may fail, in which case stats get lost */ |
2408 | MALLOC_ZONE(rup, struct rusage_superset *, |
2409 | sizeof (*rup), M_ZOMBIE, M_WAITOK); |
2410 | |
2411 | proc_refdrain(p); |
2412 | |
2413 | /* |
2414 | * Close open files and release open-file table. |
2415 | * This may block! |
2416 | */ |
2417 | fdfree(p); |
2418 | |
2419 | sessp = proc_session(p); |
2420 | if (SESS_LEADER(p, sessp)) { |
2421 | |
2422 | if (sessp->s_ttyvp != NULLVP) { |
2423 | struct vnode *ttyvp; |
2424 | int ttyvid; |
2425 | int cttyflag = 0; |
2426 | struct vfs_context context; |
2427 | struct tty *tp; |
2428 | |
2429 | /* |
2430 | * Controlling process. |
2431 | * Signal foreground pgrp, |
2432 | * drain controlling terminal |
2433 | * and revoke access to controlling terminal. |
2434 | */ |
2435 | session_lock(sessp); |
2436 | tp = SESSION_TP(sessp); |
2437 | if ((tp != TTY_NULL) && (tp->t_session == sessp)) { |
2438 | session_unlock(sessp); |
2439 | |
2440 | /* |
2441 | * We're going to SIGHUP the foreground process |
2442 | * group. It can't change from this point on |
2443 | * until the revoke is complete. |
2444 | * The process group changes under both the tty |
2445 | * lock and proc_list_lock but we need only one |
2446 | */ |
2447 | tty_lock(tp); |
2448 | ttysetpgrphup(tp); |
2449 | tty_unlock(tp); |
2450 | |
2451 | tty_pgsignal(tp, SIGHUP, 1); |
2452 | |
2453 | session_lock(sessp); |
2454 | tp = SESSION_TP(sessp); |
2455 | } |
2456 | cttyflag = sessp->s_flags & S_CTTYREF; |
2457 | sessp->s_flags &= ~S_CTTYREF; |
2458 | ttyvp = sessp->s_ttyvp; |
2459 | ttyvid = sessp->s_ttyvid; |
2460 | sessp->s_ttyvp = NULL; |
2461 | sessp->s_ttyvid = 0; |
2462 | sessp->s_ttyp = TTY_NULL; |
2463 | sessp->s_ttypgrpid = NO_PID; |
2464 | session_unlock(sessp); |
2465 | |
2466 | if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) { |
2467 | if (tp != TTY_NULL) { |
2468 | tty_lock(tp); |
2469 | (void) ttywait(tp); |
2470 | tty_unlock(tp); |
2471 | } |
2472 | context.vc_thread = proc_thread(p); /* XXX */ |
2473 | context.vc_ucred = kauth_cred_proc_ref(p); |
2474 | VNOP_REVOKE(ttyvp, REVOKEALL, &context); |
2475 | if (cttyflag) { |
2476 | /* |
2477 | * Release the extra usecount taken in cttyopen. |
2478 | * usecount should be released after VNOP_REVOKE is called. |
2479 | * This usecount was taken to ensure that |
2480 | * the VNOP_REVOKE results in a close to |
2481 | * the tty since cttyclose is a no-op. |
2482 | */ |
2483 | vnode_rele(ttyvp); |
2484 | } |
2485 | vnode_put(ttyvp); |
2486 | kauth_cred_unref(&context.vc_ucred); |
2487 | ttyvp = NULLVP; |
2488 | } |
2489 | if (tp) { |
2490 | /* |
2491 | * This is cleared even if not set. This is also done in |
2492 | * spec_close to ensure that the flag is cleared. |
2493 | */ |
2494 | tty_lock(tp); |
2495 | ttyclrpgrphup(tp); |
2496 | tty_unlock(tp); |
2497 | |
2498 | ttyfree(tp); |
2499 | } |
2500 | } |
2501 | session_lock(sessp); |
2502 | sessp->s_leader = NULL; |
2503 | session_unlock(sessp); |
2504 | } |
2505 | session_rele(sessp); |
2506 | |
2507 | pg = proc_pgrp(p); |
2508 | fixjobc(p, pg, 0); |
2509 | pg_rele(pg); |
2510 | |
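/*
 * Lift the file-size limit for the remainder of exit processing;
 * historically this prevented exit-time bookkeeping writes (such as
 * accounting records) from being truncated or raising SIGXFSZ.
 */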
2511 | p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; |
2512 | |
2513 | proc_list_lock(); |
2514 | proc_childdrainstart(p); |
2515 | while ((q = p->p_children.lh_first) != NULL) { |
2516 | if (q->p_stat == SZOMB) { |
2517 | if (p != q->p_pptr) |
2518 | panic("parent child linkage broken" ); |
2519 | /* check for lookups by zomb sysctl */ |
2520 | while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { |
msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
2522 | } |
2523 | q->p_listflag |= P_LIST_WAITING; |
2524 | /* |
2525 | * This is a named reference and it is not granted |
2526 | * if the reap is already in progress. So we get |
2527 | * the reference here exclusively and their can be |
2528 | * no waiters. So there is no need for a wakeup |
2529 | * after we are done. AlsO the reap frees the structure |
2530 | * and the proc struct cannot be used for wakeups as well. |
2531 | * It is safe to use q here as this is system reap |
2532 | */ |
2533 | (void)reap_child_locked(p, q, 1, 0, 1, 0); |
2534 | } else { |
2535 | /* |
2536 | * Traced processes are killed |
2537 | * since their existence means someone is messing up. |
2538 | */ |
2539 | if (q->p_lflag & P_LTRACED) { |
2540 | struct proc *opp; |
2541 | |
2542 | proc_list_unlock(); |
2543 | |
2544 | opp = proc_find(q->p_oppid); |
2545 | if (opp != PROC_NULL) { |
2546 | proc_list_lock(); |
2547 | q->p_oppid = 0; |
2548 | proc_list_unlock(); |
2549 | proc_reparentlocked(q, opp, 0, 0); |
2550 | proc_rele(opp); |
2551 | } else { |
2552 | /* original parent exited while traced */ |
2553 | proc_list_lock(); |
2554 | q->p_listflag |= P_LIST_DEADPARENT; |
2555 | q->p_oppid = 0; |
2556 | proc_list_unlock(); |
2557 | proc_reparentlocked(q, initproc, 0, 0); |
2558 | } |
2559 | |
2560 | proc_lock(q); |
2561 | q->p_lflag &= ~P_LTRACED; |
2562 | |
2563 | if (q->sigwait_thread) { |
2564 | thread_t thread = q->sigwait_thread; |
2565 | |
2566 | proc_unlock(q); |
2567 | /* |
2568 | * The sigwait_thread could be stopped at a |
2569 | * breakpoint. Wake it up to kill. |
 * Need to do this as it could be a thread which is not
 * the first thread in the task, so any attempt to kill
 * the process would result in a deadlock on q->sigwait.
2573 | */ |
2574 | thread_resume(thread); |
2575 | clear_wait(thread, THREAD_INTERRUPTED); |
2576 | threadsignal(thread, SIGKILL, 0, TRUE); |
2577 | } else { |
2578 | proc_unlock(q); |
2579 | } |
2580 | |
2581 | psignal(q, SIGKILL); |
2582 | proc_list_lock(); |
2583 | } else { |
2584 | q->p_listflag |= P_LIST_DEADPARENT; |
2585 | proc_reparentlocked(q, initproc, 0, 1); |
2586 | } |
2587 | } |
2588 | } |
2589 | |
2590 | proc_childdrainend(p); |
2591 | proc_list_unlock(); |
2592 | |
2593 | /* |
2594 | * Release reference to text vnode |
2595 | */ |
2596 | tvp = p->p_textvp; |
2597 | p->p_textvp = NULL; |
2598 | if (tvp != NULLVP) { |
2599 | vnode_rele(tvp); |
2600 | } |
2601 | |
2602 | /* |
2603 | * Save exit status and final rusage info, adding in child rusage |
2604 | * info and self times. If we were unable to allocate a zombie |
2605 | * structure, this information is lost. |
2606 | */ |
2607 | if (rup != NULL) { |
2608 | rup->ru = p->p_stats->p_ru; |
2609 | timerclear(&rup->ru.ru_utime); |
2610 | timerclear(&rup->ru.ru_stime); |
2611 | |
2612 | #ifdef FIXME |
2613 | if (task) { |
2614 | mach_task_basic_info_data_t tinfo; |
2615 | task_thread_times_info_data_t ttimesinfo; |
2616 | int task_info_stuff, task_ttimes_stuff; |
2617 | struct timeval ut,st; |
2618 | |
2619 | task_info_stuff = MACH_TASK_BASIC_INFO_COUNT; |
2620 | task_info(task, MACH_TASK_BASIC_INFO, |
2621 | &tinfo, &task_info_stuff); |
2622 | p->p_ru->ru.ru_utime.tv_sec = tinfo.user_time.seconds; |
2623 | p->p_ru->ru.ru_utime.tv_usec = tinfo.user_time.microseconds; |
2624 | p->p_ru->ru.ru_stime.tv_sec = tinfo.system_time.seconds; |
2625 | p->p_ru->ru.ru_stime.tv_usec = tinfo.system_time.microseconds; |
2626 | |
2627 | task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT; |
2628 | task_info(task, TASK_THREAD_TIMES_INFO, |
2629 | &ttimesinfo, &task_ttimes_stuff); |
2630 | |
2631 | ut.tv_sec = ttimesinfo.user_time.seconds; |
2632 | ut.tv_usec = ttimesinfo.user_time.microseconds; |
2633 | st.tv_sec = ttimesinfo.system_time.seconds; |
2634 | st.tv_usec = ttimesinfo.system_time.microseconds; |
2635 | timeradd(&ut,&p->p_ru->ru.ru_utime,&p->p_ru->ru.ru_utime); |
2636 | timeradd(&st,&p->p_ru->ru.ru_stime,&p->p_ru->ru.ru_stime); |
2637 | } |
2638 | #endif /* FIXME */ |
2639 | |
2640 | ruadd(&rup->ru, &p->p_stats->p_cru); |
2641 | |
2642 | gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT); |
2643 | rup->ri.ri_phys_footprint = 0; |
2644 | rup->ri.ri_proc_exit_abstime = mach_absolute_time(); |
2645 | |
2646 | /* |
2647 | * Now that we have filled in the rusage info, make it |
2648 | * visible to an external observer via proc_pid_rusage(). |
2649 | */ |
2650 | p->p_ru = rup; |
2651 | } |
2652 | |
2653 | /* |
2654 | * Free up profiling buffers. |
2655 | */ |
2656 | { |
2657 | struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn; |
2658 | |
2659 | p1 = p0->pr_next; |
2660 | p0->pr_next = NULL; |
2661 | p0->pr_scale = 0; |
2662 | |
2663 | for (; p1 != NULL; p1 = pn) { |
2664 | pn = p1->pr_next; |
2665 | kfree(p1, sizeof *p1); |
2666 | } |
2667 | } |
2668 | |
2669 | #if PSYNCH |
2670 | pth_proc_hashdelete(p); |
2671 | #endif /* PSYNCH */ |
2672 | |
2673 | proc_free_realitimer(p); |
2674 | |
2675 | /* |
2676 | * Other substructures are freed from wait(). |
2677 | */ |
2678 | FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS); |
2679 | p->p_stats = NULL; |
2680 | |
2681 | FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS); |
2682 | p->p_sigacts = NULL; |
2683 | |
2684 | proc_limitdrop(p, 1); |
2685 | p->p_limit = NULL; |
2686 | |
2687 | /* |
2688 | * Finish up by terminating the task |
2689 | * and halt this thread (only if a |
2690 | * member of the task exiting). |
2691 | */ |
2692 | p->task = TASK_NULL; |
2693 | |
2694 | /* |
2695 | * Notify parent that we're gone. |
2696 | */ |
2697 | pp = proc_parent(p); |
2698 | if ((p->p_listflag & P_LIST_DEADPARENT) == 0) { |
2699 | if (pp != initproc) { |
2700 | proc_lock(pp); |
2701 | pp->si_pid = p->p_pid; |
2702 | pp->p_xhighbits = p->p_xhighbits; |
2703 | p->p_xhighbits = 0; |
2704 | pp->si_status = p->p_xstat; |
2705 | pp->si_code = CLD_EXITED; |
2706 | /* |
2707 | * p_ucred usage is safe as it is an exiting process |
2708 | * and reference is dropped in reap |
2709 | */ |
2710 | pp->si_uid = kauth_cred_getruid(p->p_ucred); |
2711 | proc_unlock(pp); |
2712 | } |
/* mark as a zombie */
/*
 * No need to take the proc lock as all refs are drained and
 * no one except the parent (for reaping) can look at this.
 * The write is to an int and is coherent. Also the parent is
 * keyed off the list lock for reaping.
 */
2720 | p->p_stat = SZOMB; |
2721 | |
2722 | psignal(pp, SIGCHLD); |
2723 | |
2724 | /* and now wakeup the parent */ |
2725 | proc_list_lock(); |
2726 | wakeup((caddr_t)pp); |
2727 | proc_list_unlock(); |
2728 | } else { |
2729 | proc_list_lock(); |
2730 | /* check for lookups by zomb sysctl */ |
2731 | while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { |
msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
2733 | } |
2734 | p->p_stat = SZOMB; |
2735 | p->p_listflag |= P_LIST_WAITING; |
2736 | |
2737 | /* |
2738 | * This is a named reference and it is not granted |
2739 | * if the reap is already in progress. So we get |
2740 | * the reference here exclusively and their can be |
2741 | * no waiters. So there is no need for a wakeup |
2742 | * after we are done. AlsO the reap frees the structure |
2743 | * and the proc struct cannot be used for wakeups as well. |
2744 | * It is safe to use p here as this is system reap |
2745 | */ |
2746 | (void)reap_child_locked(pp, p, 0, 0, 1, 1); |
2747 | /* list lock dropped by reap_child_locked */ |
2748 | } |
2749 | proc_rele(pp); |
2750 | } |
2751 | |
2752 | |
2753 | /* |
2754 | * munge_rusage |
2755 | * LP64 support - long is 64 bits if we are dealing with a 64 bit user |
2756 | * process. We munge the kernel version of rusage into the |
2757 | * 64 bit version. |
2758 | */ |
2759 | __private_extern__ void |
2760 | munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p) |
2761 | { |
2762 | /* Zero-out struct so that padding is cleared */ |
2763 | bzero(a_user_rusage_p, sizeof(struct user64_rusage)); |
2764 | |
2765 | /* timeval changes size, so utime and stime need special handling */ |
2766 | a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec; |
2767 | a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec; |
2768 | a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec; |
2769 | a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec; |
2770 | /* |
2771 | * everything else can be a direct assign, since there is no loss |
2772 | * of precision implied boing 32->64. |
2773 | */ |
2774 | a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss; |
2775 | a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss; |
2776 | a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss; |
2777 | a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss; |
2778 | a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt; |
2779 | a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt; |
2780 | a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap; |
2781 | a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock; |
2782 | a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock; |
2783 | a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd; |
2784 | a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv; |
2785 | a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals; |
2786 | a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw; |
2787 | a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw; |
2788 | } |
2789 | |
2790 | /* For a 64-bit kernel and 32-bit userspace, munging may be needed */ |
2791 | __private_extern__ void |
2792 | munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p) |
2793 | { |
2794 | /* timeval changes size, so utime and stime need special handling */ |
2795 | a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec; |
2796 | a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec; |
2797 | a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec; |
2798 | a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec; |
2799 | /* |
2800 | * everything else can be a direct assign. We currently ignore |
2801 | * the loss of precision |
2802 | */ |
2803 | a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss; |
2804 | a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss; |
2805 | a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss; |
2806 | a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss; |
2807 | a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt; |
2808 | a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt; |
2809 | a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap; |
2810 | a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock; |
2811 | a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock; |
2812 | a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd; |
2813 | a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv; |
2814 | a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals; |
2815 | a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw; |
2816 | a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw; |
2817 | } |
2818 | |
2819 | void |
2820 | kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo) |
2821 | { |
2822 | assert(thread != NULL); |
2823 | assert(waitinfo != NULL); |
2824 | |
2825 | struct uthread *ut = get_bsdthread_info(thread); |
2826 | waitinfo->context = 0; |
2827 | // ensure wmesg is consistent with a thread waiting in wait4 |
assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
2829 | struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args; |
2830 | // May not actually contain a pid; this is just the argument to wait4. |
2831 | // See man wait4 for other valid wait4 arguments. |
2832 | waitinfo->owner = args->pid; |
2833 | } |
2834 | |
2835 | |