/*
 * Copyright (c) 2000-2007, 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.8 (Berkeley) 2/14/95
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <kern/assert.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/reason.h>
#include <sys/resourcevar.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/acct.h>
#include <sys/codesign.h>
#include <sys/sysproto.h>
#if CONFIG_PERSONAS
#include <sys/persona.h>
#endif
#include <sys/doc_tombstone.h>
#if CONFIG_DTRACE
/* Do not include dtrace.h, it redefines kmem_[alloc/free] */
extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);
extern void dtrace_proc_fork(proc_t, proc_t, int);

/*
 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
 * we will store its value before actually calling it.
 */
static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;

#include <sys/dtrace_ptss.h>
#endif

#include <security/audit/audit.h>

#include <mach/mach_types.h>
#include <kern/coalition.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>

#include <os/log.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#endif

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_shared_region.h>

#include <sys/shm_internal.h>	/* for shmfork() */
#include <mach/task.h>		/* for thread_create() */
#include <mach/thread_act.h>	/* for thread_resume() */

#include <sys/sdt.h>

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

/* XXX routines which should have Mach prototypes, but don't */
void thread_set_parent(thread_t parent, int pid);
extern void act_thread_catt(void *ctx);
void thread_set_child(thread_t child, int pid);
void *act_thread_csave(void);
extern boolean_t task_is_exec_copy(task_t);


thread_t cloneproc(task_t, coalition_t *, proc_t, int, int);
proc_t forkproc(proc_t);
void forkproc_free(proc_t);
thread_t fork_create_child(task_t parent_task,
			   coalition_t *parent_coalitions,
			   proc_t child,
			   int inherit_memory,
			   int is_64bit_addr,
			   int is_64bit_data,
			   int in_exec);
void proc_vfork_begin(proc_t parent_proc);
void proc_vfork_end(proc_t parent_proc);

#define	DOFORK	0x1	/* fork() system call */
#define	DOVFORK	0x2	/* vfork() system call */

/*
 * proc_vfork_begin
 *
 * Description:	start a vfork on a process
 *
 * Parameters:	parent_proc		process (re)entering vfork state
 *
 * Returns:	(void)
 *
 * Notes:	Although this function increments a count, a count in
 *		excess of 1 is not currently supported.  According to the
 *		POSIX standard, calling anything other than execve() or
 *		_exit() following a vfork(), including calling vfork()
 *		itself again, will result in undefined behaviour
 */
void
proc_vfork_begin(proc_t parent_proc)
{
	proc_lock(parent_proc);
	parent_proc->p_lflag |= P_LVFORK;
	parent_proc->p_vforkcnt++;
	proc_unlock(parent_proc);
}

/*
 * proc_vfork_end
 *
 * Description:	stop a vfork on a process
 *
 * Parameters:	parent_proc		process leaving vfork state
 *
 * Returns:	(void)
 *
 * Notes:	Decrements the count; currently, reentrancy of vfork()
 *		is unsupported on the current process
 */
void
proc_vfork_end(proc_t parent_proc)
{
	proc_lock(parent_proc);
	parent_proc->p_vforkcnt--;
	if (parent_proc->p_vforkcnt < 0)
		panic("vfork cnt is -ve");
	if (parent_proc->p_vforkcnt == 0)
		parent_proc->p_lflag &= ~P_LVFORK;
	proc_unlock(parent_proc);
}


/*
 * vfork
 *
 * Description:	vfork system call
 *
 * Parameters:	void			[no arguments]
 *
 * Retval:	0			(to child process)
 *		!0			pid of child (to parent process)
 *		-1			error (see "Returns:")
 *
 * Returns:	EAGAIN			Administrative limit reached
 *		EINVAL			vfork() called during vfork()
 *		ENOMEM			Failed to allocate new process
 *
 * Note:	After a successful call to this function, the parent process
 *		has its task, thread, and uthread lent to the child process,
 *		and control is returned to the caller; if this function is
 *		invoked as a system call, the return is to user space, and
 *		is effectively running on the child process.
 *
 *		Subsequent calls that operate on process state are permitted,
 *		though discouraged, and will operate on the child process; any
 *		operations on the task, thread, or uthread will result in
 *		changes in the parent state, and, if inheritable, the child
 *		state, when a task, thread, and uthread are realized for the
 *		child process at execve() time, will also be effected.  Given
 *		this, it's recommended that people use the posix_spawn() call
 *		instead.
 *
 * BLOCK DIAGRAM OF VFORK
 *
 * Before:
 *
 *     ,----------------.         ,-------------.
 *     |                |   task  |             |
 *     | parent_thread  | ------> | parent_task |
 *     |                | <.list. |             |
 *     `----------------'         `-------------'
 *    uthread |  ^             bsd_info |  ^
 *            v  | vc_thread            v  | task
 *     ,----------------.         ,-------------.
 *     |                |         |             |
 *     | parent_uthread | <.list. | parent_proc |  <-- current_proc()
 *     |                |         |             |
 *     `----------------'         `-------------'
 *    uu_proc |
 *            v
 *           NULL
 *
 * After:
 *
 *                 ,----------------.         ,-------------.
 *                 |                |   task  |             |
 *          ,----> | parent_thread  | ------> | parent_task |
 *          |      |                | <.list. |             |
 *          |      `----------------'         `-------------'
 *          |     uthread |  ^             bsd_info |  ^
 *          |             v  | vc_thread            v  | task
 *          |      ,----------------.         ,-------------.
 *          |      |                |         |             |
 *          |      | parent_uthread | <.list. | parent_proc |
 *          |      |                |         |             |
 *          |      `----------------'         `-------------'
 *          |     uu_proc |      . list
 *          |             v     v
 *          |      ,----------------.
 *          `----- |                |
 *      p_vforkact | child_proc     |  <-- current_proc()
 *                 |                |
 *                 `----------------'
 */
int
vfork(proc_t parent_proc, __unused struct vfork_args *uap, int32_t *retval)
{
	thread_t child_thread;
	int err;

	if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_VFORK, NULL)) != 0) {
		retval[1] = 0;
	} else {
		uthread_t ut = get_bsdthread_info(current_thread());
		proc_t child_proc = ut->uu_proc;

		retval[0] = child_proc->p_pid;
		retval[1] = 1;		/* flag child return for user space */

		/*
		 * Drop the signal lock on the child which was taken on our
		 * behalf by forkproc()/cloneproc() to prevent signals being
		 * received by the child in a partially constructed state.
		 */
		proc_signalend(child_proc, 0);
		proc_transend(child_proc, 0);

		proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
		DTRACE_PROC1(create, proc_t, child_proc);
		ut->uu_flag &= ~UT_VFORKING;
	}

	return (err);
}
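
/*
 * For reference, the only user-space pattern supported by the vfork() path
 * above is the classic vfork()/execve()/_exit() sequence described in the
 * Note block preceding it.  A minimal illustrative sketch follows (user-space
 * code, not part of this file); the program being exec'd and the error
 * handling are placeholders, not anything this kernel code prescribes.
 *
 *	#include <unistd.h>
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		// child: borrows the parent's task/thread/uthread; only
 *		// execve()/execl() or _exit() may safely follow.
 *		execl("/bin/ls", "ls", (char *)NULL);
 *		_exit(127);		// exec failed
 *	} else if (pid > 0) {
 *		// parent: resumes only after the child execs or exits
 *	} else {
 *		// error: EAGAIN/EINVAL/ENOMEM, as documented above
 *	}
 */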


/*
 * fork1
 *
 * Description:	common code used by all new process creation other than the
 *		bootstrap of the initial process on the system
 *
 * Parameters:	parent_proc		parent process of the process being
 *					created
 *		child_threadp		pointer to location to receive the
 *					Mach thread_t of the child process
 *					created
 *		kind			kind of creation being requested
 *		coalitions		if spawn, the set of coalitions the
 *					child process should join, or NULL to
 *					inherit the parent's.  On non-spawns,
 *					this param is ignored and the child
 *					always inherits the parent's
 *					coalitions.
 *
 * Notes:	Permissible values for 'kind':
 *
 *		PROC_CREATE_FORK	Create a complete process which will
 *					return actively running in both the
 *					parent and the child; the child copies
 *					the parent address space.
 *		PROC_CREATE_SPAWN	Create a complete process which will
 *					return actively running in the parent
 *					only after returning actively running
 *					in the child; the child address space
 *					is newly created by an image activator,
 *					after which the child is run.
 *		PROC_CREATE_VFORK	Creates a partial process which will
 *					borrow the parent task, thread, and
 *					uthread to return running in the child;
 *					the child address space and other parts
 *					are lazily created at execve() time, or
 *					the child is terminated, and the parent
 *					does not actively run until that
 *					happens.
 *
 *		At first it may seem strange that we return the child thread
 *		address rather than process structure, since the process is
 *		the only part guaranteed to be "new"; however, since we do
 *		not actually adjust other references between Mach and BSD (see
 *		the block diagram above the implementation of vfork()), this
 *		is the only method which guarantees us the ability to get
 *		back to the other information.
 */
int
fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalitions)
{
	thread_t parent_thread = (thread_t)current_thread();
	uthread_t parent_uthread = (uthread_t)get_bsdthread_info(parent_thread);
	proc_t child_proc = NULL;	/* set in switch, but compiler... */
	thread_t child_thread = NULL;
	uid_t uid;
	int count;
	int err = 0;
	int spawn = 0;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = kauth_getruid();
	proc_list_lock();
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
		/*
		 * On the development kernel, panic so that the fact that we hit
		 * the process limit is obvious, as this may very well wedge the
		 * system.
		 */
		panic("The process table is full; parent pid=%d", parent_proc->p_pid);
#endif
		proc_list_unlock();
		tablefull("proc");
		return (EAGAIN);
	}
	proc_list_unlock();

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit, which is
	 * always less than what an rlim_t can hold.
	 * (locking protection is provided by list lock held in chgproccnt)
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 &&
	    (rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED
		/*
		 * On the development kernel, panic so that the fact that we hit
		 * the per-user process limit is obvious.  This may be less dire
		 * than hitting the global process limit, but we cannot rely on
		 * that.
		 */
		panic("The per-user process limit has been hit; parent pid=%d, uid=%d", parent_proc->p_pid, uid);
#endif
		err = EAGAIN;
		goto bad;
	}

#if CONFIG_MACF
	/*
	 * Determine if MAC policies applied to the process will allow
	 * it to fork.  This is an advisory-only check.
	 */
	err = mac_proc_check_fork(parent_proc);
	if (err != 0) {
		goto bad;
	}
#endif

	switch (kind) {
	case PROC_CREATE_VFORK:
		/*
		 * Prevent a vfork while we are in vfork(); we should
		 * likely prevent a fork here as well, and this check
		 * should then be outside the switch statement, since
		 * the proc struct contents will copy from the child
		 * and the task/thread/uthread from the parent in that
		 * case.  We do not support vfork() in vfork() because
		 * we don't have to; the same non-requirement is true
		 * of both fork() and posix_spawn() and any call other
		 * than execve() and _exit(), but we've been
		 * historically lenient, so we continue to be so (for
		 * now).
		 *
		 *	<rdar://6640521>	Probably a source of random panics
		 */
		if (parent_uthread->uu_flag & UT_VFORK) {
			printf("fork1 called within vfork by %s\n", parent_proc->p_comm);
			err = EINVAL;
			goto bad;
		}

		/*
		 * Flag us in progress; if we chose to support vfork() in
		 * vfork(), we would chain our parent at this point (in
		 * effect, a stack push).  We don't, since we actually want
		 * to disallow everything not specified in the standard
		 */
		proc_vfork_begin(parent_proc);

		/* The newly created process comes with signal lock held */
		if ((child_proc = forkproc(parent_proc)) == NULL) {
			/* Failed to allocate new process */
			proc_vfork_end(parent_proc);
			err = ENOMEM;
			goto bad;
		}

// XXX BEGIN: wants to move to be common code (and safe)
#if CONFIG_MACF
		/*
		 * allow policies to associate the credential/label that
		 * we referenced from the parent ... with the child
		 * JMM - this really isn't safe, as we can drop that
		 *       association without informing the policy in other
		 *       situations (keep long enough to get policies changed)
		 */
		mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
#endif

		/*
		 * Propagate change of PID - may get new cred if auditing.
		 *
		 * NOTE:	This has no effect in the vfork case, since
		 *		child_proc->task != current_task(), but we
		 *		duplicate it because this is probably,
		 *		ultimately, wrong, since we will be running in
		 *		the "child" which is the parent task with the
		 *		wrong token until we get to the execve() or
		 *		_exit() call; a lot of "undefined" can happen
		 *		before that.
		 *
		 * <rdar://6640530> disallow everything but execve()/_exit()?
		 */
		set_security_token(child_proc);

		AUDIT_ARG(pid, child_proc->p_pid);

// XXX END: wants to move to be common code (and safe)

		/*
		 * BORROW PARENT TASK, THREAD, UTHREAD FOR CHILD
		 *
		 * Note: this is where we would "push" state instead of setting
		 * it for nested vfork() support (see proc_vfork_end() for
		 * description of issues here).
		 */
		child_proc->task = parent_proc->task;

		child_proc->p_lflag |= P_LINVFORK;
		child_proc->p_vforkact = parent_thread;
		child_proc->p_stat = SRUN;

		/*
		 * Until UT_VFORKING is cleared at the end of the vfork
		 * syscall, the process identity of this thread is slightly
		 * murky.
		 *
		 * As long as UT_VFORK and its associated field (uu_proc)
		 * is set, current_proc() will always return the child process.
		 *
		 * However dtrace_proc_selfpid() returns the parent pid to
		 * ensure that e.g. the proc:::create probe actions accrue
		 * to the parent.  (Otherwise the child magically seems to
		 * have created itself!)
		 */
		parent_uthread->uu_flag |= UT_VFORK | UT_VFORKING;
		parent_uthread->uu_proc = child_proc;
		parent_uthread->uu_userstate = (void *)act_thread_csave();
		parent_uthread->uu_vforkmask = parent_uthread->uu_sigmask;

		/* temporarily drop thread-set-id state */
		if (parent_uthread->uu_flag & UT_SETUID) {
			parent_uthread->uu_flag |= UT_WASSETUID;
			parent_uthread->uu_flag &= ~UT_SETUID;
		}

		/* blow thread state information */
		/* XXX is this actually necessary, given syscall return? */
		thread_set_child(parent_thread, child_proc->p_pid);

		child_proc->p_acflag = AFORK;	/* forked but not exec'ed */

		/*
		 * Preserve synchronization semantics of vfork.  If
		 * waiting for child to exec or exit, set P_PPWAIT
		 * on child, and sleep on our proc (in case of exit).
		 */
		child_proc->p_lflag |= P_LPPWAIT;
		pinsertchild(parent_proc, child_proc);	/* set visible */

		break;

	case PROC_CREATE_SPAWN:
		/*
		 * A spawned process differs from a forked process in that
		 * the spawned process does not carry around the parent's
		 * baggage with regard to address space copying, dtrace,
		 * and so on.
		 */
		spawn = 1;

		/* FALLSTHROUGH */

	case PROC_CREATE_FORK:
		/*
		 * When we clone the parent process, we are going to inherit
		 * its task attributes and memory, since when we fork, we
		 * will, in effect, create a duplicate of it, with only minor
		 * differences.  Contrarily, spawned processes do not inherit.
		 */
		if ((child_thread = cloneproc(parent_proc->task,
						spawn ? coalitions : NULL,
						parent_proc,
						spawn ? FALSE : TRUE,
						FALSE)) == NULL) {
			/* Failed to create thread */
			err = EAGAIN;
			goto bad;
		}

		/* copy current thread state into the child thread (only for fork) */
		if (!spawn) {
			thread_dup(child_thread);
		}

		/* child_proc = child_thread->task->proc; */
		child_proc = (proc_t)(get_bsdtask_info(get_threadtask(child_thread)));

// XXX BEGIN: wants to move to be common code (and safe)
#if CONFIG_MACF
		/*
		 * allow policies to associate the credential/label that
		 * we referenced from the parent ... with the child
		 * JMM - this really isn't safe, as we can drop that
		 *       association without informing the policy in other
		 *       situations (keep long enough to get policies changed)
		 */
		mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
#endif

		/*
		 * Propagate change of PID - may get new cred if auditing.
		 *
		 * NOTE:	This has no effect in the vfork case, since
		 *		child_proc->task != current_task(), but we
		 *		duplicate it because this is probably,
		 *		ultimately, wrong, since we will be running in
		 *		the "child" which is the parent task with the
		 *		wrong token until we get to the execve() or
		 *		_exit() call; a lot of "undefined" can happen
		 *		before that.
		 *
		 * <rdar://6640530> disallow everything but execve()/_exit()?
		 */
		set_security_token(child_proc);

		AUDIT_ARG(pid, child_proc->p_pid);

// XXX END: wants to move to be common code (and safe)

		/*
		 * Blow thread state information; this is what gives the child
		 * process its "return" value from a fork() call.
		 *
		 * Note: this should probably move to fork() proper, since it
		 * is not relevant to spawn, and the value won't matter
		 * until we resume the child there.  If you are in here
		 * refactoring code, consider doing this at the same time.
		 */
		thread_set_child(child_thread, child_proc->p_pid);

		child_proc->p_acflag = AFORK;	/* forked but not exec'ed */

#if CONFIG_DTRACE
		dtrace_proc_fork(parent_proc, child_proc, spawn);
#endif	/* CONFIG_DTRACE */
		if (!spawn) {
			/*
			 * Of note, we need to initialize the bank context behind
			 * the protection of the proc_trans lock to prevent a race with exit.
			 */
			task_bank_init(get_threadtask(child_thread));
		}

		break;

	default:
		panic("fork1 called with unknown kind %d", kind);
		break;
	}


	/* return the thread pointer to the caller */
	*child_threadp = child_thread;

bad:
	/*
	 * In the error case, we return a 0 value for the returned pid (but
	 * it is ignored in the trampoline due to the error return); this
	 * is probably not necessary.
	 */
	if (err) {
		(void)chgproccnt(uid, -1);
	}

	return (err);
}


/*
 * vfork_return
 *
 * Description:	"Return" to parent vfork thread() following execve/_exit;
 *		this is done by reassociating the parent process structure
 *		with the task, thread, and uthread.
 *
 *		Refer to the ASCII art above vfork() to figure out the
 *		state we're undoing.
 *
 * Parameters:	child_proc		Child process
 *		retval			System call return value array
 *		rval			Return value to present to parent
 *
 * Returns:	void
 *
 * Notes:	The caller resumes or exits the parent, as appropriate, after
 *		calling this function.
 */
void
vfork_return(proc_t child_proc, int32_t *retval, int rval)
{
	task_t parent_task = get_threadtask(child_proc->p_vforkact);
	proc_t parent_proc = get_bsdtask_info(parent_task);
	thread_t th = current_thread();
	uthread_t uth = get_bsdthread_info(th);

	act_thread_catt(uth->uu_userstate);

	/* clear vfork state in parent proc structure */
	proc_vfork_end(parent_proc);

	/* REPATRIATE PARENT TASK, THREAD, UTHREAD */
	uth->uu_userstate = 0;
	uth->uu_flag &= ~UT_VFORK;
	/* restore thread-set-id state */
	if (uth->uu_flag & UT_WASSETUID) {
		uth->uu_flag |= UT_SETUID;
		uth->uu_flag &= ~UT_WASSETUID;
	}
	uth->uu_proc = 0;
	uth->uu_sigmask = uth->uu_vforkmask;

	proc_lock(child_proc);
	child_proc->p_lflag &= ~P_LINVFORK;
	child_proc->p_vforkact = 0;
	proc_unlock(child_proc);

	thread_set_parent(th, rval);

	if (retval) {
		retval[0] = rval;
		retval[1] = 0;			/* mark parent */
	}
}
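
/*
 * Note on the two-element retval convention used by vfork()/vfork_return()
 * and fork() in this file: retval[0] carries the pid and retval[1] flags
 * which side is returning (0 = parent, 1 = child).  The following sketch
 * shows how a wrapper that can see both values could map them onto the
 * familiar fork()-style result; the actual mechanism used by the libc
 * fork/vfork stubs lives outside this file and is not specified here.
 *
 *	// ret0/ret1 correspond to retval[0]/retval[1] as set above
 *	static pid_t
 *	fork_wrapper_result(int32_t ret0, int32_t ret1)
 *	{
 *		if (ret1 != 0)
 *			return 0;	// child side: report pid 0
 *		return ret0;		// parent side: report the child's pid
 *	}
 */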


/*
 * fork_create_child
 *
 * Description:	Common operations associated with the creation of a child
 *		process
 *
 * Parameters:	parent_task		parent task
 *		parent_coalitions	parent's set of coalitions
 *		child_proc		child process
 *		inherit_memory		TRUE, if the parent's address space is
 *					to be inherited by the child
 *		is_64bit_addr		TRUE, if the child being created will
 *					be associated with a 64-bit address space
 *		is_64bit_data		TRUE, if the child being created will use
 *					a 64-bit register state
 *		in_exec			TRUE, if called from execve or posix_spawn set exec
 *					FALSE, if called from fork or vfexec
 *
 * Note:	This code is called in the fork() case, from the execve() call
 *		graph, if implementing an execve() following a vfork(), from
 *		the posix_spawn() call graph (which implicitly includes a
 *		vfork() equivalent call), and in the system bootstrap case.
 *
 *		It creates a new task and thread (and as a side effect of the
 *		thread creation, a uthread) in the parent coalition set, which is
 *		then associated with the process 'child'.  If the parent
 *		process address space is to be inherited, then a flag
 *		indicates that the newly created task should inherit this from
 *		the parent task.
 *
 *		As a special concession to bootstrapping the initial process
 *		in the system, it's possible for 'parent_task' to be TASK_NULL;
 *		in this case, 'inherit_memory' MUST be FALSE.
 */
thread_t
fork_create_child(task_t parent_task,
		  coalition_t *parent_coalitions,
		  proc_t child_proc,
		  int inherit_memory,
		  int is_64bit_addr,
		  int is_64bit_data,
		  int in_exec)
{
	thread_t	child_thread = NULL;
	task_t		child_task;
	kern_return_t	result;

	/* Create a new task for the child process */
	result = task_create_internal(parent_task,
				      parent_coalitions,
				      inherit_memory,
				      is_64bit_addr,
				      is_64bit_data,
				      TF_LRETURNWAIT | TF_LRETURNWAITER,	/* All created threads will wait in task_wait_to_return */
				      in_exec ? TPF_EXEC_COPY : TPF_NONE,	/* Mark the task exec copy if in execve */
				      &child_task);
	if (result != KERN_SUCCESS) {
		printf("%s: task_create_internal failed.  Code: %d\n",
		    __func__, result);
		goto bad;
	}

	if (!in_exec) {
		/*
		 * Set the child process task to the new task if not in exec,
		 * will set the task for exec case in proc_exec_switch_task after image activation.
		 */
		child_proc->task = child_task;
	}

	/* Set child task process to child proc */
	set_bsdtask_info(child_task, child_proc);

	/* Propagate CPU limit timer from parent */
	if (timerisset(&child_proc->p_rlim_cpu))
		task_vtimer_set(child_task, TASK_VTIMER_RLIM);

	/*
	 * Set child process BSD visible scheduler priority if nice value
	 * inherited from parent
	 */
	if (child_proc->p_nice != 0)
		resetpriority(child_proc);

	/*
	 * Create a new thread for the child process
	 * The new thread is waiting on the event triggered by 'task_clear_return_wait'
	 */
	result = thread_create_waiting(child_task,
				       (thread_continue_t)task_wait_to_return,
				       task_get_return_wait_event(child_task),
				       &child_thread);

	if (result != KERN_SUCCESS) {
		printf("%s: thread_create failed.  Code: %d\n",
		    __func__, result);
		task_deallocate(child_task);
		child_task = NULL;
		goto bad;
	}

	/*
	 * Tag thread as being the first thread in its task.
	 */
	thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD);

bad:
	thread_yield_internal(1);

	return (child_thread);
}


/*
 * fork
 *
 * Description:	fork system call.
 *
 * Parameters:	parent			Parent process to fork
 *		uap (void)		[unused]
 *		retval			Return value
 *
 * Returns:	0			Success
 *		EAGAIN			Resource unavailable, try again
 *
 * Notes:	Attempts to create a new child process which inherits state
 *		from the parent process.  If successful, the call returns
 *		having created an initially suspended child process with an
 *		extra Mach task and thread reference; until we resume the
 *		child process, it is not yet running.
 *
 *		The return information to the child is contained in the
 *		thread state structure of the new child, and does not
 *		become visible to the child through a normal return process,
 *		since it never made the call into the kernel itself in the
 *		first place.
 *
 *		After resuming the thread, this function returns directly to
 *		the parent process which invoked the fork() system call.
 *
 * Important:	The child thread_resume occurs before the parent returns;
 *		depending on scheduling latency, this means that it is not
 *		deterministic as to whether the parent or child is scheduled
 *		to run first.  It is entirely possible that the child could
 *		run to completion prior to the parent running.
 */
int
fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
{
	thread_t child_thread;
	int err;

	retval[1] = 0;		/* flag parent return for user space */

	if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
		task_t child_task;
		proc_t child_proc;

		/* Return to the parent */
		child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
		retval[0] = child_proc->p_pid;

		/*
		 * Drop the signal lock on the child which was taken on our
		 * behalf by forkproc()/cloneproc() to prevent signals being
		 * received by the child in a partially constructed state.
		 */
		proc_signalend(child_proc, 0);
		proc_transend(child_proc, 0);

		/* flag the fork has occurred */
		proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
		DTRACE_PROC1(create, proc_t, child_proc);

#if CONFIG_DTRACE
		if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL)
			(*dtrace_proc_waitfor_hook)(child_proc);
#endif

		/* "Return" to the child */
		task_clear_return_wait(get_threadtask(child_thread));

		/* drop the extra references we got during the creation */
		if ((child_task = (task_t)get_threadtask(child_thread)) != NULL) {
			task_deallocate(child_task);
		}
		thread_deallocate(child_thread);
	}

	return (err);
}
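
/*
 * For reference, a minimal user-space sketch of consuming the fork() system
 * call implemented above (illustrative only, not part of this file).  As the
 * "Important" note above explains, no ordering between parent and child
 * should be assumed after the call returns.
 *
 *	#include <unistd.h>
 *	#include <sys/wait.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		// child: sees a return value of 0
 *		_exit(0);
 *	} else if (pid > 0) {
 *		// parent: sees the child's pid; reap it to avoid a zombie
 *		int status;
 *		(void)waitpid(pid, &status, 0);
 *	} else {
 *		// error: e.g. EAGAIN when a process limit is hit
 *	}
 */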


/*
 * cloneproc
 *
 * Description:	Create a new process from a specified process.
 *
 * Parameters:	parent_task		The parent task to be cloned, or
 *					TASK_NULL if the new task is not to
 *					inherit the task or VM characteristics
 *					of the parent
 *		parent_proc		The parent process to be cloned
 *		inherit_memory		TRUE if the child is to inherit
 *					memory from the parent; if this is
 *					TRUE, then parent_task must also be
 *					non-NULL
 *		memstat_internal	Whether to track the process in the
 *					jetsam priority list (if configured)
 *
 * Returns:	!NULL			pointer to new child thread
 *		NULL			Failure (unspecified)
 *
 * Note:	On return newly created child process has signal lock held
 *		to block delivery of signal to it if called with lock set.
 *		fork() code needs to explicitly remove this lock before
 *		signals can be delivered
 *
 *		In the case of bootstrap, this function can be called from
 *		bsd_utaskbootstrap() in order to bootstrap the first process;
 *		the net effect is to provide a uthread structure for the
 *		kernel process associated with the kernel task.
 *
 * XXX:		Tristating using the value parent_task as the major key
 *		and inherit_memory as the minor key is something we should
 *		refactor later; we owe the current semantics, ultimately,
 *		to the semantics of task_create_internal.  For now, we will
 *		live with this being somewhat awkward.
 */
thread_t
cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc, int inherit_memory, int memstat_internal)
{
#if !CONFIG_MEMORYSTATUS
#pragma unused(memstat_internal)
#endif
	task_t child_task;
	proc_t child_proc;
	thread_t child_thread = NULL;

	if ((child_proc = forkproc(parent_proc)) == NULL) {
		/* Failed to allocate new process */
		goto bad;
	}

	/*
	 * In the case where the parent_task is TASK_NULL (during the init path)
	 * we make the assumption that the register size will be the same as the
	 * address space size since there's no way to determine the possible
	 * register size until an image is exec'd.
	 *
	 * The only architecture that has different address space and register sizes
	 * (arm64_32) isn't being used within kernel-space, so the above assumption
	 * always holds true for the init path.
	 */
	const int parent_64bit_addr = parent_proc->p_flag & P_LP64;
	const int parent_64bit_data = (parent_task == TASK_NULL) ? parent_64bit_addr : task_get_64bit_data(parent_task);

	child_thread = fork_create_child(parent_task,
					 parent_coalitions,
					 child_proc,
					 inherit_memory,
					 parent_64bit_addr,
					 parent_64bit_data,
					 FALSE);

	if (child_thread == NULL) {
		/*
		 * Failed to create thread; now we must deconstruct the new
		 * process previously obtained from forkproc().
		 */
		forkproc_free(child_proc);
		goto bad;
	}

	child_task = get_threadtask(child_thread);
	if (parent_64bit_addr) {
		OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
	} else {
		OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
	}

#if CONFIG_MEMORYSTATUS
	if (memstat_internal) {
		proc_list_lock();
		child_proc->p_memstat_state |= P_MEMSTAT_INTERNAL;
		proc_list_unlock();
	}
#endif

	/* make child visible */
	pinsertchild(parent_proc, child_proc);

	/*
	 * Make child runnable, set start time.
	 */
	child_proc->p_stat = SRUN;
bad:
	return (child_thread);
}


/*
 * Destroy a process structure that resulted from a call to forkproc(), but
 * which must be returned to the system because of a subsequent failure
 * preventing it from becoming active.
 *
 * Parameters:	p			The incomplete process from forkproc()
 *
 * Returns:	(void)
 *
 * Note:	This function should only be used in an error handler following
 *		a call to forkproc().
 *
 *		Operations occur in reverse order of those in forkproc().
 */
void
forkproc_free(proc_t p)
{
#if CONFIG_PERSONAS
	persona_proc_drop(p);
#endif /* CONFIG_PERSONAS */

#if PSYNCH
	pth_proc_hashdelete(p);
#endif /* PSYNCH */

	/* We held signal and transition locks; drop them */
	proc_signalend(p, 0);
	proc_transend(p, 0);

	/*
	 * If we have our own copy of the resource limits structure, we
	 * need to free it.  If it's a shared copy, we need to drop our
	 * reference on it.
	 */
	proc_limitdrop(p, 0);
	p->p_limit = NULL;

#if SYSV_SHM
	/* Need to drop references to the shared memory segment(s), if any */
	if (p->vm_shm) {
		/*
		 * Use shmexec(): we have no address space, so no mappings
		 *
		 * XXX Yes, the routine is badly named.
		 */
		shmexec(p);
	}
#endif

	/* Need to undo the effects of the fdcopy(), if any */
	fdfree(p);

	/*
	 * Drop the reference on a text vnode pointer, if any
	 * XXX This code is broken in forkproc(); see <rdar://4256419>;
	 * XXX if anyone ever uses this field, we will be extremely unhappy.
	 */
	if (p->p_textvp) {
		vnode_rele(p->p_textvp);
		p->p_textvp = NULL;
	}

	/* Stop the profiling clock */
	stopprofclock(p);

	/* Update the audit session proc count */
	AUDIT_SESSION_PROCEXIT(p);

#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&p->p_mlock, proc_mlock_grp);
	lck_mtx_destroy(&p->p_fdmlock, proc_fdmlock_grp);
	lck_mtx_destroy(&p->p_ucred_mlock, proc_ucred_mlock_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&p->p_dtrace_sprlock, proc_lck_grp);
#endif
	lck_spin_destroy(&p->p_slock, proc_slock_grp);
#else /* CONFIG_FINE_LOCK_GROUPS */
	lck_mtx_destroy(&p->p_mlock, proc_lck_grp);
	lck_mtx_destroy(&p->p_fdmlock, proc_lck_grp);
	lck_mtx_destroy(&p->p_ucred_mlock, proc_lck_grp);
#if CONFIG_DTRACE
	lck_mtx_destroy(&p->p_dtrace_sprlock, proc_lck_grp);
#endif
	lck_spin_destroy(&p->p_slock, proc_lck_grp);
#endif /* CONFIG_FINE_LOCK_GROUPS */

	/* Release the credential reference */
	kauth_cred_unref(&p->p_ucred);

	proc_list_lock();
	/* Decrement the count of processes in the system */
	nprocs--;

	/* Take it out of process hash */
	LIST_REMOVE(p, p_hash);

	proc_list_unlock();

	thread_call_free(p->p_rcall);

	/* Free allocated memory */
	FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS);
	p->p_sigacts = NULL;
	FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS);
	p->p_stats = NULL;

	proc_checkdeadrefs(p);
	FREE_ZONE(p, sizeof *p, M_PROC);
}


/*
 * forkproc
 *
 * Description:	Create a new process structure, given a parent process
 *		structure.
 *
 * Parameters:	parent_proc		The parent process
 *
 * Returns:	!NULL			The new process structure
 *		NULL			Error (insufficient free memory)
 *
 * Note:	When successful, the newly created process structure is
 *		partially initialized; if a caller needs to deconstruct the
 *		returned structure, they must call forkproc_free() to do so.
 */
proc_t
forkproc(proc_t parent_proc)
{
	proc_t child_proc;	/* Our new process */
	static int nextpid = 0, pidwrap = 0, nextpidversion = 0;
	static uint64_t nextuniqueid = 0;
	int error = 0;
	struct session *sessp;
	uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread());

	MALLOC_ZONE(child_proc, proc_t, sizeof *child_proc, M_PROC, M_WAITOK);
	if (child_proc == NULL) {
		printf("forkproc: M_PROC zone exhausted\n");
		goto bad;
	}
	/* zero it out as we need to insert in hash */
	bzero(child_proc, sizeof *child_proc);

	MALLOC_ZONE(child_proc->p_stats, struct pstats *,
	    sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK);
	if (child_proc->p_stats == NULL) {
		printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n");
		FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
		child_proc = NULL;
		goto bad;
	}
	MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *,
	    sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK);
	if (child_proc->p_sigacts == NULL) {
		printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n");
		FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
		child_proc->p_stats = NULL;
		FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
		child_proc = NULL;
		goto bad;
	}

	/* allocate a callout for use by interval timers */
	child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);
	if (child_proc->p_rcall == NULL) {
		FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS);
		child_proc->p_sigacts = NULL;
		FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
		child_proc->p_stats = NULL;
		FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
		child_proc = NULL;
		goto bad;
	}


	/*
	 * Find an unused PID.
	 */

	proc_list_lock();

	nextpid++;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = 100;
		pidwrap = 1;
	}
	if (pidwrap != 0) {

		/* the pid stays in the hash for both zombie and running states */
		if (pfind_locked(nextpid) != PROC_NULL) {
			nextpid++;
			goto retry;
		}

		if (pgfind_internal(nextpid) != PGRP_NULL) {
			nextpid++;
			goto retry;
		}
		if (session_find_internal(nextpid) != SESSION_NULL) {
			nextpid++;
			goto retry;
		}
	}
	nprocs++;
	child_proc->p_pid = nextpid;
	child_proc->p_responsible_pid = nextpid;	/* initially responsible for self */
	child_proc->p_idversion = nextpidversion++;
	/* kernel process is handcrafted and not from fork, so start from 1 */
	child_proc->p_uniqueid = ++nextuniqueid;
#if 1
	if (child_proc->p_pid != 0) {
		if (pfind_locked(child_proc->p_pid) != PROC_NULL)
			panic("proc in the list already\n");
	}
#endif
	/* Insert in the hash */
	child_proc->p_listflag |= (P_LIST_INHASH | P_LIST_INCREATE);
	LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
	proc_list_unlock();

	if (child_proc->p_uniqueid == startup_serial_num_procs) {
		/*
		 * Turn off startup serial logging now that we have reached
		 * the defined number of startup processes.
		 */
		startup_serial_logging_active = false;
	}

	/*
	 * We've identified the PID we are going to use; initialize the new
	 * process structure.
	 */
	child_proc->p_stat = SIDL;
	child_proc->p_pgrpid = PGRPID_DEAD;

	/*
	 * The zero'ing of the proc was at the allocation time due to need
	 * for insertion to hash.  Copy the section that is to be copied
	 * directly from the parent.
	 */
	bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy,
	    (unsigned) ((caddr_t)&child_proc->p_endcopy - (caddr_t)&child_proc->p_startcopy));

	/*
	 * Some flags are inherited from the parent.
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
#if !CONFIG_EMBEDDED
	child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID));
#else /* !CONFIG_EMBEDDED */
	child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_SUGID));
#endif /* !CONFIG_EMBEDDED */
	if (parent_proc->p_flag & P_PROFIL)
		startprofclock(child_proc);

	child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK));

	/*
	 * Note that if the current thread has an assumed identity, this
	 * credential will be granted to the new process.
	 */
	child_proc->p_ucred = kauth_cred_get_with_ref();
	/* update cred on proc */
	PROC_UPDATE_CREDS_ONPROC(child_proc);
	/* update audit session proc count */
	AUDIT_SESSION_PROCNEW(child_proc);

#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&child_proc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&child_proc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_mtx_init(&child_proc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
#if CONFIG_DTRACE
	lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
#endif
	lck_spin_init(&child_proc->p_slock, proc_slock_grp, proc_lck_attr);
#else /* !CONFIG_FINE_LOCK_GROUPS */
	lck_mtx_init(&child_proc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&child_proc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&child_proc->p_ucred_mlock, proc_lck_grp, proc_lck_attr);
#if CONFIG_DTRACE
	lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
#endif
	lck_spin_init(&child_proc->p_slock, proc_lck_grp, proc_lck_attr);
#endif /* !CONFIG_FINE_LOCK_GROUPS */
	klist_init(&child_proc->p_klist);

	if (child_proc->p_textvp != NULLVP) {
		/* bump references to the text vnode */
		/* Need to hold iocount across the ref call */
		if (vnode_getwithref(child_proc->p_textvp) == 0) {
			error = vnode_ref(child_proc->p_textvp);
			vnode_put(child_proc->p_textvp);
			if (error != 0)
				child_proc->p_textvp = NULLVP;
		}
	}

	/*
	 * Copy the parent's per-process open file table to the child; if
	 * there is a per-thread current working directory, set the child's
	 * per-process current working directory to that instead of the
	 * parent's.
	 *
	 * XXX may fail to copy descriptors to child
	 */
	child_proc->p_fd = fdcopy(parent_proc, parent_uthread->uu_cdir);

#if SYSV_SHM
	if (parent_proc->vm_shm) {
		/* XXX may fail to attach shm to child */
		(void)shmfork(parent_proc, child_proc);
	}
#endif
	/*
	 * inherit the limit structure to child
	 */
	proc_limitfork(parent_proc, child_proc);

	if (child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		uint64_t rlim_cur = child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur;
		child_proc->p_rlim_cpu.tv_sec = (rlim_cur > __INT_MAX__) ? __INT_MAX__ : rlim_cur;
	}

	/* Initialize new process stats, including start time */
	/* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
	bzero(child_proc->p_stats, sizeof(*child_proc->p_stats));
	microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start);

	if (parent_proc->p_sigacts != NULL)
		(void)memcpy(child_proc->p_sigacts,
		    parent_proc->p_sigacts, sizeof *child_proc->p_sigacts);
	else
		(void)memset(child_proc->p_sigacts, 0, sizeof *child_proc->p_sigacts);

	sessp = proc_session(parent_proc);
	if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT)
		OSBitOrAtomic(P_CONTROLT, &child_proc->p_flag);
	session_rele(sessp);

	/*
	 * block all signals to reach the process.
	 * no transition race should be occurring with the child yet,
	 * but indicate that the process is in (the creation) transition.
	 */
	proc_signalstart(child_proc, 0);
	proc_transstart(child_proc, 0, 0);

	child_proc->p_pcaction = 0;

	TAILQ_INIT(&child_proc->p_uthlist);
	TAILQ_INIT(&child_proc->p_aio_activeq);
	TAILQ_INIT(&child_proc->p_aio_doneq);

	/* Inherit the parent flags for code sign */
	child_proc->p_csflags = (parent_proc->p_csflags & ~CS_KILLED);

	/*
	 * Copy work queue information
	 *
	 * Note: This should probably only happen in the case where we are
	 *	creating a child that is a copy of the parent; since this
	 *	routine is called in the non-duplication case of vfork()
	 *	or posix_spawn(), then this information should likely not
	 *	be duplicated.
	 *
	 * <rdar://6640553> Work queue pointers that no longer point to code
	 */
	child_proc->p_wqthread = parent_proc->p_wqthread;
	child_proc->p_threadstart = parent_proc->p_threadstart;
	child_proc->p_pthsize = parent_proc->p_pthsize;
	if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
		child_proc->p_lflag |= P_LREGISTER;
	}
	child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
	child_proc->p_dispatchqueue_serialno_offset = parent_proc->p_dispatchqueue_serialno_offset;
	child_proc->p_return_to_kernel_offset = parent_proc->p_return_to_kernel_offset;
	child_proc->p_mach_thread_self_offset = parent_proc->p_mach_thread_self_offset;
	child_proc->p_pth_tsd_offset = parent_proc->p_pth_tsd_offset;
#if PSYNCH
	pth_proc_hashinit(child_proc);
#endif /* PSYNCH */

#if CONFIG_PERSONAS
	child_proc->p_persona = NULL;
	error = persona_proc_inherit(child_proc, parent_proc);
	if (error != 0) {
		printf("forkproc: persona_proc_inherit failed (persona %d being destroyed?)\n", persona_get_uid(parent_proc->p_persona));
		forkproc_free(child_proc);
		child_proc = NULL;
		goto bad;
	}
#endif

#if CONFIG_MEMORYSTATUS
	/* Memorystatus init */
	child_proc->p_memstat_state = 0;
	child_proc->p_memstat_effectivepriority = JETSAM_PRIORITY_DEFAULT;
	child_proc->p_memstat_requestedpriority = JETSAM_PRIORITY_DEFAULT;
	child_proc->p_memstat_userdata = 0;
	child_proc->p_memstat_idle_start = 0;
	child_proc->p_memstat_idle_delta = 0;
	child_proc->p_memstat_memlimit = 0;
	child_proc->p_memstat_memlimit_active = 0;
	child_proc->p_memstat_memlimit_inactive = 0;
#if CONFIG_FREEZE
	child_proc->p_memstat_freeze_sharedanon_pages = 0;
#endif
	child_proc->p_memstat_dirty = 0;
	child_proc->p_memstat_idledeadline = 0;
#endif /* CONFIG_MEMORYSTATUS */

bad:
	return (child_proc);
}

void
proc_lock(proc_t p)
{
	LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(&p->p_mlock);
}

void
proc_unlock(proc_t p)
{
	lck_mtx_unlock(&p->p_mlock);
}

void
proc_spinlock(proc_t p)
{
	lck_spin_lock(&p->p_slock);
}

void
proc_spinunlock(proc_t p)
{
	lck_spin_unlock(&p->p_slock);
}

void
proc_list_lock(void)
{
	lck_mtx_lock(proc_list_mlock);
}

void
proc_list_unlock(void)
{
	lck_mtx_unlock(proc_list_mlock);
}

void
proc_ucred_lock(proc_t p)
{
	lck_mtx_lock(&p->p_ucred_mlock);
}

void
proc_ucred_unlock(proc_t p)
{
	lck_mtx_unlock(&p->p_ucred_mlock);
}

#include <kern/zalloc.h>

struct zone *uthread_zone = NULL;

static lck_grp_t *rethrottle_lock_grp;
static lck_attr_t *rethrottle_lock_attr;
static lck_grp_attr_t *rethrottle_lock_grp_attr;

static void
uthread_zone_init(void)
{
	assert(uthread_zone == NULL);

	rethrottle_lock_grp_attr = lck_grp_attr_alloc_init();
	rethrottle_lock_grp = lck_grp_alloc_init("rethrottle", rethrottle_lock_grp_attr);
	rethrottle_lock_attr = lck_attr_alloc_init();

	uthread_zone = zinit(sizeof(struct uthread),
	    thread_max * sizeof(struct uthread),
	    THREAD_CHUNK * sizeof(struct uthread),
	    "uthreads");
}

void *
uthread_alloc(task_t task, thread_t thread, int noinherit)
{
	proc_t p;
	uthread_t uth;
	uthread_t uth_parent;
	void *ut;

	if (uthread_zone == NULL)
		uthread_zone_init();

	ut = (void *)zalloc(uthread_zone);
	bzero(ut, sizeof(struct uthread));

	p = (proc_t) get_bsdtask_info(task);
	uth = (uthread_t)ut;
	uth->uu_thread = thread;

	lck_spin_init(&uth->uu_rethrottle_lock, rethrottle_lock_grp,
	    rethrottle_lock_attr);

	/*
	 * Thread inherits credential from the creating thread, if both
	 * are in the same task.
	 *
	 * If the creating thread has no credential or is from another
	 * task we can leave the new thread credential NULL.  If it needs
	 * one later, it will be lazily assigned from the task's process.
	 */
	uth_parent = (uthread_t)get_bsdthread_info(current_thread());
	if ((noinherit == 0) && task == current_task() &&
	    uth_parent != NULL &&
	    IS_VALID_CRED(uth_parent->uu_ucred)) {
		/*
		 * XXX The new thread is, in theory, being created in context
		 * XXX of parent thread, so a direct reference to the parent
		 * XXX is OK.
		 */
		kauth_cred_ref(uth_parent->uu_ucred);
		uth->uu_ucred = uth_parent->uu_ucred;
		/* the credential we just inherited is an assumed credential */
		if (uth_parent->uu_flag & UT_SETUID)
			uth->uu_flag |= UT_SETUID;
	} else {
		/* sometimes workqueue threads are created outside of task context */
		if ((task != kernel_task) && (p != PROC_NULL))
			uth->uu_ucred = kauth_cred_proc_ref(p);
		else
			uth->uu_ucred = NOCRED;
	}


	if ((task != kernel_task) && p) {

		proc_lock(p);
		if (noinherit != 0) {
			/* workq threads will not inherit masks */
			uth->uu_sigmask = ~workq_threadmask;
		} else if (uth_parent) {
			if (uth_parent->uu_flag & UT_SAS_OLDMASK)
				uth->uu_sigmask = uth_parent->uu_oldmask;
			else
				uth->uu_sigmask = uth_parent->uu_sigmask;
		}
		uth->uu_context.vc_thread = thread;
		/*
		 * Do not add the uthread to proc uthlist for exec copy task,
		 * since they do not hold a ref on proc.
		 */
		if (!task_is_exec_copy(task)) {
			TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
		}
		proc_unlock(p);

#if CONFIG_DTRACE
		if (p->p_dtrace_ptss_pages != NULL && !task_is_exec_copy(task)) {
			uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
		}
#endif
	}

	return (ut);
}

/*
 * This routine frees the thread name field of the uthread_t structure.  Split out of
 * uthread_cleanup() so the thread name does not get deallocated while generating a corpse fork.
 */
void
uthread_cleanup_name(void *uthread)
{
	uthread_t uth = (uthread_t)uthread;

	/*
	 * <rdar://17834538>
	 * Set pth_name to NULL before calling free().
	 * Previously there was a race condition in the
	 * case this code was executing during a stackshot
	 * where the stackshot could try and copy pth_name
	 * after it had been freed and before it was marked
	 * as NULL.
	 */
	if (uth->pth_name != NULL) {
		void *pth_name = uth->pth_name;
		uth->pth_name = NULL;
		kfree(pth_name, MAXTHREADNAMESIZE);
	}
	return;
}

/*
 * This routine frees all the BSD context in the uthread except the credential.
 * It does not free the uthread structure itself.
 */
void
uthread_cleanup(task_t task, void *uthread, void *bsd_info)
{
	struct _select *sel;
	uthread_t uth = (uthread_t)uthread;
	proc_t p = (proc_t)bsd_info;

#if PROC_REF_DEBUG
	if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
		panic("uthread_cleanup called for uthread %p with uu_proc_refcount != 0", uthread);
	}
#endif

	if (uth->uu_lowpri_window || uth->uu_throttle_info) {
		/*
		 * task is marked as a low priority I/O type
		 * and we've somehow managed to not dismiss the throttle
		 * through the normal exit paths back to user space...
		 * no need to throttle this thread since it's going away
		 * but we do need to update our bookkeeping w/r to throttled threads
		 *
		 * Calling this routine will clean up any throttle info reference
		 * still in use by the thread.
		 */
		throttle_lowpri_io(0);
	}
	/*
	 * Per-thread audit state should never last beyond system
	 * call return.  Since we don't audit the thread creation/
	 * removal, the thread state pointer should never be
	 * non-NULL when we get here.
	 */
	assert(uth->uu_ar == NULL);

	if (uth->uu_kqr_bound) {
		kqueue_threadreq_unbind(p, uth->uu_kqr_bound);
	}

	sel = &uth->uu_select;
	/* cleanup the select bit space */
	if (sel->nbytes) {
		FREE(sel->ibits, M_TEMP);
		FREE(sel->obits, M_TEMP);
		sel->nbytes = 0;
	}

	if (uth->uu_cdir) {
		vnode_rele(uth->uu_cdir);
		uth->uu_cdir = NULLVP;
	}

	if (uth->uu_wqset) {
		if (waitq_set_is_valid(uth->uu_wqset))
			waitq_set_deinit(uth->uu_wqset);
		FREE(uth->uu_wqset, M_SELECT);
		uth->uu_wqset = NULL;
		uth->uu_wqstate_sz = 0;
	}

	os_reason_free(uth->uu_exit_reason);

	if ((task != kernel_task) && p) {

		if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL)) {
			vfork_exit_internal(uth->uu_proc, 0, 1);
		}
		/*
		 * Remove the thread from the process list and
		 * transfer [appropriate] pending signals to the process.
		 * Do not remove the uthread from the proc uthlist for an exec
		 * copy task, since it does not hold a ref on the proc and
		 * would not have been added to the list.
		 */
		if (get_bsdtask_info(task) == p && !task_is_exec_copy(task)) {
			proc_lock(p);

			TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
			p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
			proc_unlock(p);
		}
#if CONFIG_DTRACE
		struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
		uth->t_dtrace_scratch = NULL;
		if (tmpptr != NULL && !task_is_exec_copy(task)) {
			dtrace_ptss_release_entry(p, tmpptr);
		}
#endif
	}
}

/* This routine releases the credential stored in the uthread */
void
uthread_cred_free(void *uthread)
{
	uthread_t uth = (uthread_t)uthread;

	/* drop the credential reference held by the uthread, if any */
	if (IS_VALID_CRED(uth->uu_ucred)) {
		kauth_cred_t oldcred = uth->uu_ucred;
		uth->uu_ucred = NOCRED;
		kauth_cred_unref(&oldcred);
	}
}

/* This routine frees the uthread structure held in the thread structure */
void
uthread_zone_free(void *uthread)
{
	uthread_t uth = (uthread_t)uthread;

	if (uth->t_tombstone) {
		kfree(uth->t_tombstone, sizeof(struct doc_tombstone));
		uth->t_tombstone = NULL;
	}

	lck_spin_destroy(&uth->uu_rethrottle_lock, rethrottle_lock_grp);

	uthread_cleanup_name(uthread);
	/* and free the uthread itself */
	zfree(uthread_zone, uthread);
}
