1 | /* |
2 | * Copyright (c) 2000-2007 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ |
29 | /* |
30 | * Copyright (c) 1982, 1986, 1989, 1991, 1993 |
31 | * The Regents of the University of California. All rights reserved. |
32 | * |
33 | * Redistribution and use in source and binary forms, with or without |
34 | * modification, are permitted provided that the following conditions |
35 | * are met: |
36 | * 1. Redistributions of source code must retain the above copyright |
37 | * notice, this list of conditions and the following disclaimer. |
38 | * 2. Redistributions in binary form must reproduce the above copyright |
39 | * notice, this list of conditions and the following disclaimer in the |
40 | * documentation and/or other materials provided with the distribution. |
41 | * 3. All advertising materials mentioning features or use of this software |
42 | * must display the following acknowledgement: |
43 | * This product includes software developed by the University of |
44 | * California, Berkeley and its contributors. |
45 | * 4. Neither the name of the University nor the names of its contributors |
46 | * may be used to endorse or promote products derived from this software |
47 | * without specific prior written permission. |
48 | * |
49 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
50 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
51 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
52 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
53 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
54 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
55 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
56 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
57 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
58 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
59 | * SUCH DAMAGE. |
60 | * |
61 | * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94 |
62 | */ |
63 | /* |
64 | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce |
65 | * support for mandatory and extensible security protections. This notice |
66 | * is included in support of clause 2.2 (b) of the Apple Public License, |
67 | * Version 2.0. |
68 | */ |
69 | /* HISTORY |
70 | * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com) |
71 | * Added current_proc_EXTERNAL() function for the use of kernel |
72 | * lodable modules. |
73 | * |
74 | * 05-Jun-95 Mac Gillon (mgillon) at NeXT |
75 | * New version based on 3.3NS and 4.4 |
76 | */ |
77 | |
78 | |
79 | #include <sys/param.h> |
80 | #include <sys/systm.h> |
81 | #include <sys/kernel.h> |
82 | #include <sys/proc_internal.h> |
83 | #include <sys/acct.h> |
84 | #include <sys/wait.h> |
85 | #include <sys/file_internal.h> |
86 | #include <sys/uio.h> |
87 | #include <sys/malloc.h> |
88 | #include <sys/lock.h> |
89 | #include <sys/mbuf.h> |
90 | #include <sys/ioctl.h> |
91 | #include <sys/tty.h> |
92 | #include <sys/signalvar.h> |
93 | #include <sys/syslog.h> |
94 | #include <sys/sysctl.h> |
95 | #include <sys/sysproto.h> |
96 | #include <sys/kauth.h> |
97 | #include <sys/codesign.h> |
98 | #include <sys/kernel_types.h> |
99 | #include <sys/ubc.h> |
100 | #include <kern/kalloc.h> |
101 | #include <kern/task.h> |
102 | #include <kern/coalition.h> |
103 | #include <sys/coalition.h> |
104 | #include <kern/assert.h> |
105 | #include <vm/vm_protos.h> |
106 | #include <vm/vm_map.h> /* vm_map_switch_protect() */ |
107 | #include <vm/vm_pageout.h> |
108 | #include <mach/task.h> |
109 | #include <mach/message.h> |
110 | #include <sys/priv.h> |
111 | #include <sys/proc_info.h> |
112 | #include <sys/bsdtask_info.h> |
113 | #include <sys/persona.h> |
114 | |
115 | #ifdef CONFIG_32BIT_TELEMETRY |
116 | #include <sys/kasl.h> |
117 | #endif /* CONFIG_32BIT_TELEMETRY */ |
118 | |
119 | #if CONFIG_CSR |
120 | #include <sys/csr.h> |
121 | #endif |
122 | |
123 | #if CONFIG_MEMORYSTATUS |
124 | #include <sys/kern_memorystatus.h> |
125 | #endif |
126 | |
127 | #if CONFIG_MACF |
128 | #include <security/mac_framework.h> |
129 | #endif |
130 | |
131 | #include <libkern/crypto/sha1.h> |
132 | |
133 | #ifdef CONFIG_32BIT_TELEMETRY |
134 | #define MAX_32BIT_EXEC_SIG_SIZE 160 |
135 | #endif /* CONFIG_32BIT_TELEMETRY */ |
136 | |
/*
 * Structure associated with user cacheing.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;	/* linkage on the per-uid hash chain */
	uid_t ui_uid;			/* uid this entry accounts for */
	long ui_proccnt;		/* number of processes owned by ui_uid */
};
/* bucket selector: uihash is a power-of-two size minus one, used as a mask */
#define UIHASH(uid) (&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;	/* uid -> uidinfo hash table */
u_long uihash; /* size of hash table - 1 */
148 | |
/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;		/* pid -> proc hash table */
u_long pidhash;				/* size of pid hash table - 1 */
struct pgrphashhead *pgrphashtbl;	/* pgid -> pgrp hash table */
u_long pgrphash;			/* size of pgrp hash table - 1 */
struct sesshashhead *sesshashtbl;	/* session id -> session hash table */
u_long sesshash;			/* size of session hash table - 1 */

struct proclist allproc;		/* all live (non-zombie) processes */
struct proclist zombproc;		/* exited but not yet reaped processes */
extern struct tty cons;			/* system console tty */

extern int cs_debug;			/* code-signing debug verbosity */
164 | |
#if DEBUG
#define __PROC_INTERNAL_DEBUG 1
#endif
#if CONFIG_COREDUMP
/* Name to give to core files */
#if defined(XNU_TARGET_OS_BRIDGE)
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/internal/%N.core" };
#elif CONFIG_EMBEDDED
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/cores/%N.core" };
#else
__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P" };
#endif
#endif

#if PROC_REF_DEBUG
#include <kern/backtrace.h>
#endif

/*
 * 64-bit value that may live at an unaligned address (e.g. inside a
 * packed stackshot structure); see proc_starttime_kdp().
 */
typedef uint64_t unaligned_u64 __attribute__((aligned(1)));

/* forward declarations for this file's internal helpers */
static void orphanpg(struct pgrp * pg);
void proc_name_kdp(task_t t, char * buf, int size);
int proc_threadname_kdp(void * uth, char * buf, size_t size);
void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime);
char * proc_name_address(void * p);

static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child);
static void pgrp_remove(proc_t p);
static void pgrp_replace(proc_t p, struct pgrp *pgrp);
static void pgdelete_dropref(struct pgrp *pgrp);
extern void pg_rele_dropref(struct pgrp * pgrp);
static int csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaddittoken);
static boolean_t proc_parent_is_currentproc(proc_t p);

/* iteration context passed to fixjob_callback() during job-control fixup */
struct fixjob_iterargs {
	struct pgrp * pg;
	struct session * mysession;
	int entering;
};

int fixjob_callback(proc_t, void *);
206 | |
207 | uint64_t |
208 | get_current_unique_pid(void) |
209 | { |
210 | proc_t p = current_proc(); |
211 | |
212 | if (p) |
213 | return p->p_uniqueid; |
214 | else |
215 | return 0; |
216 | } |
217 | |
/*
 * Initialize global process hashing structures.
 *
 * Called once at BSD startup.  Each hash table is sized as a
 * fraction of the configured maximum process count (maxproc).
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	sesshashtbl = hashinit(maxproc / 4, M_PROC, &sesshash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
#if CONFIG_PERSONAS
	personas_bootstrap();
#endif
}
234 | |
/*
 * Change the count associated with number of processes
 * a given user is using. This routine protects the uihash
 * with the list lock
 *
 * Returns the user's new process count.  A uidinfo entry is
 * created on first use and retired when the count returns to
 * zero.  Because allocation cannot happen while the list lock is
 * held, the function drops the lock, allocates, and retries from
 * "again:"; a pre-allocated entry that loses the race is freed
 * at "out:".
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;	/* entry pre-allocated for the retry pass */
	struct uihashhead *uipp;
	int retval;

again:
	proc_list_lock();
	uipp = UIHASH(uid);
	/* look for an existing entry for this uid in its hash bucket */
	for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next)
		if (uip->ui_uid == uid)
			break;
	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0) {
			retval = uip->ui_proccnt;
			proc_list_unlock();
			goto out;
		}
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0" );
		/* count dropped to zero: retire the entry */
		LIST_REMOVE(uip, ui_hash);
		retval = 0;
		proc_list_unlock();
		FREE_ZONE(uip, sizeof(*uip), M_PROC);
		goto out;
	}
	/* no entry: a negative diff here means accounting went wrong */
	if (diff <= 0) {
		if (diff == 0) {
			retval = 0;
			proc_list_unlock();
			goto out;
		}
		panic("chgproccnt: lost user" );
	}
	if (newuip != NULL) {
		/* install the entry allocated on a previous pass */
		uip = newuip;
		newuip = NULL;
		LIST_INSERT_HEAD(uipp, uip, ui_hash);
		uip->ui_uid = uid;
		uip->ui_proccnt = diff;
		retval = diff;
		proc_list_unlock();
		goto out;
	}
	/* must allocate with the list lock dropped, then retry */
	proc_list_unlock();
	MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	if (newuip == NULL)
		panic("chgproccnt: M_PROC zone depleted" );
	goto again;
out:
	/* another thread created the entry while we allocated: free ours */
	if (newuip != NULL)
		FREE_ZONE(newuip, sizeof(*uip), M_PROC);
	return(retval);
}
297 | |
/*
 * Is p an inferior of the current process?
 *
 * Walks p's parent chain under the proc list lock; returns 1 if
 * the walk reaches the current process before reaching pid 0.
 */
int
inferior(proc_t p)
{
	int retval = 0;

	proc_list_lock();
	for (; p != current_proc(); p = p->p_pptr)
		if (p->p_pid == 0)
			goto out;	/* hit the root without finding us */
	retval = 1;
out:
	proc_list_unlock();
	return(retval);
}
315 | |
/*
 * Is p an inferior of t ?
 *
 * Walks p's parent chain under the proc list lock and returns 1
 * when the walk reaches t.  The walk bails out at pid 0, when a
 * parent cycle back to the starting proc is detected, or after
 * checking more procs than currently exist.
 */
int
isinferior(proc_t p, proc_t t)
{
	int retval = 0;
	int nchecked = 0;
	proc_t start = p;

	/* if p==t they are not inferior */
	if (p == t)
		return(0);

	proc_list_lock();
	for (; p != t; p = p->p_pptr) {
		nchecked++;

		/* Detect here if we're in a cycle */
		if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs))
			goto out;
	}
	retval = 1;
out:
	proc_list_unlock();
	return(retval);
}
343 | |
344 | int |
345 | proc_isinferior(int pid1, int pid2) |
346 | { |
347 | proc_t p = PROC_NULL; |
348 | proc_t t = PROC_NULL; |
349 | int retval = 0; |
350 | |
351 | if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0)) |
352 | retval = isinferior(p, t); |
353 | |
354 | if (p != PROC_NULL) |
355 | proc_rele(p); |
356 | if (t != PROC_NULL) |
357 | proc_rele(t); |
358 | |
359 | return(retval); |
360 | } |
361 | |
362 | proc_t |
363 | proc_find(int pid) |
364 | { |
365 | return(proc_findinternal(pid, 0)); |
366 | } |
367 | |
/*
 * Look up a process by pid and take a reference on it.
 * Returns PROC_NULL when the pid is unknown or the proc cannot be
 * referenced (e.g. still being created or already exiting).  When
 * 'locked' is non-zero the caller already holds the proc list
 * lock; otherwise it is taken and dropped here.
 */
proc_t
proc_findinternal(int pid, int locked)
{
	proc_t p = PROC_NULL;

	if (locked == 0) {
		proc_list_lock();
	}

	p = pfind_locked(pid);
	/* proc_ref_locked returns a different value on failure */
	if ((p == PROC_NULL) || (p != proc_ref_locked(p)))
		p = PROC_NULL;

	if (locked == 0) {
		proc_list_unlock();
	}

	return(p);
}
387 | |
/*
 * Return the proc that 'thread' belongs to, with a reference held,
 * or PROC_NULL.  A thread parked in vfork is attributed to the
 * vfork child proc (uu_proc) rather than its own task's proc.
 */
proc_t
proc_findthread(thread_t thread)
{
	proc_t p = PROC_NULL;
	struct uthread *uth;

	proc_list_lock();
	uth = get_bsdthread_info(thread);
	if (uth && (uth->uu_flag & UT_VFORK))
		p = uth->uu_proc;	/* vfork child */
	else
		p = (proc_t)(get_bsdthreadtask_info(thread));
	p = proc_ref_locked(p);
	proc_list_unlock();
	return(p);
}
404 | |
405 | void |
406 | uthread_reset_proc_refcount(void *uthread) { |
407 | uthread_t uth; |
408 | |
409 | uth = (uthread_t) uthread; |
410 | uth->uu_proc_refcount = 0; |
411 | |
412 | #if PROC_REF_DEBUG |
413 | if (proc_ref_tracking_disabled) { |
414 | return; |
415 | } |
416 | |
417 | uth->uu_pindex = 0; |
418 | #endif |
419 | } |
420 | |
#if PROC_REF_DEBUG
/*
 * Return the number of proc references currently charged to the
 * given uthread, or 0 when ref tracking is disabled.
 */
int
uthread_get_proc_refcount(void *uthread) {
	if (proc_ref_tracking_disabled) {
		return 0;
	}

	return ((uthread_t)uthread)->uu_proc_refcount;
}
#endif
435 | |
/*
 * Account a proc reference change (count is +1 or -1) against the
 * current uthread.  With PROC_REF_DEBUG, additionally capture a
 * backtrace for each reference taken, to help locate leaked refs.
 */
static void
record_procref(proc_t p __unused, int count) {
	uthread_t uth;

	uth = current_uthread();
	uth->uu_proc_refcount += count;

#if PROC_REF_DEBUG
	if (proc_ref_tracking_disabled) {
		return;
	}

	if (count == 1) {
		/* remember where this ref was taken, up to the tracking limit */
		if (uth->uu_pindex < NUM_PROC_REFS_TO_TRACK) {
			backtrace((uintptr_t *) &uth->uu_proc_pcs[uth->uu_pindex], PROC_REF_STACK_DEPTH);

			uth->uu_proc_ps[uth->uu_pindex] = p;
			uth->uu_pindex++;
		}
	}
#endif
}
458 | |
459 | static boolean_t |
460 | uthread_needs_to_wait_in_proc_refwait(void) { |
461 | uthread_t uth = current_uthread(); |
462 | |
463 | /* |
464 | * Allow threads holding no proc refs to wait |
465 | * in proc_refwait, allowing threads holding |
466 | * proc refs to wait in proc_refwait causes |
467 | * deadlocks and makes proc_find non-reentrant. |
468 | */ |
469 | if (uth->uu_proc_refcount == 0) |
470 | return TRUE; |
471 | |
472 | return FALSE; |
473 | } |
474 | |
/*
 * Drop a reference obtained via proc_find()/proc_ref_locked().
 * Always returns 0.
 */
int
proc_rele(proc_t p)
{
	proc_list_lock();
	proc_rele_locked(p);
	proc_list_unlock();

	return(0);
}
484 | |
/*
 * Return the calling process with a reference held, or PROC_NULL
 * when the reference cannot be taken (proc is exiting/draining).
 */
proc_t
proc_self(void)
{
	struct proc * p;

	p = current_proc();

	proc_list_lock();
	/* proc_ref_locked returns a different value on failure */
	if (p != proc_ref_locked(p))
		p = PROC_NULL;
	proc_list_unlock();
	return(p);
}
498 | |
499 | |
/*
 * Take a reference on p.  Caller holds the proc list lock; the
 * lock may be dropped and re-taken while sleeping in refwait.
 * Returns p on success; PROC_NULL when the proc is still being
 * created, has started exiting, or is being drained without the
 * refwait gate.
 */
proc_t
proc_ref_locked(proc_t p)
{
	proc_t p1 = p;
	int pid = proc_pid(p);

retry:
	/*
	 * if process still in creation or proc got recycled
	 * during msleep then return failure.
	 */
	if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0))
		return (PROC_NULL);

	/*
	 * Do not return process marked for termination
	 * or proc_refdrain called without ref wait.
	 * Wait for proc_refdrain_with_refwait to complete if
	 * process in refdrain and refwait flag is set, unless
	 * the current thread is holding to a proc_ref
	 * for any proc.
	 */
	if ((p->p_stat != SZOMB) &&
	    ((p->p_listflag & P_LIST_EXITED) == 0) &&
	    ((p->p_listflag & P_LIST_DEAD) == 0) &&
	    (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) ||
	     ((p->p_listflag & P_LIST_REFWAIT) != 0))) {
		if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) {
			msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait" , 0) ;
			/*
			 * the proc might have been recycled since we dropped
			 * the proc list lock, get the proc again.
			 */
			p = pfind_locked(pid);
			goto retry;
		}
		p->p_refcount++;
		record_procref(p, 1);
	}
	else
		p1 = PROC_NULL;

	return(p1);
}
544 | |
/*
 * Drop a reference taken with proc_ref_locked().  Caller holds the
 * proc list lock.  When the last reference goes away, wake any
 * thread blocked in proc_refdrain().
 */
void
proc_rele_locked(proc_t p)
{

	if (p->p_refcount > 0) {
		p->p_refcount--;
		record_procref(p, -1);
		/* last ref gone: let a pending refdrain proceed */
		if ((p->p_refcount == 0) && ((p->p_listflag & P_LIST_DRAINWAIT) == P_LIST_DRAINWAIT)) {
			p->p_listflag &= ~P_LIST_DRAINWAIT;
			wakeup(&p->p_refcount);
		}
	} else
		panic("proc_rele_locked -ve ref\n" );

}
560 | |
/*
 * Find a process that has begun exiting and take the exclusive
 * P_LIST_WAITING hold on it, sleeping if another thread already
 * controls the zombie.  Returns PROC_NULL when the pid is unknown,
 * the proc is still being created, or it has not started exiting.
 * Release the hold with proc_drop_zombref().
 */
proc_t
proc_find_zombref(int pid)
{
	proc_t p;

	proc_list_lock();

again:
	p = pfind_locked(pid);

	/* should we bail? */
	if ((p == PROC_NULL) /* not found */
	    || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */
	    || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */

		proc_list_unlock();
		return (PROC_NULL);
	}

	/* If someone else is controlling the (unreaped) zombie - wait */
	if ((p->p_listflag & P_LIST_WAITING) != 0) {
		(void)msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll" , 0);
		goto again;
	}
	p->p_listflag |= P_LIST_WAITING;

	proc_list_unlock();

	return(p);
}
591 | |
592 | void |
593 | proc_drop_zombref(proc_t p) |
594 | { |
595 | proc_list_lock(); |
596 | if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { |
597 | p->p_listflag &= ~P_LIST_WAITING; |
598 | wakeup(&p->p_stat); |
599 | } |
600 | proc_list_unlock(); |
601 | } |
602 | |
603 | |
/*
 * Wait for all outstanding references on p to drain and mark the
 * proc dead so no new references can be taken.
 */
void
proc_refdrain(proc_t p)
{
	proc_refdrain_with_refwait(p, FALSE);
}
609 | |
/*
 * Drain outstanding references on p.  When get_ref_and_allow_wait
 * is TRUE (the exec path), new proc_ref_locked() callers are gated
 * on P_LIST_REFWAIT instead of failing, the proc is NOT marked
 * dead, and a reference is returned to the caller (the gate is
 * later lifted via proc_refwake()).  Otherwise marks the proc
 * dead and returns NULL.
 */
proc_t
proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait)
{
	boolean_t initexec = FALSE;
	proc_list_lock();

	p->p_listflag |= P_LIST_DRAIN;
	if (get_ref_and_allow_wait) {
		/*
		 * All the calls to proc_ref_locked will wait
		 * for the flag to get cleared before returning a ref,
		 * unless the current thread is holding to a proc ref
		 * for any proc.
		 */
		p->p_listflag |= P_LIST_REFWAIT;
		if (p == initproc) {
			initexec = TRUE;
		}
	}

	/* Do not wait in ref drain for launchd exec */
	while (p->p_refcount && !initexec) {
		p->p_listflag |= P_LIST_DRAINWAIT;
		msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain" , 0) ;
	}

	p->p_listflag &= ~P_LIST_DRAIN;
	if (!get_ref_and_allow_wait) {
		p->p_listflag |= P_LIST_DEAD;
	} else {
		/* Return a ref to the caller */
		p->p_refcount++;
		record_procref(p, 1);
	}

	proc_list_unlock();

	if (get_ref_and_allow_wait) {
		return (p);
	}
	return NULL;
}
652 | |
/*
 * Clear the P_LIST_REFWAIT gate set by proc_refdrain_with_refwait()
 * and wake all threads sleeping in proc_ref_locked().
 */
void
proc_refwake(proc_t p)
{
	proc_list_lock();
	p->p_listflag &= ~P_LIST_REFWAIT;
	wakeup(&p->p_listflag);
	proc_list_unlock();
}
661 | |
/*
 * Take a "parent ref" on p's parent, preventing the parent from
 * completing its child drain while the caller uses it.  If a child
 * drain is in progress, waits for it (bounded to 5 retries).
 * Returns the parent or PROC_NULL; release with
 * proc_parentdropref().
 */
proc_t
proc_parentholdref(proc_t p)
{
	proc_t parent = PROC_NULL;
	proc_t pp;
	int loopcnt = 0;


	proc_list_lock();
loop:
	pp = p->p_pptr;
	/* no parent, zombie parent, or drain fully complete: fail */
	if ((pp == PROC_NULL) || (pp->p_stat == SZOMB) || ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED))) {
		parent = PROC_NULL;
		goto out;
	}

	/* drain started but not finished: wait for progress, bounded retries */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) {
		pp->p_listflag |= P_LIST_CHILDDRWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent" , 0);
		loopcnt++;
		if (loopcnt == 5) {
			parent = PROC_NULL;
			goto out;
		}
		goto loop;
	}

	/* no drain in progress: safe to take the parent ref */
	if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == 0) {
		pp->p_parentref++;
		parent = pp;
		goto out;
	}

out:
	proc_list_unlock();
	return(parent);
}
/*
 * Drop a parent ref taken with proc_parentholdref().  Wakes a
 * thread blocked in proc_childdrainstart() when the last parent
 * ref goes away.  'listlocked' is non-zero when the caller already
 * holds the proc list lock.  Always returns 0.
 */
int
proc_parentdropref(proc_t p, int listlocked)
{
	if (listlocked == 0)
		proc_list_lock();

	if (p->p_parentref > 0) {
		p->p_parentref--;
		/* last parent ref gone: unblock a pending child drain */
		if ((p->p_parentref == 0) && ((p->p_listflag & P_LIST_PARENTREFWAIT) == P_LIST_PARENTREFWAIT)) {
			p->p_listflag &= ~P_LIST_PARENTREFWAIT;
			wakeup(&p->p_parentref);
		}
	} else
		panic("proc_parentdropref -ve ref\n" );
	if (listlocked == 0)
		proc_list_unlock();

	return(0);
}
718 | |
/*
 * Begin draining p's children during exit: mark the drain as
 * started and sleep until every outstanding parent ref on p has
 * been dropped.  Caller holds the proc list lock.
 */
void
proc_childdrainstart(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART)
		panic("proc_childdrainstart: childdrain already started\n" );
#endif
	p->p_listflag |= P_LIST_CHILDDRSTART;
	/* wait for all that hold parentrefs to drop */
	while (p->p_parentref > 0) {
		p->p_listflag |= P_LIST_PARENTREFWAIT;
		msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart" , 0) ;
	}
}
733 | |
734 | |
/*
 * Finish draining p's children: mark the drain complete and wake
 * threads waiting on it (proc_parentholdref()/proc_parent()).
 * Caller holds the proc list lock.
 */
void
proc_childdrainend(proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if (p->p_childrencnt > 0)
		panic("exiting: children stil hanging around\n" );
#endif
	p->p_listflag |= P_LIST_CHILDDRAINED;
	if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) {
		p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT);
		wakeup(&p->p_childrencnt);
	}
}
748 | |
/*
 * Debug-only sanity check run while a proc is being freed: panic
 * if it is still hashed or has outstanding children, references,
 * or parent refs.
 */
void
proc_checkdeadrefs(__unused proc_t p)
{
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INHASH) != 0)
		panic("proc being freed and still in hash %p: %u\n" , p, p->p_listflag);
	if (p->p_childrencnt != 0)
		panic("proc being freed and pending children cnt %p:%d\n" , p, p->p_childrencnt);
	if (p->p_refcount != 0)
		panic("proc being freed and pending refcount %p:%d\n" , p, p->p_refcount);
	if (p->p_parentref != 0)
		panic("proc being freed and pending parentrefs %p:%d\n" , p, p->p_parentref);
#endif
}
763 | |
764 | int |
765 | proc_pid(proc_t p) |
766 | { |
767 | if (p != NULL) |
768 | return (p->p_pid); |
769 | return -1; |
770 | } |
771 | |
772 | int |
773 | proc_ppid(proc_t p) |
774 | { |
775 | if (p != NULL) |
776 | return (p->p_ppid); |
777 | return -1; |
778 | } |
779 | |
780 | int |
781 | proc_selfpid(void) |
782 | { |
783 | return (current_proc()->p_pid); |
784 | } |
785 | |
786 | int |
787 | proc_selfppid(void) |
788 | { |
789 | return (current_proc()->p_ppid); |
790 | } |
791 | |
792 | int |
793 | proc_selfcsflags(void) |
794 | { |
795 | return (current_proc()->p_csflags); |
796 | } |
797 | |
798 | #if CONFIG_DTRACE |
799 | static proc_t |
800 | dtrace_current_proc_vforking(void) |
801 | { |
802 | thread_t th = current_thread(); |
803 | struct uthread *ut = get_bsdthread_info(th); |
804 | |
805 | if (ut && |
806 | ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) { |
807 | /* |
808 | * Handle the narrow window where we're in the vfork syscall, |
809 | * but we're not quite ready to claim (in particular, to DTrace) |
810 | * that we're running as the child. |
811 | */ |
812 | return (get_bsdtask_info(get_threadtask(th))); |
813 | } |
814 | return (current_proc()); |
815 | } |
816 | |
/* pid of the DTrace-visible current process (vfork-aware) */
int
dtrace_proc_selfpid(void)
{
	return (dtrace_current_proc_vforking()->p_pid);
}
822 | |
/* parent pid of the DTrace-visible current process (vfork-aware) */
int
dtrace_proc_selfppid(void)
{
	return (dtrace_current_proc_vforking()->p_ppid);
}
828 | |
/* real uid of the DTrace-visible current process (vfork-aware) */
uid_t
dtrace_proc_selfruid(void)
{
	return (dtrace_current_proc_vforking()->p_ruid);
}
#endif /* CONFIG_DTRACE */
835 | |
/*
 * Return p's parent with a reference held.  If the parent has
 * started exiting but has not finished draining its children,
 * wait for drain progress and retry the lookup.
 */
proc_t
proc_parent(proc_t p)
{
	proc_t parent;
	proc_t pp;

	proc_list_lock();
loop:
	pp = p->p_pptr;
	parent = proc_ref_locked(pp);
	/* parent exiting but children not yet drained: wait and retry */
	if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){
		pp->p_listflag |= P_LIST_CHILDLKWAIT;
		msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent" , 0);
		goto loop;
	}
	proc_list_unlock();
	return(parent);
}
854 | |
855 | static boolean_t |
856 | proc_parent_is_currentproc(proc_t p) |
857 | { |
858 | boolean_t ret = FALSE; |
859 | |
860 | proc_list_lock(); |
861 | if (p->p_pptr == current_proc()) |
862 | ret = TRUE; |
863 | |
864 | proc_list_unlock(); |
865 | return ret; |
866 | } |
867 | |
868 | void |
869 | proc_name(int pid, char * buf, int size) |
870 | { |
871 | proc_t p; |
872 | |
873 | if ((p = proc_find(pid)) != PROC_NULL) { |
874 | strlcpy(buf, &p->p_comm[0], size); |
875 | proc_rele(p); |
876 | } |
877 | } |
878 | |
879 | void |
880 | proc_name_kdp(task_t t, char * buf, int size) |
881 | { |
882 | proc_t p = get_bsdtask_info(t); |
883 | if (p == PROC_NULL) |
884 | return; |
885 | |
886 | if ((size_t)size > sizeof(p->p_comm)) |
887 | strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size)); |
888 | else |
889 | strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size)); |
890 | } |
891 | |
892 | int |
893 | proc_threadname_kdp(void * uth, char * buf, size_t size) |
894 | { |
895 | if (size < MAXTHREADNAMESIZE) { |
896 | /* this is really just a protective measure for the future in |
897 | * case the thread name size in stackshot gets out of sync with |
898 | * the BSD max thread name size. Note that bsd_getthreadname |
899 | * doesn't take input buffer size into account. */ |
900 | return -1; |
901 | } |
902 | |
903 | if (uth != NULL) { |
904 | bsd_getthreadname(uth, buf); |
905 | } |
906 | return 0; |
907 | } |
908 | |
909 | |
910 | /* note that this function is generally going to be called from stackshot, |
911 | * and the arguments will be coming from a struct which is declared packed |
912 | * thus the input arguments will in general be unaligned. We have to handle |
913 | * that here. */ |
914 | void |
915 | proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime) |
916 | { |
917 | proc_t pp = (proc_t)p; |
918 | if (pp != PROC_NULL) { |
919 | if (tv_sec != NULL) |
920 | *tv_sec = pp->p_start.tv_sec; |
921 | if (tv_usec != NULL) |
922 | *tv_usec = pp->p_start.tv_usec; |
923 | if (abstime != NULL) { |
924 | if (pp->p_stats != NULL) |
925 | *abstime = pp->p_stats->ps_start; |
926 | else |
927 | *abstime = 0; |
928 | } |
929 | } |
930 | } |
931 | |
932 | char * |
933 | proc_name_address(void *p) |
934 | { |
935 | return &((proc_t)p)->p_comm[0]; |
936 | } |
937 | |
938 | char * |
939 | proc_best_name(proc_t p) |
940 | { |
941 | if (p->p_name[0] != 0) |
942 | return (&p->p_name[0]); |
943 | return (&p->p_comm[0]); |
944 | } |
945 | |
946 | void |
947 | proc_selfname(char * buf, int size) |
948 | { |
949 | proc_t p; |
950 | |
951 | if ((p = current_proc())!= (proc_t)0) { |
952 | strlcpy(buf, &p->p_comm[0], size); |
953 | } |
954 | } |
955 | |
956 | void |
957 | proc_signal(int pid, int signum) |
958 | { |
959 | proc_t p; |
960 | |
961 | if ((p = proc_find(pid)) != PROC_NULL) { |
962 | psignal(p, signum); |
963 | proc_rele(p); |
964 | } |
965 | } |
966 | |
967 | int |
968 | proc_issignal(int pid, sigset_t mask) |
969 | { |
970 | proc_t p; |
971 | int error=0; |
972 | |
973 | if ((p = proc_find(pid)) != PROC_NULL) { |
974 | error = proc_pendingsignals(p, mask); |
975 | proc_rele(p); |
976 | } |
977 | |
978 | return(error); |
979 | } |
980 | |
981 | int |
982 | proc_noremotehang(proc_t p) |
983 | { |
984 | int retval = 0; |
985 | |
986 | if (p) |
987 | retval = p->p_flag & P_NOREMOTEHANG; |
988 | return(retval? 1: 0); |
989 | |
990 | } |
991 | |
992 | int |
993 | proc_exiting(proc_t p) |
994 | { |
995 | int retval = 0; |
996 | |
997 | if (p) |
998 | retval = p->p_lflag & P_LEXIT; |
999 | return(retval? 1: 0); |
1000 | } |
1001 | |
1002 | int |
1003 | proc_in_teardown(proc_t p) |
1004 | { |
1005 | int retval = 0; |
1006 | |
1007 | if (p) |
1008 | retval = p->p_lflag & P_LPEXIT; |
1009 | return(retval? 1: 0); |
1010 | |
1011 | } |
1012 | |
1013 | int |
1014 | proc_forcequota(proc_t p) |
1015 | { |
1016 | int retval = 0; |
1017 | |
1018 | if (p) |
1019 | retval = p->p_flag & P_FORCEQUOTA; |
1020 | return(retval? 1: 0); |
1021 | |
1022 | } |
1023 | |
1024 | int |
1025 | proc_suser(proc_t p) |
1026 | { |
1027 | kauth_cred_t my_cred; |
1028 | int error; |
1029 | |
1030 | my_cred = kauth_cred_proc_ref(p); |
1031 | error = suser(my_cred, &p->p_acflag); |
1032 | kauth_cred_unref(&my_cred); |
1033 | return(error); |
1034 | } |
1035 | |
/* Return the Mach task backing proc. */
task_t
proc_task(proc_t proc)
{
	return (task_t)proc->task;
}
1041 | |
1042 | /* |
1043 | * Obtain the first thread in a process |
1044 | * |
1045 | * XXX This is a bad thing to do; it exists predominantly to support the |
1046 | * XXX use of proc_t's in places that should really be using |
1047 | * XXX thread_t's instead. This maintains historical behaviour, but really |
1048 | * XXX needs an audit of the context (proxy vs. not) to clean up. |
1049 | */ |
1050 | thread_t |
1051 | proc_thread(proc_t proc) |
1052 | { |
1053 | uthread_t uth = TAILQ_FIRST(&proc->p_uthlist); |
1054 | |
1055 | if (uth != NULL) |
1056 | return(uth->uu_context.vc_thread); |
1057 | |
1058 | return(NULL); |
1059 | } |
1060 | |
/*
 * Return p's credential pointer.  No reference is taken; the
 * caller must ensure p remains valid while using it.
 */
kauth_cred_t
proc_ucred(proc_t p)
{
	return(p->p_ucred);
}
1066 | |
/* Return the BSD uthread for the calling thread. */
struct uthread *
current_uthread()
{
	return (struct uthread *)get_bsdthread_info(current_thread());
}
1074 | |
1075 | |
/* Return non-zero when p is a 64-bit process. */
int
proc_is64bit(proc_t p)
{
	return(IS_64BIT_PROCESS(p));
}
1081 | |
/*
 * Return non-zero when p's task uses 64-bit data.
 * p->task must already be set.
 */
int
proc_is64bit_data(proc_t p)
{
	assert(p->task);
	return (int)task_get_64bit_data(p->task);
}
1088 | |
/* Return p's pid version (p_idversion). */
int
proc_pidversion(proc_t p)
{
	return(p->p_idversion);
}
1094 | |
/* Return the persona id associated with p. */
uint32_t
proc_persona_id(proc_t p)
{
	return (uint32_t)persona_id_from_proc(p);
}
1100 | |
/* Return p's cached uid (p_uid). */
uint32_t
proc_getuid(proc_t p)
{
	return(p->p_uid);
}
1106 | |
/* Return p's cached gid (p_gid). */
uint32_t
proc_getgid(proc_t p)
{
	return(p->p_gid);
}
1112 | |
/* Return p's unique process id (p_uniqueid). */
uint64_t
proc_uniqueid(proc_t p)
{
	return(p->p_uniqueid);
}
1118 | |
/* Return the unique id of p's parent (p_puniqueid). */
uint64_t
proc_puniqueid(proc_t p)
{
	return(p->p_puniqueid);
}
1124 | |
/*
 * Fill ids[] with the coalition ids of p's task, or zeros when
 * coalitions are not configured.
 */
void
proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES])
{
#if CONFIG_COALITIONS
	task_coalition_ids(p->task, ids);
#else
	memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES]));
#endif
	return;
}
1135 | |
1136 | uint64_t |
1137 | proc_was_throttled(proc_t p) |
1138 | { |
1139 | return (p->was_throttled); |
1140 | } |
1141 | |
1142 | uint64_t |
1143 | proc_did_throttle(proc_t p) |
1144 | { |
1145 | return (p->did_throttle); |
1146 | } |
1147 | |
/* Fetch the code-directory hash of the process' executable vnode at its
 * text offset into cdhash; returns the vn_getcdhash() error code. */
int
proc_getcdhash(proc_t p, unsigned char *cdhash)
{
	return vn_getcdhash(p->p_textvp, p->p_textoff, cdhash);
}
1153 | |
1154 | void |
1155 | proc_getexecutableuuid(proc_t p, unsigned char *uuidbuf, unsigned long size) |
1156 | { |
1157 | if (size >= sizeof(p->p_uuid)) { |
1158 | memcpy(uuidbuf, p->p_uuid, sizeof(p->p_uuid)); |
1159 | } |
1160 | } |
1161 | |
1162 | /* Return vnode for executable with an iocount. Must be released with vnode_put() */ |
1163 | vnode_t |
1164 | proc_getexecutablevnode(proc_t p) |
1165 | { |
1166 | vnode_t tvp = p->p_textvp; |
1167 | |
1168 | if ( tvp != NULLVP) { |
1169 | if (vnode_getwithref(tvp) == 0) { |
1170 | return tvp; |
1171 | } |
1172 | } |
1173 | |
1174 | return NULLVP; |
1175 | } |
1176 | |
1177 | |
1178 | void |
1179 | bsd_set_dependency_capable(task_t task) |
1180 | { |
1181 | proc_t p = get_bsdtask_info(task); |
1182 | |
1183 | if (p) { |
1184 | OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag); |
1185 | } |
1186 | } |
1187 | |
1188 | |
1189 | #ifndef __arm__ |
1190 | int |
1191 | IS_64BIT_PROCESS(proc_t p) |
1192 | { |
1193 | if (p && (p->p_flag & P_LP64)) |
1194 | return(1); |
1195 | else |
1196 | return(0); |
1197 | } |
1198 | #endif |
1199 | |
1200 | /* |
1201 | * Locate a process by number |
1202 | */ |
1203 | proc_t |
1204 | pfind_locked(pid_t pid) |
1205 | { |
1206 | proc_t p; |
1207 | #if DEBUG |
1208 | proc_t q; |
1209 | #endif |
1210 | |
1211 | if (!pid) |
1212 | return (kernproc); |
1213 | |
1214 | for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) { |
1215 | if (p->p_pid == pid) { |
1216 | #if DEBUG |
1217 | for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) { |
1218 | if ((p !=q) && (q->p_pid == pid)) |
1219 | panic("two procs with same pid %p:%p:%d:%d\n" , p, q, p->p_pid, q->p_pid); |
1220 | } |
1221 | #endif |
1222 | return (p); |
1223 | } |
1224 | } |
1225 | return (NULL); |
1226 | } |
1227 | |
1228 | /* |
1229 | * Locate a zombie by PID |
1230 | */ |
1231 | __private_extern__ proc_t |
1232 | pzfind(pid_t pid) |
1233 | { |
1234 | proc_t p; |
1235 | |
1236 | |
1237 | proc_list_lock(); |
1238 | |
1239 | for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) |
1240 | if (p->p_pid == pid) |
1241 | break; |
1242 | |
1243 | proc_list_unlock(); |
1244 | |
1245 | return (p); |
1246 | } |
1247 | |
1248 | /* |
1249 | * Locate a process group by number |
1250 | */ |
1251 | |
1252 | struct pgrp * |
1253 | pgfind(pid_t pgid) |
1254 | { |
1255 | struct pgrp * pgrp; |
1256 | |
1257 | proc_list_lock(); |
1258 | pgrp = pgfind_internal(pgid); |
1259 | if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) |
1260 | pgrp = PGRP_NULL; |
1261 | else |
1262 | pgrp->pg_refcount++; |
1263 | proc_list_unlock(); |
1264 | return(pgrp); |
1265 | } |
1266 | |
1267 | |
1268 | |
1269 | struct pgrp * |
1270 | pgfind_internal(pid_t pgid) |
1271 | { |
1272 | struct pgrp *pgrp; |
1273 | |
1274 | for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) |
1275 | if (pgrp->pg_id == pgid) |
1276 | return (pgrp); |
1277 | return (NULL); |
1278 | } |
1279 | |
1280 | void |
1281 | pg_rele(struct pgrp * pgrp) |
1282 | { |
1283 | if(pgrp == PGRP_NULL) |
1284 | return; |
1285 | pg_rele_dropref(pgrp); |
1286 | } |
1287 | |
/*
 * Drop one reference on pgrp. If this is the last reference and the group
 * has been flagged for termination, hand the final reference to
 * pgdelete_dropref() which tears the group (and possibly its session) down.
 */
void
pg_rele_dropref(struct pgrp * pgrp)
{
	proc_list_lock();
	if ((pgrp->pg_refcount == 1) && ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) == PGRP_FLAG_TERMINATE)) {
		/* pgdelete_dropref() consumes our reference; it re-takes the list lock itself. */
		proc_list_unlock();
		pgdelete_dropref(pgrp);
		return;
	}

	pgrp->pg_refcount--;
	proc_list_unlock();
}
1301 | |
1302 | struct session * |
1303 | session_find_internal(pid_t sessid) |
1304 | { |
1305 | struct session *sess; |
1306 | |
1307 | for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) |
1308 | if (sess->s_sid == sessid) |
1309 | return (sess); |
1310 | return (NULL); |
1311 | } |
1312 | |
1313 | |
1314 | /* |
1315 | * Make a new process ready to become a useful member of society by making it |
1316 | * visible in all the right places and initialize its own lists to empty. |
1317 | * |
1318 | * Parameters: parent The parent of the process to insert |
1319 | * child The child process to insert |
1320 | * |
1321 | * Returns: (void) |
1322 | * |
1323 | * Notes: Insert a child process into the parents process group, assign |
1324 | * the child the parent process pointer and PPID of the parent, |
1325 | * place it on the parents p_children list as a sibling, |
1326 | * initialize its own child list, place it in the allproc list, |
1327 | * insert it in the proper hash bucket, and initialize its |
1328 | * event list. |
1329 | */ |
void
pinsertchild(proc_t parent, proc_t child)
{
	struct pgrp * pg;

	/* Initialize the child's own lists before it becomes visible. */
	LIST_INIT(&child->p_children);
	TAILQ_INIT(&child->p_evlist);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
	child->p_puniqueid = parent->p_uniqueid;
	child->p_xhighbits = 0;

	/* Join the parent's process group; pg carries a ref from proc_pgrp(). */
	pg = proc_pgrp(parent);
	pgrp_add(pg, parent, child);
	pg_rele(pg);

	proc_list_lock();

#if CONFIG_MEMORYSTATUS
	/* NOTE(review): called with the proc list lock held — presumably
	 * memorystatus_add() expects that; confirm against its definition. */
	memorystatus_add(child, TRUE);
#endif

	parent->p_childrencnt++;
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	LIST_INSERT_HEAD(&allproc, child, p_list);
	/* mark the completion of proc creation */
	child->p_listflag &= ~P_LIST_INCREATE;

	proc_list_unlock();
}
1361 | |
1362 | /* |
1363 | * Move p to a new or existing process group (and session) |
1364 | * |
1365 | * Returns: 0 Success |
1366 | * ESRCH No such process |
1367 | */ |
int
enterpgrp(proc_t p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *mypgrp;
	struct session * procsp;

	/* pgfind/proc_pgrp/proc_session each return a referenced object (or
	 * the NULL sentinel); every exit path below must drop what it holds. */
	pgrp = pgfind(pgid);
	mypgrp = proc_pgrp(p);
	procsp = proc_session(p);

#if DIAGNOSTIC
	if (pgrp != NULL && mksess)	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp" );
	if (SESS_LEADER(p, procsp))
		panic("enterpgrp: session leader attempted setpgrp" );
#endif
	if (pgrp == PGRP_NULL) {
		pid_t savepid = p->p_pid;
		proc_t np = PROC_NULL;
		/*
		 * new process group
		 */
#if DIAGNOSTIC
		if (p->p_pid != pgid)
			panic("enterpgrp: new pgrp and pid != pgid" );
#endif
		MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
		    M_WAITOK);
		if (pgrp == NULL)
			panic("enterpgrp: M_PGRP zone depleted" );
		/* Re-validate that p is still the live proc for savepid; the
		 * allocation above may have blocked while p changed state. */
		if ((np = proc_find(savepid)) == NULL || np != p) {
			if (np != PROC_NULL)
				proc_rele(np);
			if (mypgrp != PGRP_NULL)
				pg_rele(mypgrp);
			if (procsp != SESSION_NULL)
				session_rele(procsp);
			FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP);
			return (ESRCH);
		}
		proc_rele(np);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC_ZONE(sess, struct session *,
			    sizeof(struct session), M_SESSION, M_WAITOK);
			if (sess == NULL)
				panic("enterpgrp: M_SESSION zone depleted" );
			/* Initialize the new session; p becomes its leader and
			 * it starts with our single reference. */
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = TTY_NULL;
			sess->s_flags = 0;
			sess->s_listflags = 0;
			sess->s_ttypgrpid = NO_PID;
#if CONFIG_FINE_LOCK_GROUPS
			lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr);
#else
			lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr);
#endif
			/* Inherit the login name from the old session. */
			bcopy(procsp->s_login, sess->s_login,
			    sizeof(sess->s_login));
			/* A new session leader has no controlling terminal. */
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			proc_list_lock();
			LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash);
			proc_list_unlock();
			pgrp->pg_session = sess;
#if DIAGNOSTIC
			if (p != current_proc())
				panic("enterpgrp: mksession and p != curproc" );
#endif
		} else {
			/* New group joins p's existing session; take a ref. */
			proc_list_lock();
			pgrp->pg_session = procsp;

			if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
				panic("enterpgrp: providing ref to terminating session " );
			pgrp->pg_session->s_count++;
			proc_list_unlock();
		}
		pgrp->pg_id = pgid;
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
		lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_membercnt = 0;
		pgrp->pg_jobc = 0;
		proc_list_lock();
		/* The initial refcount (1) is consumed by pg_rele() below. */
		pgrp->pg_refcount = 1;
		pgrp->pg_listflags = 0;
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		proc_list_unlock();
	} else if (pgrp == mypgrp) {
		/* Already a member of the target group: drop both refs and return. */
		pg_rele(pgrp);
		if (mypgrp != NULL)
			pg_rele(mypgrp);
		if (procsp != SESSION_NULL)
			session_rele(procsp);
		return (0);
	}

	if (procsp != SESSION_NULL)
		session_rele(procsp);
	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, mypgrp, 0);

	if(mypgrp != PGRP_NULL)
		pg_rele(mypgrp);
	pgrp_replace(p, pgrp);
	pg_rele(pgrp);

	return(0);
}
1493 | |
1494 | /* |
1495 | * remove process from process group |
1496 | */ |
1497 | int |
1498 | leavepgrp(proc_t p) |
1499 | { |
1500 | |
1501 | pgrp_remove(p); |
1502 | return (0); |
1503 | } |
1504 | |
1505 | /* |
1506 | * delete a process group |
1507 | */ |
/*
 * Tear down a process group whose final reference is being dropped.
 * Consumes the caller's reference. If the group's session also reaches a
 * zero count, the session is torn down and freed as well. Both objects are
 * flagged TERMINATE/DEAD under the proc list lock so concurrent lookups
 * (pgfind, session refs) refuse to hand out new references.
 */
static void
pgdelete_dropref(struct pgrp *pgrp)
{
	struct tty *ttyp;
	int emptypgrp = 1;
	struct session *sessp;


	pgrp_lock(pgrp);
	if (pgrp->pg_membercnt != 0) {
		emptypgrp = 0;
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	pgrp->pg_refcount--;
	/* Bail if members remain (group got repopulated or was never empty). */
	if ((emptypgrp == 0) || (pgrp->pg_membercnt != 0)) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_TERMINATE;

	/* Another holder still has a reference; it will finish the teardown. */
	if (pgrp->pg_refcount > 0) {
		proc_list_unlock();
		return;
	}

	pgrp->pg_listflags |= PGRP_FLAG_DEAD;
	LIST_REMOVE(pgrp, pg_hash);

	proc_list_unlock();

	/* Disassociate the controlling tty from this (dying) foreground group. */
	ttyp = SESSION_TP(pgrp->pg_session);
	if (ttyp != TTY_NULL) {
		if (ttyp->t_pgrp == pgrp) {
			tty_lock(ttyp);
			/* Re-check after acquiring the lock */
			if (ttyp->t_pgrp == pgrp) {
				ttyp->t_pgrp = NULL;
				pgrp->pg_session->s_ttypgrpid = NO_PID;
			}
			tty_unlock(ttyp);
		}
	}

	proc_list_lock();

	sessp = pgrp->pg_session;
	if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
		panic("pg_deleteref: manipulating refs of already terminating session" );
	/* Drop the group's session reference; last one out frees the session. */
	if (--sessp->s_count == 0) {
		if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("pg_deleteref: terminating already terminated session" );
		sessp->s_listflags |= S_LIST_TERM;
		ttyp = SESSION_TP(sessp);
		LIST_REMOVE(sessp, s_hash);
		proc_list_unlock();
		/* Sever the tty's back-pointer to the session (tty lock required). */
		if (ttyp != TTY_NULL) {
			tty_lock(ttyp);
			if (ttyp->t_session == sessp)
				ttyp->t_session = NULL;
			tty_unlock(ttyp);
		}
		proc_list_lock();
		sessp->s_listflags |= S_LIST_DEAD;
		if (sessp->s_count != 0)
			panic("pg_deleteref: freeing session in use" );
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sessp, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
	lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp);
#else
	lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp);
#endif
	FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP);
}
1592 | |
1593 | |
1594 | /* |
1595 | * Adjust pgrp jobc counters when specified process changes process group. |
1596 | * We count the number of processes in each process group that "qualify" |
1597 | * the group for terminal job control (those with a parent in a different |
1598 | * process group of the same session). If that count reaches zero, the |
1599 | * process group becomes orphaned. Check both the specified process' |
1600 | * process group and that of its children. |
1601 | * entering == 0 => p is leaving specified group. |
1602 | * entering == 1 => p is entering specified group. |
1603 | */ |
/*
 * Per-child callback for fixjobc(): adjust the job-control qualifying
 * count (pg_jobc) of the child's process group when the child's parent
 * moves between groups. Dropping pg_jobc to zero orphans the group.
 */
int
fixjob_callback(proc_t p, void * arg)
{
	struct fixjob_iterargs *fp;
	struct pgrp * pg, *hispg;
	struct session * mysession, *hissess;
	int entering;

	/* Unpack the iteration context set up by fixjobc(). */
	fp = (struct fixjob_iterargs *)arg;
	pg = fp->pg;
	mysession = fp->mysession;
	entering = fp->entering;

	/* Referenced group/session of this child; released below. */
	hispg = proc_pgrp(p);
	hissess = proc_session(p);

	/* The child qualifies its group only if that group differs from the
	 * parent's (new/old) group but shares the same session. */
	if ((hispg != pg) &&
	    (hissess == mysession)) {
		pgrp_lock(hispg);
		if (entering) {
			hispg->pg_jobc++;
			pgrp_unlock(hispg);
		} else if (--hispg->pg_jobc == 0) {
			/* orphanpg() takes the pgrp lock itself; drop it first. */
			pgrp_unlock(hispg);
			orphanpg(hispg);
		} else
			pgrp_unlock(hispg);
	}
	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispg != PGRP_NULL)
		pg_rele(hispg);

	return(PROC_RETURNED);
}
1639 | |
/*
 * Recompute job-control eligibility (pg_jobc) for pgrp and for the groups
 * of p's children when p enters (entering==1) or leaves (entering==0) pgrp.
 * A pg_jobc transition to zero orphans the affected group.
 */
void
fixjobc(proc_t p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp = PGRP_NULL;
	struct session *hissess = SESSION_NULL;
	struct session *mysession = pgrp->pg_session;
	proc_t parent;
	struct fixjob_iterargs fjarg;
	boolean_t proc_parent_self;

	/*
	 * Check if p's parent is current proc, if yes then no need to take
	 * a ref; calling proc_parent with current proc as parent may
	 * deadlock if current proc is exiting.
	 */
	proc_parent_self = proc_parent_is_currentproc(p);
	if (proc_parent_self)
		parent = current_proc();
	else
		parent = proc_parent(p);

	if (parent != PROC_NULL) {
		/* Referenced group/session of the parent; released below. */
		hispgrp = proc_pgrp(parent);
		hissess = proc_session(parent);
		if (!proc_parent_self)
			proc_rele(parent);
	}


	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp != pgrp) &&
	    (hissess == mysession)) {
		pgrp_lock(pgrp);
		if (entering) {
			pgrp->pg_jobc++;
			pgrp_unlock(pgrp);
		}else if (--pgrp->pg_jobc == 0) {
			/* orphanpg() re-takes the pgrp lock; drop it first. */
			pgrp_unlock(pgrp);
			orphanpg(pgrp);
		} else
			pgrp_unlock(pgrp);
	}

	if (hissess != SESSION_NULL)
		session_rele(hissess);
	if (hispgrp != PGRP_NULL)
		pg_rele(hispgrp);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	fjarg.pg = pgrp;
	fjarg.mysession = mysession;
	fjarg.entering = entering;
	proc_childrenwalk(p, fixjob_callback, &fjarg);
}
1701 | |
1702 | /* |
1703 | * A process group has become orphaned; if there are any stopped processes in |
1704 | * the group, hang-up all process in that group. |
1705 | */ |
1706 | static void |
1707 | orphanpg(struct pgrp *pgrp) |
1708 | { |
1709 | pid_t *pid_list; |
1710 | proc_t p; |
1711 | vm_size_t pid_list_size = 0; |
1712 | vm_size_t pid_list_size_needed = 0; |
1713 | int pid_count = 0; |
1714 | int pid_count_available = 0; |
1715 | |
1716 | assert(pgrp != NULL); |
1717 | |
1718 | /* allocate outside of the pgrp_lock */ |
1719 | for (;;) { |
1720 | pgrp_lock(pgrp); |
1721 | |
1722 | boolean_t should_iterate = FALSE; |
1723 | pid_count_available = 0; |
1724 | |
1725 | PGMEMBERS_FOREACH(pgrp, p) { |
1726 | pid_count_available++; |
1727 | |
1728 | if (p->p_stat == SSTOP) { |
1729 | should_iterate = TRUE; |
1730 | } |
1731 | } |
1732 | |
1733 | if (pid_count_available == 0 || !should_iterate) { |
1734 | pgrp_unlock(pgrp); |
1735 | return; |
1736 | } |
1737 | |
1738 | pid_list_size_needed = pid_count_available * sizeof(pid_t); |
1739 | if (pid_list_size >= pid_list_size_needed) { |
1740 | break; |
1741 | } |
1742 | pgrp_unlock(pgrp); |
1743 | |
1744 | if (pid_list_size != 0) { |
1745 | kfree(pid_list, pid_list_size); |
1746 | } |
1747 | pid_list = kalloc(pid_list_size_needed); |
1748 | if (!pid_list) { |
1749 | return; |
1750 | } |
1751 | pid_list_size = pid_list_size_needed; |
1752 | } |
1753 | |
1754 | /* no orphaned processes */ |
1755 | if (pid_list_size == 0) { |
1756 | pgrp_unlock(pgrp); |
1757 | return; |
1758 | } |
1759 | |
1760 | PGMEMBERS_FOREACH(pgrp, p) { |
1761 | pid_list[pid_count++] = proc_pid(p); |
1762 | if (pid_count >= pid_count_available) { |
1763 | break; |
1764 | } |
1765 | } |
1766 | pgrp_unlock(pgrp); |
1767 | |
1768 | if (pid_count == 0) { |
1769 | goto out; |
1770 | } |
1771 | |
1772 | for (int i = 0; i < pid_count; i++) { |
1773 | /* do not handle kernproc */ |
1774 | if (pid_list[i] == 0) { |
1775 | continue; |
1776 | } |
1777 | p = proc_find(pid_list[i]); |
1778 | if (!p) { |
1779 | continue; |
1780 | } |
1781 | |
1782 | proc_transwait(p, 0); |
1783 | pt_setrunnable(p); |
1784 | psignal(p, SIGHUP); |
1785 | psignal(p, SIGCONT); |
1786 | proc_rele(p); |
1787 | } |
1788 | |
1789 | out: |
1790 | kfree(pid_list, pid_list_size); |
1791 | return; |
1792 | } |
1793 | |
1794 | int |
1795 | proc_is_classic(proc_t p __unused) |
1796 | { |
1797 | return (0); |
1798 | } |
1799 | |
1800 | /* XXX Why does this function exist? Need to kill it off... */ |
1801 | proc_t |
1802 | current_proc_EXTERNAL(void) |
1803 | { |
1804 | return (current_proc()); |
1805 | } |
1806 | |
1807 | int |
1808 | proc_is_forcing_hfs_case_sensitivity(proc_t p) |
1809 | { |
1810 | return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0; |
1811 | } |
1812 | |
1813 | #if CONFIG_COREDUMP |
1814 | /* |
1815 | * proc_core_name(name, uid, pid) |
1816 | * Expand the name described in corefilename, using name, uid, and pid. |
1817 | * corefilename is a printf-like string, with three format specifiers: |
1818 | * %N name of process ("name") |
1819 | * %P process id (pid) |
1820 | * %U user id (uid) |
1821 | * For example, "%N.core" is the default; they can be disabled completely |
1822 | * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P". |
1823 | * This is controlled by the sysctl variable kern.corefile (see above). |
1824 | */ |
__private_extern__ int
proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name,
		size_t cf_name_len)
{
	const char *format, *appendstr;
	char id_buf[11];		/* Buffer for pid/uid -- max 4B */
	size_t i, l, n;

	/* No destination buffer counts as "too long". */
	if (cf_name == NULL)
		goto toolong;

	/* Expand the kern.corefile template; i walks the template, n tracks
	 * bytes emitted into cf_name. */
	format = corefilename;
	for (i = 0, n = 0; n < cf_name_len && format[i]; i++) {
		switch (format[i]) {
		case '%':	/* Format character */
			i++;
			switch (format[i]) {
			case '%':
				appendstr = "%" ;
				break;
			case 'N':	/* process name */
				appendstr = name;
				break;
			case 'P':	/* process id */
				/* NOTE(review): pid is signed but printed with
				 * %u — presumably pids are never negative here;
				 * confirm. */
				snprintf(id_buf, sizeof(id_buf), "%u" , pid);
				appendstr = id_buf;
				break;
			case 'U':	/* user id */
				snprintf(id_buf, sizeof(id_buf), "%u" , uid);
				appendstr = id_buf;
				break;
			case '\0': /* format string ended in % symbol */
				goto endofstring;
			default:
				appendstr = "" ;
				log(LOG_ERR,
				    "Unknown format character %c in `%s'\n" ,
				    format[i], format);
			}
			l = strlen(appendstr);
			/* Reject expansion that would not fit (plus NUL). */
			if ((n + l) >= cf_name_len)
				goto toolong;
			bcopy(appendstr, cf_name + n, l);
			n += l;
			break;
		default:
			/* Ordinary template character: copy through. */
			cf_name[n++] = format[i];
		}
	}
	/* Loop ended because the buffer filled before the template did. */
	if (format[i] != '\0')
		goto toolong;
	return (0);
toolong:
	log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n" ,
	    (long)pid, name, (uint32_t)uid);
	return (1);
endofstring:
	log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n" ,
	    (long)pid, name, (uint32_t)uid);
	return (1);
}
1886 | #endif /* CONFIG_COREDUMP */ |
1887 | |
1888 | /* Code Signing related routines */ |
1889 | |
1890 | int |
1891 | csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval) |
1892 | { |
1893 | return(csops_internal(uap->pid, uap->ops, uap->useraddr, |
1894 | uap->usersize, USER_ADDR_NULL)); |
1895 | } |
1896 | |
1897 | int |
1898 | csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval) |
1899 | { |
1900 | if (uap->uaudittoken == USER_ADDR_NULL) |
1901 | return(EINVAL); |
1902 | return(csops_internal(uap->pid, uap->ops, uap->useraddr, |
1903 | uap->usersize, uap->uaudittoken)); |
1904 | } |
1905 | |
1906 | static int |
1907 | csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uaddr) |
1908 | { |
1909 | char [8] = { 0 }; |
1910 | int error; |
1911 | |
1912 | if (usize < sizeof(fakeheader)) |
1913 | return ERANGE; |
1914 | |
1915 | /* if no blob, fill in zero header */ |
1916 | if (NULL == start) { |
1917 | start = fakeheader; |
1918 | length = sizeof(fakeheader); |
1919 | } else if (usize < length) { |
1920 | /* ... if input too short, copy out length of entitlement */ |
1921 | uint32_t length32 = htonl((uint32_t)length); |
1922 | memcpy(&fakeheader[4], &length32, sizeof(length32)); |
1923 | |
1924 | error = copyout(fakeheader, uaddr, sizeof(fakeheader)); |
1925 | if (error == 0) |
1926 | return ERANGE; /* input buffer to short, ERANGE signals that */ |
1927 | return error; |
1928 | } |
1929 | return copyout(start, uaddr, length); |
1930 | } |
1931 | |
1932 | static int |
1933 | csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user_addr_t uaudittoken) |
1934 | { |
1935 | size_t usize = (size_t)CAST_DOWN(size_t, usersize); |
1936 | proc_t pt; |
1937 | int forself; |
1938 | int error; |
1939 | vnode_t tvp; |
1940 | off_t toff; |
1941 | unsigned char cdhash[SHA1_RESULTLEN]; |
1942 | audit_token_t token; |
1943 | unsigned int upid=0, uidversion = 0; |
1944 | |
1945 | forself = error = 0; |
1946 | |
1947 | if (pid == 0) |
1948 | pid = proc_selfpid(); |
1949 | if (pid == proc_selfpid()) |
1950 | forself = 1; |
1951 | |
1952 | |
1953 | switch (ops) { |
1954 | case CS_OPS_STATUS: |
1955 | case CS_OPS_CDHASH: |
1956 | case CS_OPS_PIDOFFSET: |
1957 | case CS_OPS_ENTITLEMENTS_BLOB: |
1958 | case CS_OPS_IDENTITY: |
1959 | case CS_OPS_BLOB: |
1960 | case CS_OPS_TEAMID: |
1961 | break; /* not restricted to root */ |
1962 | default: |
1963 | if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) |
1964 | return(EPERM); |
1965 | break; |
1966 | } |
1967 | |
1968 | pt = proc_find(pid); |
1969 | if (pt == PROC_NULL) |
1970 | return(ESRCH); |
1971 | |
1972 | upid = pt->p_pid; |
1973 | uidversion = pt->p_idversion; |
1974 | if (uaudittoken != USER_ADDR_NULL) { |
1975 | |
1976 | error = copyin(uaudittoken, &token, sizeof(audit_token_t)); |
1977 | if (error != 0) |
1978 | goto out; |
1979 | /* verify the audit token pid/idversion matches with proc */ |
1980 | if ((token.val[5] != upid) || (token.val[7] != uidversion)) { |
1981 | error = ESRCH; |
1982 | goto out; |
1983 | } |
1984 | } |
1985 | |
1986 | #if CONFIG_MACF |
1987 | switch (ops) { |
1988 | case CS_OPS_MARKINVALID: |
1989 | case CS_OPS_MARKHARD: |
1990 | case CS_OPS_MARKKILL: |
1991 | case CS_OPS_MARKRESTRICT: |
1992 | case CS_OPS_SET_STATUS: |
1993 | case CS_OPS_CLEARINSTALLER: |
1994 | case CS_OPS_CLEARPLATFORM: |
1995 | if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) |
1996 | goto out; |
1997 | break; |
1998 | default: |
1999 | if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) |
2000 | goto out; |
2001 | } |
2002 | #endif |
2003 | |
2004 | switch (ops) { |
2005 | |
2006 | case CS_OPS_STATUS: { |
2007 | uint32_t retflags; |
2008 | |
2009 | proc_lock(pt); |
2010 | retflags = pt->p_csflags; |
2011 | if (cs_process_enforcement(pt)) |
2012 | retflags |= CS_ENFORCEMENT; |
2013 | if (csproc_get_platform_binary(pt)) |
2014 | retflags |= CS_PLATFORM_BINARY; |
2015 | if (csproc_get_platform_path(pt)) |
2016 | retflags |= CS_PLATFORM_PATH; |
2017 | //Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV but still report CS_FORCED_LV |
2018 | if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) { |
2019 | retflags &= (~CS_REQUIRE_LV); |
2020 | } |
2021 | proc_unlock(pt); |
2022 | |
2023 | if (uaddr != USER_ADDR_NULL) |
2024 | error = copyout(&retflags, uaddr, sizeof(uint32_t)); |
2025 | break; |
2026 | } |
2027 | case CS_OPS_MARKINVALID: |
2028 | proc_lock(pt); |
2029 | if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */ |
2030 | pt->p_csflags &= ~CS_VALID; /* set invalid */ |
2031 | if ((pt->p_csflags & CS_KILL) == CS_KILL) { |
2032 | pt->p_csflags |= CS_KILLED; |
2033 | proc_unlock(pt); |
2034 | if (cs_debug) { |
2035 | printf("CODE SIGNING: marked invalid by pid %d: " |
2036 | "p=%d[%s] honoring CS_KILL, final status 0x%x\n" , |
2037 | proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags); |
2038 | } |
2039 | psignal(pt, SIGKILL); |
2040 | } else |
2041 | proc_unlock(pt); |
2042 | } else |
2043 | proc_unlock(pt); |
2044 | |
2045 | break; |
2046 | |
2047 | case CS_OPS_MARKHARD: |
2048 | proc_lock(pt); |
2049 | pt->p_csflags |= CS_HARD; |
2050 | if ((pt->p_csflags & CS_VALID) == 0) { |
2051 | /* @@@ allow? reject? kill? @@@ */ |
2052 | proc_unlock(pt); |
2053 | error = EINVAL; |
2054 | goto out; |
2055 | } else |
2056 | proc_unlock(pt); |
2057 | break; |
2058 | |
2059 | case CS_OPS_MARKKILL: |
2060 | proc_lock(pt); |
2061 | pt->p_csflags |= CS_KILL; |
2062 | if ((pt->p_csflags & CS_VALID) == 0) { |
2063 | proc_unlock(pt); |
2064 | psignal(pt, SIGKILL); |
2065 | } else |
2066 | proc_unlock(pt); |
2067 | break; |
2068 | |
2069 | case CS_OPS_PIDOFFSET: |
2070 | toff = pt->p_textoff; |
2071 | proc_rele(pt); |
2072 | error = copyout(&toff, uaddr, sizeof(toff)); |
2073 | return(error); |
2074 | |
2075 | case CS_OPS_CDHASH: |
2076 | |
2077 | /* pt already holds a reference on its p_textvp */ |
2078 | tvp = pt->p_textvp; |
2079 | toff = pt->p_textoff; |
2080 | |
2081 | if (tvp == NULLVP || usize != SHA1_RESULTLEN) { |
2082 | proc_rele(pt); |
2083 | return EINVAL; |
2084 | } |
2085 | |
2086 | error = vn_getcdhash(tvp, toff, cdhash); |
2087 | proc_rele(pt); |
2088 | |
2089 | if (error == 0) { |
2090 | error = copyout(cdhash, uaddr, sizeof (cdhash)); |
2091 | } |
2092 | |
2093 | return error; |
2094 | |
2095 | case CS_OPS_ENTITLEMENTS_BLOB: { |
2096 | void *start; |
2097 | size_t length; |
2098 | |
2099 | proc_lock(pt); |
2100 | |
2101 | if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) { |
2102 | proc_unlock(pt); |
2103 | error = EINVAL; |
2104 | break; |
2105 | } |
2106 | |
2107 | error = cs_entitlements_blob_get(pt, &start, &length); |
2108 | proc_unlock(pt); |
2109 | if (error) |
2110 | break; |
2111 | |
2112 | error = csops_copy_token(start, length, usize, uaddr); |
2113 | break; |
2114 | } |
2115 | case CS_OPS_MARKRESTRICT: |
2116 | proc_lock(pt); |
2117 | pt->p_csflags |= CS_RESTRICT; |
2118 | proc_unlock(pt); |
2119 | break; |
2120 | |
2121 | case CS_OPS_SET_STATUS: { |
2122 | uint32_t flags; |
2123 | |
2124 | if (usize < sizeof(flags)) { |
2125 | error = ERANGE; |
2126 | break; |
2127 | } |
2128 | |
2129 | error = copyin(uaddr, &flags, sizeof(flags)); |
2130 | if (error) |
2131 | break; |
2132 | |
2133 | /* only allow setting a subset of all code sign flags */ |
2134 | flags &= |
2135 | CS_HARD | CS_EXEC_SET_HARD | |
2136 | CS_KILL | CS_EXEC_SET_KILL | |
2137 | CS_RESTRICT | |
2138 | CS_REQUIRE_LV | |
2139 | CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT; |
2140 | |
2141 | proc_lock(pt); |
2142 | if (pt->p_csflags & CS_VALID) |
2143 | pt->p_csflags |= flags; |
2144 | else |
2145 | error = EINVAL; |
2146 | proc_unlock(pt); |
2147 | |
2148 | break; |
2149 | } |
2150 | case CS_OPS_BLOB: { |
2151 | void *start; |
2152 | size_t length; |
2153 | |
2154 | proc_lock(pt); |
2155 | if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) { |
2156 | proc_unlock(pt); |
2157 | error = EINVAL; |
2158 | break; |
2159 | } |
2160 | |
2161 | error = cs_blob_get(pt, &start, &length); |
2162 | proc_unlock(pt); |
2163 | if (error) |
2164 | break; |
2165 | |
2166 | error = csops_copy_token(start, length, usize, uaddr); |
2167 | break; |
2168 | } |
2169 | case CS_OPS_IDENTITY: |
2170 | case CS_OPS_TEAMID: { |
2171 | const char *identity; |
2172 | uint8_t [8]; |
2173 | uint32_t idlen; |
2174 | size_t length; |
2175 | |
2176 | /* |
2177 | * Make identity have a blob header to make it |
2178 | * easier on userland to guess the identity |
2179 | * length. |
2180 | */ |
2181 | if (usize < sizeof(fakeheader)) { |
2182 | error = ERANGE; |
2183 | break; |
2184 | } |
2185 | memset(fakeheader, 0, sizeof(fakeheader)); |
2186 | |
2187 | proc_lock(pt); |
2188 | if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) { |
2189 | proc_unlock(pt); |
2190 | error = EINVAL; |
2191 | break; |
2192 | } |
2193 | |
2194 | identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt); |
2195 | proc_unlock(pt); |
2196 | if (identity == NULL) { |
2197 | error = ENOENT; |
2198 | break; |
2199 | } |
2200 | |
2201 | length = strlen(identity) + 1; /* include NUL */ |
2202 | idlen = htonl(length + sizeof(fakeheader)); |
2203 | memcpy(&fakeheader[4], &idlen, sizeof(idlen)); |
2204 | |
2205 | error = copyout(fakeheader, uaddr, sizeof(fakeheader)); |
2206 | if (error) |
2207 | break; |
2208 | |
2209 | if (usize < sizeof(fakeheader) + length) |
2210 | error = ERANGE; |
2211 | else if (usize > sizeof(fakeheader)) |
2212 | error = copyout(identity, uaddr + sizeof(fakeheader), length); |
2213 | |
2214 | break; |
2215 | } |
2216 | |
2217 | case CS_OPS_CLEARINSTALLER: |
2218 | proc_lock(pt); |
2219 | pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP); |
2220 | proc_unlock(pt); |
2221 | break; |
2222 | |
2223 | case CS_OPS_CLEARPLATFORM: |
2224 | #if DEVELOPMENT || DEBUG |
2225 | if (cs_process_global_enforcement()) { |
2226 | error = ENOTSUP; |
2227 | break; |
2228 | } |
2229 | |
2230 | #if CONFIG_CSR |
2231 | if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) { |
2232 | error = ENOTSUP; |
2233 | break; |
2234 | } |
2235 | #endif |
2236 | |
2237 | proc_lock(pt); |
2238 | pt->p_csflags &= ~(CS_PLATFORM_BINARY|CS_PLATFORM_PATH); |
2239 | csproc_clear_platform_binary(pt); |
2240 | proc_unlock(pt); |
2241 | break; |
2242 | #else |
2243 | error = ENOTSUP; |
2244 | break; |
2245 | #endif /* !DEVELOPMENT || DEBUG */ |
2246 | |
2247 | default: |
2248 | error = EINVAL; |
2249 | break; |
2250 | } |
2251 | out: |
2252 | proc_rele(pt); |
2253 | return(error); |
2254 | } |
2255 | |
/*
 * proc_iterate
 *
 * Invoke `callout` on every process accepted by `filterfn` (a NULL filter
 * accepts all).  To avoid holding the proc_list_lock across callouts, the
 * matching pids are first snapshotted into a heap-allocated list; each pid
 * is then re-looked-up (proc_find / proc_find_zombref) before the callout.
 *
 * flags:
 *   PROC_ALLPROCLIST  - walk the live process list
 *   PROC_ZOMBPROCLIST - also walk the zombie list / retry pids as zombies
 *   PROC_NOWAITTRANS  - do not wait for procs in transition
 *
 * The callout's return value controls iteration: PROC_RETURNED[_DONE]
 * means the iterator drops the proc ref, PROC_CLAIMED[_DONE] means the
 * callout took ownership; the *_DONE variants stop the iteration.
 *
 * Returns 0 on success, 1 if the pid list could not be allocated.
 */
int
proc_iterate(
	unsigned int flags,
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	pid_t *pid_list = NULL;
	vm_size_t pid_list_size = 0;
	vm_size_t pid_list_size_needed = 0;
	int pid_count = 0;
	int pid_count_available = 0;

	assert(callout != NULL);

	/* allocate outside of the proc_list_lock */
	for (;;) {
		proc_list_lock();

		pid_count_available = nprocs + 1 /* kernel_task not counted in nprocs */;
		assert(pid_count_available > 0);

		pid_list_size_needed = pid_count_available * sizeof(pid_t);
		if (pid_list_size >= pid_list_size_needed) {
			break;
		}
		proc_list_unlock();

		/* nprocs may have grown while unlocked; retry with a larger buffer */
		if (pid_list_size != 0) {
			kfree(pid_list, pid_list_size);
		}
		pid_list = kalloc(pid_list_size_needed);
		if (!pid_list) {
			return 1;
		}
		pid_list_size = pid_list_size_needed;
	}
	assert(pid_list != NULL);

	/* filter pids into pid_list */

	if (flags & PROC_ALLPROCLIST) {
		proc_t p;
		ALLPROC_FOREACH(p) {
			/* ignore processes that are being forked */
			if (p->p_stat == SIDL) {
				continue;
			}
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}

			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	if ((pid_count < pid_count_available) &&
	    (flags & PROC_ZOMBPROCLIST))
	{
		proc_t p;
		ZOMBPROC_FOREACH(p) {
			if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) {
				continue;
			}

			pid_list[pid_count++] = proc_pid(p);
			if (pid_count >= pid_count_available) {
				break;
			}
		}
	}

	proc_list_unlock();

	/* call callout on processes in the pid_list */

	for (int i = 0; i < pid_count; i++) {
		proc_t p = proc_find(pid_list[i]);
		if (p) {
			if ((flags & PROC_NOWAITTRANS) == 0) {
				proc_transwait(p, 0);
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_rele(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for pid %d" ,
				    callout_ret, pid_list[i]);
				break;
			}
		} else if (flags & PROC_ZOMBPROCLIST) {
			/* proc may have become a zombie since the snapshot */
			p = proc_find_zombref(pid_list[i]);
			if (!p) {
				continue;
			}
			int callout_ret = callout(p, arg);

			switch (callout_ret) {
			case PROC_RETURNED_DONE:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED_DONE:
				goto out;

			case PROC_RETURNED:
				proc_drop_zombref(p);
				/* FALLTHROUGH */
			case PROC_CLAIMED:
				break;

			default:
				panic("proc_iterate: callout returned %d for zombie pid %d" ,
				    callout_ret, pid_list[i]);
				break;
			}
		}
	}

out:
	kfree(pid_list, pid_list_size);
	return 0;

}
2395 | |
/*
 * proc_rebootscan
 *
 * Shutdown-time scan: repeatedly walk the allproc list and invoke
 * `callout` on the first process accepted by `filterfn`.  The walk
 * restarts from the head after every callout because the list may have
 * changed while the proc_list_lock was dropped; iteration ends when a
 * full pass finds no matching process.
 */
void
proc_rebootscan(
	proc_iterate_fn_t callout,
	void *arg,
	proc_iterate_fn_t filterfn,
	void *filterarg)
{
	proc_t p;

	assert(callout != NULL);

	proc_shutdown_exitcount = 0;

restart_foreach:

	proc_list_lock();

	ALLPROC_FOREACH(p) {
		if ((filterfn != NULL) && filterfn(p, filterarg) == 0) {
			continue;
		}
		/* take a ref before dropping the list lock; skip if proc is exiting */
		p = proc_ref_locked(p);
		if (!p) {
			continue;
		}

		proc_list_unlock();

		proc_transwait(p, 0);
		(void)callout(p, arg);
		proc_rele(p);

		goto restart_foreach;
	}

	proc_list_unlock();
}
2433 | |
2434 | int |
2435 | proc_childrenwalk( |
2436 | proc_t parent, |
2437 | proc_iterate_fn_t callout, |
2438 | void *arg) |
2439 | { |
2440 | pid_t *pid_list; |
2441 | vm_size_t pid_list_size = 0; |
2442 | vm_size_t pid_list_size_needed = 0; |
2443 | int pid_count = 0; |
2444 | int pid_count_available = 0; |
2445 | |
2446 | assert(parent != NULL); |
2447 | assert(callout != NULL); |
2448 | |
2449 | for (;;) { |
2450 | proc_list_lock(); |
2451 | |
2452 | pid_count_available = parent->p_childrencnt; |
2453 | if (pid_count_available == 0) { |
2454 | proc_list_unlock(); |
2455 | return 0; |
2456 | } |
2457 | |
2458 | pid_list_size_needed = pid_count_available * sizeof(pid_t); |
2459 | if (pid_list_size >= pid_list_size_needed) { |
2460 | break; |
2461 | } |
2462 | proc_list_unlock(); |
2463 | |
2464 | if (pid_list_size != 0) { |
2465 | kfree(pid_list, pid_list_size); |
2466 | } |
2467 | pid_list = kalloc(pid_list_size_needed); |
2468 | if (!pid_list) { |
2469 | return 1; |
2470 | } |
2471 | pid_list_size = pid_list_size_needed; |
2472 | } |
2473 | |
2474 | proc_t p; |
2475 | PCHILDREN_FOREACH(parent, p) { |
2476 | if (p->p_stat == SIDL) { |
2477 | continue; |
2478 | } |
2479 | |
2480 | pid_list[pid_count++] = proc_pid(p); |
2481 | if (pid_count >= pid_count_available) { |
2482 | break; |
2483 | } |
2484 | } |
2485 | |
2486 | proc_list_unlock(); |
2487 | |
2488 | for (int i = 0; i < pid_count; i++) { |
2489 | p = proc_find(pid_list[i]); |
2490 | if (!p) { |
2491 | continue; |
2492 | } |
2493 | |
2494 | int callout_ret = callout(p, arg); |
2495 | |
2496 | switch (callout_ret) { |
2497 | case PROC_RETURNED_DONE: |
2498 | proc_rele(p); |
2499 | /* FALLTHROUGH */ |
2500 | case PROC_CLAIMED_DONE: |
2501 | goto out; |
2502 | |
2503 | case PROC_RETURNED: |
2504 | proc_rele(p); |
2505 | /* FALLTHROUGH */ |
2506 | case PROC_CLAIMED: |
2507 | break; |
2508 | default: |
2509 | panic("proc_childrenwalk: callout returned %d for pid %d" , |
2510 | callout_ret, pid_list[i]); |
2511 | break; |
2512 | } |
2513 | } |
2514 | |
2515 | out: |
2516 | kfree(pid_list, pid_list_size); |
2517 | return 0; |
2518 | } |
2519 | |
2520 | int |
2521 | pgrp_iterate( |
2522 | struct pgrp *pgrp, |
2523 | unsigned int flags, |
2524 | proc_iterate_fn_t callout, |
2525 | void * arg, |
2526 | proc_iterate_fn_t filterfn, |
2527 | void * filterarg) |
2528 | { |
2529 | pid_t *pid_list; |
2530 | proc_t p; |
2531 | vm_size_t pid_list_size = 0; |
2532 | vm_size_t pid_list_size_needed = 0; |
2533 | int pid_count = 0; |
2534 | int pid_count_available = 0; |
2535 | |
2536 | pid_t pgid; |
2537 | |
2538 | assert(pgrp != NULL); |
2539 | assert(callout != NULL); |
2540 | |
2541 | for (;;) { |
2542 | pgrp_lock(pgrp); |
2543 | |
2544 | pid_count_available = pgrp->pg_membercnt; |
2545 | if (pid_count_available == 0) { |
2546 | pgrp_unlock(pgrp); |
2547 | return 0; |
2548 | } |
2549 | |
2550 | pid_list_size_needed = pid_count_available * sizeof(pid_t); |
2551 | if (pid_list_size >= pid_list_size_needed) { |
2552 | break; |
2553 | } |
2554 | pgrp_unlock(pgrp); |
2555 | |
2556 | if (pid_list_size != 0) { |
2557 | kfree(pid_list, pid_list_size); |
2558 | } |
2559 | pid_list = kalloc(pid_list_size_needed); |
2560 | if (!pid_list) { |
2561 | return 1; |
2562 | } |
2563 | pid_list_size = pid_list_size_needed; |
2564 | } |
2565 | |
2566 | pgid = pgrp->pg_id; |
2567 | |
2568 | PGMEMBERS_FOREACH(pgrp, p) { |
2569 | if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) { |
2570 | continue;; |
2571 | } |
2572 | pid_list[pid_count++] = proc_pid(p); |
2573 | if (pid_count >= pid_count_available) { |
2574 | break; |
2575 | } |
2576 | } |
2577 | |
2578 | pgrp_unlock(pgrp); |
2579 | |
2580 | if (flags & PGRP_DROPREF) { |
2581 | pg_rele(pgrp); |
2582 | } |
2583 | |
2584 | for (int i = 0; i< pid_count; i++) { |
2585 | /* do not handle kernproc */ |
2586 | if (pid_list[i] == 0) { |
2587 | continue; |
2588 | } |
2589 | p = proc_find(pid_list[i]); |
2590 | if (!p) { |
2591 | continue; |
2592 | } |
2593 | if (p->p_pgrpid != pgid) { |
2594 | proc_rele(p); |
2595 | continue; |
2596 | } |
2597 | |
2598 | int callout_ret = callout(p, arg); |
2599 | |
2600 | switch (callout_ret) { |
2601 | case PROC_RETURNED: |
2602 | proc_rele(p); |
2603 | /* FALLTHROUGH */ |
2604 | case PROC_CLAIMED: |
2605 | break; |
2606 | |
2607 | case PROC_RETURNED_DONE: |
2608 | proc_rele(p); |
2609 | /* FALLTHROUGH */ |
2610 | case PROC_CLAIMED_DONE: |
2611 | goto out; |
2612 | |
2613 | default: |
2614 | panic("pgrp_iterate: callout returned %d for pid %d" , |
2615 | callout_ret, pid_list[i]); |
2616 | } |
2617 | } |
2618 | |
2619 | out: |
2620 | kfree(pid_list, pid_list_size); |
2621 | return 0; |
2622 | } |
2623 | |
/*
 * pgrp_add
 *
 * Add `child` to process group `pgrp`, inserting it after `parent` in
 * the member list when a parent is given.  The caller must hold a
 * reference on `pgrp`.  Updates the proc's pgrp linkage under the
 * proc_list_lock and the membership list under the pgrp lock.
 */
static void
pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child)
{
	proc_list_lock();
	child->p_pgrp = pgrp;
	child->p_pgrpid = pgrp->pg_id;
	child->p_listflag |= P_LIST_INPGRP;
	/*
	 * When the pgrp is being freed, a process can still request
	 * addition via setpgid (e.g. from bash when a login is
	 * terminated — the login cycler); clear the TERMINATE flag so
	 * the group survives.  Safe to hold the lock because of the
	 * caller's refcount on pgrp.
	 */
	if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process" );
	proc_list_unlock();

	pgrp_lock(pgrp);
	pgrp->pg_membercnt++;
	if ( parent != PROC_NULL) {
		LIST_INSERT_AFTER(parent, child, p_pglist);
	}else {
		LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist);
	}
	pgrp_unlock(pgrp);

	proc_list_lock();
	/* re-check: TERMINATE may have been set while the list lock was dropped */
	if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (pgrp->pg_membercnt != 0)) {
		pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}
	proc_list_unlock();
}
2660 | |
/*
 * pgrp_remove
 *
 * Detach `p` from its process group.  Clears the proc's pgrp linkage
 * under the proc_list_lock, then removes it from the member list under
 * the pgrp lock.  If this was the last member, the group is torn down
 * via pgdelete_dropref(); otherwise the ref taken by proc_pgrp() here
 * is dropped.
 */
static void
pgrp_remove(struct proc * p)
{
	struct pgrp * pg;

	pg = proc_pgrp(p);

	proc_list_lock();
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n" );
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;
	proc_list_unlock();

	if (pg == PGRP_NULL)
		panic("pgrp_remove: pg is NULL" );
	pgrp_lock(pg);
	pg->pg_membercnt--;

	if (pg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n" ,pg, p);

	LIST_REMOVE(p, p_pglist);
	if (pg->pg_members.lh_first == 0) {
		/* last member: destroy the group, consuming our reference */
		pgrp_unlock(pg);
		pgdelete_dropref(pg);
	} else {
		pgrp_unlock(pg);
		pg_rele(pg);
	}
}
2695 | |
2696 | |
/*
 * pgrp_replace
 *
 * Atomically (from other lookups' point of view) move `p` from its
 * current process group to `newpg`.  Cannot use proc_pgrp() here, as
 * that would block on the very transition this function drives.
 * Concurrent proc_pgrp()/proc_session() callers wait on
 * P_LIST_PGRPTRANS until the move completes.
 */
static void
pgrp_replace(struct proc * p, struct pgrp * newpg)
{
	struct pgrp * oldpg;



	proc_list_lock();

	/* wait out any transition already in flight for this proc */
	while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) {
		p->p_listflag |= P_LIST_PGRPTRWAIT;
		(void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp" , 0);
	}

	/* mark the transition so lookups block until we are done */
	p->p_listflag |= P_LIST_PGRPTRANS;

	oldpg = p->p_pgrp;
	if (oldpg == PGRP_NULL)
		panic("pgrp_replace: oldpg NULL" );
	/* take a ref on the old group so it stays valid while we unlink */
	oldpg->pg_refcount++;
#if __PROC_INTERNAL_DEBUG
	if ((p->p_listflag & P_LIST_INPGRP) == 0)
		panic("removing from pglist but no named ref\n" );
#endif
	p->p_pgrpid = PGRPID_DEAD;
	p->p_listflag &= ~P_LIST_INPGRP;
	p->p_pgrp = NULL;

	proc_list_unlock();

	/* detach from the old group; tear it down if p was the last member */
	pgrp_lock(oldpg);
	oldpg->pg_membercnt--;
	if (oldpg->pg_membercnt < 0)
		panic("pgprp: -ve membercnt pgprp:%p p:%p\n" ,oldpg, p);
	LIST_REMOVE(p, p_pglist);
	if (oldpg->pg_members.lh_first == 0) {
		pgrp_unlock(oldpg);
		pgdelete_dropref(oldpg);
	} else {
		pgrp_unlock(oldpg);
		pg_rele(oldpg);
	}

	proc_list_lock();
	p->p_pgrp = newpg;
	p->p_pgrpid = newpg->pg_id;
	p->p_listflag |= P_LIST_INPGRP;
	/*
	 * When the pgrp is being freed, a process can still request
	 * addition via setpgid (login cycler); clear the TERMINATE flag
	 * so the group survives.  Safe to hold the lock because of the
	 * refcount on pgrp.
	 */
	if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD)
		panic("pgrp_add : pgrp is dead adding process" );
	proc_list_unlock();

	pgrp_lock(newpg);
	newpg->pg_membercnt++;
	LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist);
	pgrp_unlock(newpg);

	proc_list_lock();
	/* re-check: TERMINATE may have been set while the list lock was dropped */
	if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) {
		newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE;
	}

	/* transition complete: wake anyone blocked in proc_pgrp()/proc_session() */
	p->p_listflag &= ~P_LIST_PGRPTRANS;
	if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) {
		p->p_listflag &= ~P_LIST_PGRPTRWAIT;
		wakeup(&p->p_pgrpid);

	}
	proc_list_unlock();
}
2777 | |
/* Acquire the process-group mutex. */
void
pgrp_lock(struct pgrp * pgrp)
{
	lck_mtx_lock(&pgrp->pg_mlock);
}
2783 | |
/* Release the process-group mutex. */
void
pgrp_unlock(struct pgrp * pgrp)
{
	lck_mtx_unlock(&pgrp->pg_mlock);
}
2789 | |
/* Acquire the session mutex. */
void
session_lock(struct session * sess)
{
	lck_mtx_lock(&sess->s_mlock);
}
2795 | |
2796 | |
/* Release the session mutex. */
void
session_unlock(struct session * sess)
{
	lck_mtx_unlock(&sess->s_mlock);
}
2802 | |
2803 | struct pgrp * |
2804 | proc_pgrp(proc_t p) |
2805 | { |
2806 | struct pgrp * pgrp; |
2807 | |
2808 | if (p == PROC_NULL) |
2809 | return(PGRP_NULL); |
2810 | proc_list_lock(); |
2811 | |
2812 | while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) { |
2813 | p->p_listflag |= P_LIST_PGRPTRWAIT; |
2814 | (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp" , 0); |
2815 | } |
2816 | |
2817 | pgrp = p->p_pgrp; |
2818 | |
2819 | assert(pgrp != NULL); |
2820 | |
2821 | if (pgrp != PGRP_NULL) { |
2822 | pgrp->pg_refcount++; |
2823 | if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) |
2824 | panic("proc_pgrp: ref being povided for dead pgrp" ); |
2825 | } |
2826 | |
2827 | proc_list_unlock(); |
2828 | |
2829 | return(pgrp); |
2830 | } |
2831 | |
2832 | struct pgrp * |
2833 | tty_pgrp(struct tty * tp) |
2834 | { |
2835 | struct pgrp * pg = PGRP_NULL; |
2836 | |
2837 | proc_list_lock(); |
2838 | pg = tp->t_pgrp; |
2839 | |
2840 | if (pg != PGRP_NULL) { |
2841 | if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) |
2842 | panic("tty_pgrp: ref being povided for dead pgrp" ); |
2843 | pg->pg_refcount++; |
2844 | } |
2845 | proc_list_unlock(); |
2846 | |
2847 | return(pg); |
2848 | } |
2849 | |
2850 | struct session * |
2851 | proc_session(proc_t p) |
2852 | { |
2853 | struct session * sess = SESSION_NULL; |
2854 | |
2855 | if (p == PROC_NULL) |
2856 | return(SESSION_NULL); |
2857 | |
2858 | proc_list_lock(); |
2859 | |
2860 | /* wait during transitions */ |
2861 | while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) { |
2862 | p->p_listflag |= P_LIST_PGRPTRWAIT; |
2863 | (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp" , 0); |
2864 | } |
2865 | |
2866 | if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) { |
2867 | if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) |
2868 | panic("proc_session:returning sesssion ref on terminating session" ); |
2869 | sess->s_count++; |
2870 | } |
2871 | proc_list_unlock(); |
2872 | return(sess); |
2873 | } |
2874 | |
/*
 * session_rele
 *
 * Drop a reference on `sess`.  When the count reaches zero the session
 * is unhashed and freed; the TERM/DEAD listflags guard against a
 * concurrent lookup handing out a ref mid-teardown.
 */
void
session_rele(struct session *sess)
{
	proc_list_lock();
	if (--sess->s_count == 0) {
		if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0)
			panic("session_rele: terminating already terminated session" );
		sess->s_listflags |= S_LIST_TERM;
		LIST_REMOVE(sess, s_hash);
		sess->s_listflags |= S_LIST_DEAD;
		/* re-check: no one may have grabbed a ref while flagged for death */
		if (sess->s_count != 0)
			panic("session_rele: freeing session in use" );
		proc_list_unlock();
#if CONFIG_FINE_LOCK_GROUPS
		lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp);
#else
		lck_mtx_destroy(&sess->s_mlock, proc_lck_grp);
#endif
		FREE_ZONE(sess, sizeof(struct session), M_SESSION);
	} else
		proc_list_unlock();
}
2897 | |
/*
 * proc_transstart
 *
 * Begin a proc transition (e.g. exec), marking `p` with P_LINTRANSIT so
 * other threads wait in proc_transwait().  If a transition is already
 * committed, or one is in flight and `non_blocking` is set, returns
 * EDEADLK instead of sleeping.  `locked` indicates the caller already
 * holds the proc lock.  Returns 0 on success.
 */
int
proc_transstart(proc_t p, int locked, int non_blocking)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if (((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		/* NOTE(review): wait string says "proc_signstart" — looks copy-pasted */
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart" , NULL);
	}
	p->p_lflag |= P_LINTRANSIT;
	p->p_transholder = current_thread();
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2918 | |
/*
 * proc_transcommit
 *
 * Mark the in-flight transition on `p` as committed (point of no
 * return).  Must be called by the thread that started the transition.
 * Wakes any waiters so they can observe the committed state.
 */
void
proc_transcommit(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT);
	assert (p->p_transholder == current_thread());
	p->p_lflag |= P_LTRANSCOMMIT;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2936 | |
/*
 * proc_transend
 *
 * End the transition on `p`: clear the in-transit/committed flags,
 * release the holder, and wake any threads blocked in proc_transwait().
 */
void
proc_transend(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);

	p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT);
	p->p_transholder = NULL;

	if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) {
		p->p_lflag &= ~P_LTRANSWAIT;
		wakeup(&p->p_lflag);
	}
	if (locked == 0)
		proc_unlock(p);
}
2953 | |
/*
 * proc_transwait
 *
 * Block until any transition on `p` completes.  Returns EDEADLK instead
 * of sleeping when the transition is already committed and the caller
 * IS `p` (waiting would deadlock against itself).  Returns 0 otherwise.
 */
int
proc_transwait(proc_t p, int locked)
{
	if (locked == 0)
		proc_lock(p);
	while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) {
		if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) {
			if (locked == 0)
				proc_unlock(p);
			return EDEADLK;
		}
		p->p_lflag |= P_LTRANSWAIT;
		/* NOTE(review): wait string says "proc_signstart" — looks copy-pasted */
		msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart" , NULL);
	}
	if (locked == 0)
		proc_unlock(p);
	return 0;
}
2972 | |
/* Acquire the global proc knote-list mutex. */
void
proc_klist_lock(void)
{
	lck_mtx_lock(proc_klist_mlock);
}
2978 | |
/* Release the global proc knote-list mutex. */
void
proc_klist_unlock(void)
{
	lck_mtx_unlock(proc_klist_mlock);
}
2984 | |
/* Post event `hint` to all knotes attached to proc `p`. */
void
proc_knote(struct proc * p, long hint)
{
	proc_klist_lock();
	KNOTE(&p->p_klist, hint);
	proc_klist_unlock();
}
2992 | |
/*
 * proc_knote_drain
 *
 * Detach every knote from `p` and clear each knote's back-pointer,
 * so no knote references the proc after it is reaped.
 */
void
proc_knote_drain(struct proc *p)
{
	struct knote *kn = NULL;

	/*
	 * Clear the proc's klist to avoid references after the proc is reaped.
	 */
	proc_klist_lock();
	while ((kn = SLIST_FIRST(&p->p_klist))) {
		kn->kn_ptr.p_proc = PROC_NULL;
		KNOTE_DETACH(&p->p_klist, kn);
	}
	proc_klist_unlock();
}
3008 | |
/* Set the P_LREGISTER flag on `p` under the proc lock. */
void
proc_setregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag |= P_LREGISTER;
	proc_unlock(p);
}
3016 | |
/* Clear the P_LREGISTER flag on `p` under the proc lock. */
void
proc_resetregister(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LREGISTER;
	proc_unlock(p);
}
3024 | |
/* Return the process group id of `p` (unlocked read). */
pid_t
proc_pgrpid(proc_t p)
{
	return p->p_pgrpid;
}
3030 | |
3031 | pid_t |
3032 | proc_selfpgrpid() |
3033 | { |
3034 | return current_proc()->p_pgrpid; |
3035 | } |
3036 | |
3037 | |
3038 | /* return control and action states */ |
3039 | int |
3040 | proc_getpcontrol(int pid, int * pcontrolp) |
3041 | { |
3042 | proc_t p; |
3043 | |
3044 | p = proc_find(pid); |
3045 | if (p == PROC_NULL) |
3046 | return(ESRCH); |
3047 | if (pcontrolp != NULL) |
3048 | *pcontrolp = p->p_pcaction; |
3049 | |
3050 | proc_rele(p); |
3051 | return(0); |
3052 | } |
3053 | |
/*
 * proc_dopcontrol
 *
 * Apply the low-swap policy action configured on `p` (throttle,
 * suspend, or kill), if no action has been taken yet.  Marks the
 * action-taken state before dropping the proc lock to perform it.
 * Always returns PROC_RETURNED.
 */
int
proc_dopcontrol(proc_t p)
{
	int pcontrol;

	proc_lock(p);

	pcontrol = PROC_CONTROL_STATE(p);

	if (PROC_ACTION_STATE(p) == 0) {
		switch(pcontrol) {
		case P_PCTHROTTLE:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: throttling pid %d (%s)\n" , p->p_pid, p->p_comm);
			break;

		case P_PCSUSP:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: suspending pid %d (%s)\n" , p->p_pid, p->p_comm);
			task_suspend(p->task);
			break;

		case P_PCKILL:
			PROC_SETACTION_STATE(p);
			proc_unlock(p);
			printf("low swap: killing pid %d (%s)\n" , p->p_pid, p->p_comm);
			psignal(p, SIGKILL);
			break;

		default:
			/* no policy configured: nothing to do */
			proc_unlock(p);
		}

	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
3094 | |
3095 | |
3096 | /* |
3097 | * Resume a throttled or suspended process. This is an internal interface that's only |
3098 | * used by the user level code that presents the GUI when we run out of swap space and |
3099 | * hence is restricted to processes with superuser privileges. |
3100 | */ |
3101 | |
3102 | int |
3103 | proc_resetpcontrol(int pid) |
3104 | { |
3105 | proc_t p; |
3106 | int pcontrol; |
3107 | int error; |
3108 | proc_t self = current_proc(); |
3109 | |
3110 | /* if the process has been validated to handle resource control or root is valid one */ |
3111 | if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) |
3112 | return error; |
3113 | |
3114 | p = proc_find(pid); |
3115 | if (p == PROC_NULL) |
3116 | return(ESRCH); |
3117 | |
3118 | proc_lock(p); |
3119 | |
3120 | pcontrol = PROC_CONTROL_STATE(p); |
3121 | |
3122 | if(PROC_ACTION_STATE(p) !=0) { |
3123 | switch(pcontrol) { |
3124 | case P_PCTHROTTLE: |
3125 | PROC_RESETACTION_STATE(p); |
3126 | proc_unlock(p); |
3127 | printf("low swap: unthrottling pid %d (%s)\n" , p->p_pid, p->p_comm); |
3128 | break; |
3129 | |
3130 | case P_PCSUSP: |
3131 | PROC_RESETACTION_STATE(p); |
3132 | proc_unlock(p); |
3133 | printf("low swap: resuming pid %d (%s)\n" , p->p_pid, p->p_comm); |
3134 | task_resume(p->task); |
3135 | break; |
3136 | |
3137 | case P_PCKILL: |
3138 | /* Huh? */ |
3139 | PROC_SETACTION_STATE(p); |
3140 | proc_unlock(p); |
3141 | printf("low swap: attempt to unkill pid %d (%s) ignored\n" , p->p_pid, p->p_comm); |
3142 | break; |
3143 | |
3144 | default: |
3145 | proc_unlock(p); |
3146 | } |
3147 | |
3148 | } else |
3149 | proc_unlock(p); |
3150 | |
3151 | proc_rele(p); |
3152 | return(0); |
3153 | } |
3154 | |
3155 | |
3156 | |
/*
 * Accumulator filled in by proc_pcontrol_filter() during a low-swap
 * scan (see no_paging_space_action()).
 *
 *   pcs_*  - procs with a control policy and no action taken yet
 *   npcs_* - procs with no control policy
 *   apcs_* - procs already acted upon
 *
 * The *_max_size/*_pid/*_uniqueid triples track the single largest
 * compressor user in each category.
 */
struct no_paging_space
{
	uint64_t	pcs_max_size;	/* largest compressed footprint (actionable) */
	uint64_t	pcs_uniqueid;	/* uniqueid of that proc (guards pid reuse) */
	int		pcs_pid;	/* pid of that proc */
	int		pcs_proc_count;
	uint64_t	pcs_total_size;

	uint64_t	npcs_max_size;	/* largest compressed footprint (non-actionable) */
	uint64_t	npcs_uniqueid;
	int		npcs_pid;
	int		npcs_proc_count;
	uint64_t	npcs_total_size;

	int		apcs_proc_count;
	uint64_t	apcs_total_size;
};
3174 | |
3175 | |
3176 | static int |
3177 | proc_pcontrol_filter(proc_t p, void *arg) |
3178 | { |
3179 | struct no_paging_space *nps; |
3180 | uint64_t compressed; |
3181 | |
3182 | nps = (struct no_paging_space *)arg; |
3183 | |
3184 | compressed = get_task_compressed(p->task); |
3185 | |
3186 | if (PROC_CONTROL_STATE(p)) { |
3187 | if (PROC_ACTION_STATE(p) == 0) { |
3188 | if (compressed > nps->pcs_max_size) { |
3189 | nps->pcs_pid = p->p_pid; |
3190 | nps->pcs_uniqueid = p->p_uniqueid; |
3191 | nps->pcs_max_size = compressed; |
3192 | } |
3193 | nps->pcs_total_size += compressed; |
3194 | nps->pcs_proc_count++; |
3195 | } else { |
3196 | nps->apcs_total_size += compressed; |
3197 | nps->apcs_proc_count++; |
3198 | } |
3199 | } else { |
3200 | if (compressed > nps->npcs_max_size) { |
3201 | nps->npcs_pid = p->p_pid; |
3202 | nps->npcs_uniqueid = p->p_uniqueid; |
3203 | nps->npcs_max_size = compressed; |
3204 | } |
3205 | nps->npcs_total_size += compressed; |
3206 | nps->npcs_proc_count++; |
3207 | |
3208 | } |
3209 | return (0); |
3210 | } |
3211 | |
3212 | |
/* No-op proc_iterate() callout; all the work happens in the filter. */
static int
proc_pcontrol_null(__unused proc_t p, __unused void *arg)
{
	return(PROC_RETURNED);
}
3218 | |
3219 | |
3220 | /* |
3221 | * Deal with the low on compressor pool space condition... this function |
3222 | * gets called when we are approaching the limits of the compressor pool or |
3223 | * we are unable to create a new swap file. |
 * Since this eventually creates a memory deadlock situation, we need to take action to free up
3225 | * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely. |
3226 | * There are 2 categories of processes to deal with. Those that have an action |
3227 | * associated with them by the task itself and those that do not. Actionable |
3228 | * tasks can have one of three categories specified: ones that |
3229 | * can be killed immediately, ones that should be suspended, and ones that should |
3230 | * be throttled. Processes that do not have an action associated with them are normally |
3231 | * ignored unless they are utilizing such a large percentage of the compressor pool (currently 50%) |
3232 | * that only by killing them can we hope to put the system back into a usable state. |
3233 | */ |
3234 | |
3235 | #define NO_PAGING_SPACE_DEBUG 0 |
3236 | |
3237 | extern uint64_t vm_compressor_pages_compressed(void); |
3238 | |
3239 | struct timeval last_no_space_action = {0, 0}; |
3240 | |
3241 | #if DEVELOPMENT || DEBUG |
3242 | extern boolean_t kill_on_no_paging_space; |
3243 | #endif /* DEVELOPMENT || DEBUG */ |
3244 | |
3245 | #define MB_SIZE (1024 * 1024ULL) |
3246 | boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t); |
3247 | |
3248 | extern int32_t max_kill_priority; |
3249 | extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index); |
3250 | |
3251 | int |
3252 | no_paging_space_action() |
3253 | { |
3254 | proc_t p; |
3255 | struct no_paging_space nps; |
3256 | struct timeval now; |
3257 | |
3258 | /* |
3259 | * Throttle how often we come through here. Once every 5 seconds should be plenty. |
3260 | */ |
3261 | microtime(&now); |
3262 | |
3263 | if (now.tv_sec <= last_no_space_action.tv_sec + 5) |
3264 | return (0); |
3265 | |
3266 | /* |
3267 | * Examine all processes and find the biggest (biggest is based on the number of pages this |
3268 | * task has in the compressor pool) that has been marked to have some action |
3269 | * taken when swap space runs out... we also find the biggest that hasn't been marked for |
3270 | * action. |
3271 | * |
3272 | * If the biggest non-actionable task is over the "dangerously big" threashold (currently 50% of |
3273 | * the total number of pages held by the compressor, we go ahead and kill it since no other task |
3274 | * can have any real effect on the situation. Otherwise, we go after the actionable process. |
3275 | */ |
3276 | bzero(&nps, sizeof(nps)); |
3277 | |
3278 | proc_iterate(PROC_ALLPROCLIST, proc_pcontrol_null, (void *)NULL, proc_pcontrol_filter, (void *)&nps); |
3279 | |
3280 | #if NO_PAGING_SPACE_DEBUG |
3281 | printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n" , |
3282 | nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size); |
3283 | printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n" , |
3284 | nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size); |
3285 | printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n" , |
3286 | nps.apcs_proc_count, nps.apcs_total_size); |
3287 | #endif |
3288 | if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) { |
3289 | /* |
3290 | * for now we'll knock out any task that has more then 50% of the pages |
3291 | * held by the compressor |
3292 | */ |
3293 | if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) { |
3294 | |
3295 | if (nps.npcs_uniqueid == p->p_uniqueid) { |
3296 | /* |
3297 | * verify this is still the same process |
3298 | * in case the proc exited and the pid got reused while |
3299 | * we were finishing the proc_iterate and getting to this point |
3300 | */ |
3301 | last_no_space_action = now; |
3302 | |
3303 | printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n" , p->p_pid, p->p_comm, (nps.pcs_max_size/MB_SIZE)); |
3304 | psignal(p, SIGKILL); |
3305 | |
3306 | proc_rele(p); |
3307 | |
3308 | return (0); |
3309 | } |
3310 | |
3311 | proc_rele(p); |
3312 | } |
3313 | } |
3314 | |
3315 | /* |
3316 | * We have some processes within our jetsam bands of consideration and hence can be killed. |
3317 | * So we will invoke the memorystatus thread to go ahead and kill something. |
3318 | */ |
3319 | if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) { |
3320 | |
3321 | last_no_space_action = now; |
3322 | memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */); |
3323 | return (1); |
3324 | } |
3325 | |
3326 | /* |
3327 | * No eligible processes to kill. So let's suspend/kill the largest |
3328 | * process depending on its policy control specifications. |
3329 | */ |
3330 | |
3331 | if (nps.pcs_max_size > 0) { |
3332 | if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) { |
3333 | |
3334 | if (nps.pcs_uniqueid == p->p_uniqueid) { |
3335 | /* |
3336 | * verify this is still the same process |
3337 | * in case the proc exited and the pid got reused while |
3338 | * we were finishing the proc_iterate and getting to this point |
3339 | */ |
3340 | last_no_space_action = now; |
3341 | |
3342 | proc_dopcontrol(p); |
3343 | |
3344 | proc_rele(p); |
3345 | |
3346 | return (1); |
3347 | } |
3348 | |
3349 | proc_rele(p); |
3350 | } |
3351 | } |
3352 | last_no_space_action = now; |
3353 | |
3354 | printf("low swap: unable to find any eligible processes to take action on\n" ); |
3355 | |
3356 | return (0); |
3357 | } |
3358 | |
3359 | int |
3360 | proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval) |
3361 | { |
3362 | int ret = 0; |
3363 | proc_t target_proc = PROC_NULL; |
3364 | pid_t target_pid = uap->pid; |
3365 | uint64_t target_uniqueid = uap->uniqueid; |
3366 | task_t target_task = NULL; |
3367 | |
3368 | if (priv_check_cred(kauth_cred_get(), PRIV_PROC_TRACE_INSPECT, 0)) { |
3369 | ret = EPERM; |
3370 | goto out; |
3371 | } |
3372 | target_proc = proc_find(target_pid); |
3373 | if (target_proc != PROC_NULL) { |
3374 | if (target_uniqueid != proc_uniqueid(target_proc)) { |
3375 | ret = ENOENT; |
3376 | goto out; |
3377 | } |
3378 | |
3379 | target_task = proc_task(target_proc); |
3380 | if (task_send_trace_memory(target_task, target_pid, target_uniqueid)) { |
3381 | ret = EINVAL; |
3382 | goto out; |
3383 | } |
3384 | } else |
3385 | ret = ENOENT; |
3386 | |
3387 | out: |
3388 | if (target_proc != PROC_NULL) |
3389 | proc_rele(target_proc); |
3390 | return (ret); |
3391 | } |
3392 | |
3393 | #if VM_SCAN_FOR_SHADOW_CHAIN |
3394 | extern int vm_map_shadow_max(vm_map_t map); |
3395 | int proc_shadow_max(void); |
3396 | int proc_shadow_max(void) |
3397 | { |
3398 | int retval, max; |
3399 | proc_t p; |
3400 | task_t task; |
3401 | vm_map_t map; |
3402 | |
3403 | max = 0; |
3404 | proc_list_lock(); |
3405 | for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) { |
3406 | if (p->p_stat == SIDL) |
3407 | continue; |
3408 | task = p->task; |
3409 | if (task == NULL) { |
3410 | continue; |
3411 | } |
3412 | map = get_task_map(task); |
3413 | if (map == NULL) { |
3414 | continue; |
3415 | } |
3416 | retval = vm_map_shadow_max(map); |
3417 | if (retval > max) { |
3418 | max = retval; |
3419 | } |
3420 | } |
3421 | proc_list_unlock(); |
3422 | return max; |
3423 | } |
3424 | #endif /* VM_SCAN_FOR_SHADOW_CHAIN */ |
3425 | |
3426 | void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid); |
3427 | void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid) |
3428 | { |
3429 | if (target_proc != NULL) { |
3430 | target_proc->p_responsible_pid = responsible_pid; |
3431 | } |
3432 | return; |
3433 | } |
3434 | |
3435 | int |
3436 | proc_chrooted(proc_t p) |
3437 | { |
3438 | int retval = 0; |
3439 | |
3440 | if (p) { |
3441 | proc_fdlock(p); |
3442 | retval = (p->p_fd->fd_rdir != NULL) ? 1 : 0; |
3443 | proc_fdunlock(p); |
3444 | } |
3445 | |
3446 | return retval; |
3447 | } |
3448 | |
3449 | boolean_t |
3450 | proc_send_synchronous_EXC_RESOURCE(proc_t p) |
3451 | { |
3452 | if (p == PROC_NULL) |
3453 | return FALSE; |
3454 | |
3455 | /* Send sync EXC_RESOURCE if the process is traced */ |
3456 | if (ISSET(p->p_lflag, P_LTRACED)) { |
3457 | return TRUE; |
3458 | } |
3459 | return FALSE; |
3460 | } |
3461 | |
3462 | #ifdef CONFIG_32BIT_TELEMETRY |
3463 | void |
3464 | proc_log_32bit_telemetry(proc_t p) |
3465 | { |
3466 | /* Gather info */ |
3467 | char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 }; |
3468 | char * signature_cur_end = &signature_buf[0]; |
3469 | char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1]; |
3470 | int bytes_printed = 0; |
3471 | |
3472 | const char * teamid = NULL; |
3473 | const char * identity = NULL; |
3474 | struct cs_blob * csblob = NULL; |
3475 | |
3476 | proc_list_lock(); |
3477 | |
3478 | /* |
3479 | * Get proc name and parent proc name; if the parent execs, we'll get a |
3480 | * garbled name. |
3481 | */ |
3482 | bytes_printed = snprintf(signature_cur_end, |
3483 | signature_buf_end - signature_cur_end, |
3484 | "%s,%s," , p->p_name, |
3485 | (p->p_pptr ? p->p_pptr->p_name : "" )); |
3486 | |
3487 | if (bytes_printed > 0) { |
3488 | signature_cur_end += bytes_printed; |
3489 | } |
3490 | |
3491 | proc_list_unlock(); |
3492 | |
3493 | /* Get developer info. */ |
3494 | vnode_t v = proc_getexecutablevnode(p); |
3495 | |
3496 | if (v) { |
3497 | csblob = csvnode_get_blob(v, 0); |
3498 | |
3499 | if (csblob) { |
3500 | teamid = csblob_get_teamid(csblob); |
3501 | identity = csblob_get_identity(csblob); |
3502 | } |
3503 | } |
3504 | |
3505 | if (teamid == NULL) { |
3506 | teamid = "" ; |
3507 | } |
3508 | |
3509 | if (identity == NULL) { |
3510 | identity = "" ; |
3511 | } |
3512 | |
3513 | bytes_printed = snprintf(signature_cur_end, |
3514 | signature_buf_end - signature_cur_end, |
3515 | "%s,%s" , teamid, identity); |
3516 | |
3517 | if (bytes_printed > 0) { |
3518 | signature_cur_end += bytes_printed; |
3519 | } |
3520 | |
3521 | if (v) { |
3522 | vnode_put(v); |
3523 | } |
3524 | |
3525 | /* |
3526 | * We may want to rate limit here, although the SUMMARIZE key should |
3527 | * help us aggregate events in userspace. |
3528 | */ |
3529 | |
3530 | /* Emit log */ |
3531 | kern_asl_msg(LOG_DEBUG, "messagetracer" , 3, |
3532 | /* 0 */ "com.apple.message.domain" , "com.apple.kernel.32bit_exec" , |
3533 | /* 1 */ "com.apple.message.signature" , signature_buf, |
3534 | /* 2 */ "com.apple.message.summarize" , "YES" , |
3535 | NULL); |
3536 | } |
3537 | #endif /* CONFIG_32BIT_TELEMETRY */ |
3538 | |