/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *    The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/sysproto.h>

#include <security/audit/audit.h>

#include <machine/vmparam.h>

#include <mach/mach_types.h>
#include <mach/time_value.h>
#include <mach/task.h>
#include <mach/task_info.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/thread_act.h>    /* for thread_policy_set( ) */
#include <kern/thread.h>
#include <kern/policy_internal.h>

#include <kern/task.h>
#include <kern/clock.h>         /* for absolutetime_to_microtime() */
#include <netinet/in.h>         /* for TRAFFIC_MGT_SO_* */
#if CONFIG_FREEZE
#include <sys/kern_memorystatus_freeze.h>   /* for memorystatus_freezer_mark_ui_transition */
#endif /* CONFIG_FREEZE */
#include <sys/socketvar.h>      /* for struct socket */
#if NECP
#include <net/necp.h>
#endif /* NECP */

#include <vm/vm_map.h>

#include <kern/assert.h>
#include <sys/resource.h>
#include <sys/resource_private.h>
#include <sys/priv.h>
#include <IOKit/IOBSD.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

static void proc_limitblock(proc_t p);
static void proc_limitunblock(proc_t p);
static void proc_limitupdate(proc_t p, bool unblock,
    void (^update)(struct plimit *plim));

static int donice(struct proc *curp, struct proc *chgp, int n);
static int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
static void do_background_socket(struct proc *p, thread_t thread);
static int do_background_thread(thread_t thread, int priority);
static int do_background_proc(struct proc *curp, struct proc *targetp, int priority);
static int set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority);
static int proc_set_darwin_role(proc_t curp, proc_t targetp, int priority);
static int proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority);
static int proc_set_game_mode(proc_t targetp, int priority);
static int proc_get_game_mode(proc_t targetp, int *priority);
static int get_background_proc(struct proc *curp, struct proc *targetp, int *priority);

int fill_task_rusage(task_t task, rusage_info_current *ri);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task, bool external);

rlim_t maxdmap = MAXDSIZ;                   /* XXX */
rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE;   /* XXX */

/* For plimit reference count */
os_refgrp_decl(, rlimit_refgrp, "plimit_refcnt", NULL);

static KALLOC_TYPE_DEFINE(plimit_zone, struct plimit, KT_DEFAULT);

/*
 * Limits on the number of open files per process, and the number
 * of child processes per process.
 *
 * Note: would be in kern/subr_param.c in FreeBSD.
 */
__private_extern__ int maxfilesperproc = OPEN_MAX;    /* per-proc open files limit */

SYSCTL_INT(_kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxprocperuid, 0, "Maximum processes allowed per userid");

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxfilesperproc, 0, "Maximum files allowed open per process");

/* Args and fn for proc_iterate callback used in setpriority */
struct puser_nice_args {
    proc_t curp;
    int prio;
    id_t who;
    int *foundp;
    int *errorp;
};
static int puser_donice_callback(proc_t p, void *arg);

/* Args and fn for pgrp_iterate callback used in setpriority */
struct ppgrp_nice_args {
    proc_t curp;
    int prio;
    int *foundp;
    int *errorp;
};
static int ppgrp_donice_callback(proc_t p, void *arg);
/*
 * Resource controls and accounting.
 */
int
getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval)
{
    struct proc *p;
    int low = PRIO_MAX + 1;
    kauth_cred_t my_cred;
    int refheld = 0;
    int error = 0;

    /* would also test (uap->who < 0), but id_t is unsigned */
    if (uap->who > 0x7fffffff) {
        return EINVAL;
    }

    switch (uap->which) {
    case PRIO_PROCESS:
        if (uap->who == 0) {
            p = curp;
            low = p->p_nice;
        } else {
            p = proc_find(uap->who);
            if (p == 0) {
                break;
            }
            low = p->p_nice;
            proc_rele(p);
        }
        break;

    case PRIO_PGRP: {
        struct pgrp *pg = PGRP_NULL;

        if (uap->who == 0) {
            /* returns the pgrp to ref */
            pg = proc_pgrp(curp, NULL);
        } else if ((pg = pgrp_find(uap->who)) == PGRP_NULL) {
            break;
        }
        /* No need for iteration as it is a simple scan */
        pgrp_lock(pg);
        PGMEMBERS_FOREACH(pg, p) {
            if (p->p_nice < low) {
                low = p->p_nice;
            }
        }
        pgrp_unlock(pg);
        pgrp_rele(pg);
        break;
    }

    case PRIO_USER:
        if (uap->who == 0) {
            uap->who = kauth_cred_getuid(kauth_cred_get());
        }

        proc_list_lock();

        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
            my_cred = kauth_cred_proc_ref(p);
            if (kauth_cred_getuid(my_cred) == uap->who &&
                p->p_nice < low) {
                low = p->p_nice;
            }
            kauth_cred_unref(&my_cred);
        }

        proc_list_unlock();

        break;

    case PRIO_DARWIN_THREAD:
        /* we currently only support the current thread */
        if (uap->who != 0) {
            return EINVAL;
        }

        low = proc_get_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG);

        break;

    case PRIO_DARWIN_PROCESS:
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = get_background_proc(curp, p, &low);

        if (refheld) {
            proc_rele(p);
        }
        if (error) {
            return error;
        }
        break;

    case PRIO_DARWIN_ROLE:
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_get_darwin_role(curp, p, &low);

        if (refheld) {
            proc_rele(p);
        }
        if (error) {
            return error;
        }
        break;

    case PRIO_DARWIN_GAME_MODE:
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_get_game_mode(p, &low);

        if (refheld) {
            proc_rele(p);
        }
        if (error) {
            return error;
        }
        break;

    default:
        return EINVAL;
    }
    if (low == PRIO_MAX + 1) {
        return ESRCH;
    }
    *retval = low;
    return 0;
}

/* Callback function used for proc iteration in PRIO_USER */
static int
puser_donice_callback(proc_t p, void *arg)
{
    int error, n;
    struct puser_nice_args *pun = (struct puser_nice_args *)arg;
    kauth_cred_t my_cred;

    my_cred = kauth_cred_proc_ref(p);
    if (kauth_cred_getuid(my_cred) == pun->who) {
        error = donice(pun->curp, p, pun->prio);
        if (pun->errorp != NULL) {
            *pun->errorp = error;
        }
        if (pun->foundp != NULL) {
            n = *pun->foundp;
            *pun->foundp = n + 1;
        }
    }
    kauth_cred_unref(&my_cred);

    return PROC_RETURNED;
}

/* Callback function used for pgrp iteration in PRIO_PGRP */
static int
ppgrp_donice_callback(proc_t p, void *arg)
{
    int error;
    struct ppgrp_nice_args *pun = (struct ppgrp_nice_args *)arg;
    int n;

    error = donice(pun->curp, p, pun->prio);
    if (pun->errorp != NULL) {
        *pun->errorp = error;
    }
    if (pun->foundp != NULL) {
        n = *pun->foundp;
        *pun->foundp = n + 1;
    }

    return PROC_RETURNED;
}

/*
 * Returns:    0            Success
 *             EINVAL
 *             ESRCH
 *      donice:EPERM
 *      donice:EACCES
 */
/* ARGSUSED */
int
setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval)
{
    struct proc *p;
    int found = 0, error = 0;
    int refheld = 0;

    AUDIT_ARG(cmd, uap->which);
    AUDIT_ARG(owner, uap->who, 0);
    AUDIT_ARG(value32, uap->prio);

    /* would also test (uap->who < 0), but id_t is unsigned */
    if (uap->who > 0x7fffffff) {
        return EINVAL;
    }

    switch (uap->which) {
    case PRIO_PROCESS:
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == 0) {
                break;
            }
            refheld = 1;
        }
        error = donice(curp, p, uap->prio);
        found++;
        if (refheld != 0) {
            proc_rele(p);
        }
        break;

    case PRIO_PGRP: {
        struct pgrp *pg = PGRP_NULL;
        struct ppgrp_nice_args ppgrp;

        if (uap->who == 0) {
            pg = proc_pgrp(curp, NULL);
        } else if ((pg = pgrp_find(uap->who)) == PGRP_NULL) {
            break;
        }

        ppgrp.curp = curp;
        ppgrp.prio = uap->prio;
        ppgrp.foundp = &found;
        ppgrp.errorp = &error;

        pgrp_iterate(pg, ppgrp_donice_callback, (void *)&ppgrp, NULL);
        pgrp_rele(pg);

        break;
    }

    case PRIO_USER: {
        struct puser_nice_args punice;

        if (uap->who == 0) {
            uap->who = kauth_cred_getuid(kauth_cred_get());
        }

        punice.curp = curp;
        punice.prio = uap->prio;
        punice.who = uap->who;
        punice.foundp = &found;
        error = 0;
        punice.errorp = &error;
        proc_iterate(PROC_ALLPROCLIST, puser_donice_callback, (void *)&punice, NULL, NULL);

        break;
    }

    case PRIO_DARWIN_THREAD: {
        /* we currently only support the current thread */
        if (uap->who != 0) {
            return EINVAL;
        }

        error = do_background_thread(current_thread(), uap->prio);
        found++;
        break;
    }

    case PRIO_DARWIN_PROCESS: {
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == 0) {
                break;
            }
            refheld = 1;
        }

        error = do_background_proc(curp, p, uap->prio);

        found++;
        if (refheld != 0) {
            proc_rele(p);
        }
        break;
    }

    case PRIO_DARWIN_GPU: {
        if (uap->who == 0) {
            return EINVAL;
        }

        p = proc_find(uap->who);
        if (p == PROC_NULL) {
            break;
        }

        error = set_gpudeny_proc(curp, p, uap->prio);

        found++;
        proc_rele(p);
        break;
    }

    case PRIO_DARWIN_ROLE: {
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_set_darwin_role(curp, p, uap->prio);

        found++;
        if (refheld != 0) {
            proc_rele(p);
        }
        break;
    }

    case PRIO_DARWIN_GAME_MODE: {
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_set_game_mode(p, uap->prio);

        found++;
        if (refheld != 0) {
            proc_rele(p);
        }
        break;
    }

    default:
        return EINVAL;
    }
    if (found == 0) {
        return ESRCH;
    }
    if (error == EIDRM) {
        *retval = -2;
        error = 0;
    }
    return error;
}
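
/*
 * Illustrative sketch (not part of this file): the classic userland view
 * of the two syscalls above, assuming only <sys/resource.h>:
 *
 *     errno = 0;
 *     int nice = getpriority(PRIO_PROCESS, pid);   // -1 can be a valid
 *     if (nice == -1 && errno != 0)                // nice value, so
 *         err(1, "getpriority");                   // check errno too
 *     if (setpriority(PRIO_PROCESS, pid, nice + 1) == -1)
 *         err(1, "setpriority");
 *
 * Note the EIDRM -> *retval = -2 mapping above: a PRIO_DARWIN_THREAD
 * caller sees setpriority() return -2 when backgrounding stripped an
 * existing QoS policy (see do_background_thread() below).
 */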

/*
 * Returns:    0            Success
 *             EPERM
 *             EACCES
 *      mac_check_proc_sched:???
 */
static int
donice(struct proc *curp, struct proc *chgp, int n)
{
    int error = 0;
    kauth_cred_t ucred;
    kauth_cred_t my_cred;

    ucred = kauth_cred_proc_ref(curp);
    my_cred = kauth_cred_proc_ref(chgp);

    if (suser(ucred, NULL) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(my_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(my_cred)) {
        error = EPERM;
        goto out;
    }
    if (n > PRIO_MAX) {
        n = PRIO_MAX;
    }
    if (n < PRIO_MIN) {
        n = PRIO_MIN;
    }
    if (n < chgp->p_nice && suser(ucred, &curp->p_acflag)) {
        error = EACCES;
        goto out;
    }
#if CONFIG_MACF
    error = mac_proc_check_sched(curp, chgp);
    if (error) {
        goto out;
    }
#endif
    proc_lock(chgp);
    chgp->p_nice = (char)n;
    proc_unlock(chgp);
    (void)resetpriority(chgp);
out:
    kauth_cred_unref(&ucred);
    kauth_cred_unref(&my_cred);
    return error;
}

static int
set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority)
{
    int error = 0;
    kauth_cred_t ucred;
    kauth_cred_t target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    /* TODO: Entitlement instead of uid check */

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    if (curp == targetp) {
        error = EPERM;
        goto out;
    }

#if CONFIG_MACF
    error = mac_proc_check_sched(curp, targetp);
    if (error) {
        goto out;
    }
#endif

    switch (priority) {
    case PRIO_DARWIN_GPU_DENY:
        task_set_gpu_denied(proc_task(targetp), TRUE);
        break;
    case PRIO_DARWIN_GPU_ALLOW:
        task_set_gpu_denied(proc_task(targetp), FALSE);
        break;
    default:
        error = EINVAL;
        goto out;
    }

out:
    kauth_cred_unref(&target_cred);
    return error;
}

static int
proc_set_darwin_role(proc_t curp, proc_t targetp, int priority)
{
    int error = 0;
    uint32_t flagsp = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        if (priv_check_cred(ucred, PRIV_SETPRIORITY_DARWIN_ROLE, 0) != 0) {
            error = EPERM;
            goto out;
        }
    }

    if (curp != targetp) {
#if CONFIG_MACF
        if ((error = mac_proc_check_sched(curp, targetp))) {
            goto out;
        }
#endif
    }

    proc_get_darwinbgstate(proc_task(targetp), &flagsp);
    if ((flagsp & PROC_FLAG_APPLICATION) != PROC_FLAG_APPLICATION) {
        error = ENOTSUP;
        goto out;
    }

    task_role_t role = TASK_UNSPECIFIED;

    if ((error = proc_darwin_role_to_task_role(priority, &role))) {
        goto out;
    }

    proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE,
        TASK_POLICY_ROLE, role);

#if CONFIG_FREEZE
    if (priority == PRIO_DARWIN_ROLE_UI_FOCAL || priority == PRIO_DARWIN_ROLE_UI || priority == PRIO_DARWIN_ROLE_UI_NON_FOCAL) {
        memorystatus_freezer_mark_ui_transition(targetp);
    }
#endif /* CONFIG_FREEZE */

out:
    kauth_cred_unref(&target_cred);
    return error;
}
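
/*
 * Illustrative sketch (not part of this file): role changes arrive here
 * through the setpriority() MUX, e.g.
 *
 *     setpriority(PRIO_DARWIN_ROLE, pid, PRIO_DARWIN_ROLE_UI_FOCAL);
 *
 * with one of the PRIO_DARWIN_ROLE_* values from <sys/resource.h>.
 * Per the PROC_FLAG_APPLICATION check above, ENOTSUP is returned for
 * processes that are not marked as applications.
 */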

static int
proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority)
{
    int error = 0;
    int role = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    if (curp != targetp) {
#if CONFIG_MACF
        if ((error = mac_proc_check_sched(curp, targetp))) {
            goto out;
        }
#endif
    }

    role = proc_get_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);

    *priority = proc_task_role_to_darwin_role(role);

out:
    kauth_cred_unref(&target_cred);
    return error;
}

#define SET_GAME_MODE_ENTITLEMENT "com.apple.private.set-game-mode"

static int
proc_set_game_mode(proc_t targetp, int priority)
{
    int error = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    boolean_t entitled = FALSE;
    entitled = IOCurrentTaskHasEntitlement(SET_GAME_MODE_ENTITLEMENT);
    if (!entitled) {
        error = EPERM;
        goto out;
    }

    /* Even with the entitlement, non-root is only allowed to set same-user */
    if (!kauth_cred_issuser(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    switch (priority) {
    case PRIO_DARWIN_GAME_MODE_OFF:
        task_set_game_mode(proc_task(targetp), false);
        break;
    case PRIO_DARWIN_GAME_MODE_ON:
        task_set_game_mode(proc_task(targetp), true);
        break;
    default:
        error = EINVAL;
        goto out;
    }

out:
    kauth_cred_unref(&target_cred);
    return error;
}

static int
proc_get_game_mode(proc_t targetp, int *priority)
{
    int error = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    boolean_t entitled = FALSE;
    entitled = IOCurrentTaskHasEntitlement(SET_GAME_MODE_ENTITLEMENT);

    /* Root is allowed to get without the entitlement */
    if (!kauth_cred_issuser(ucred) && !entitled) {
        error = EPERM;
        goto out;
    }

    /* Even with the entitlement, non-root is only allowed to see same-user */
    if (!kauth_cred_issuser(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    if (task_get_game_mode(proc_task(targetp))) {
        *priority = PRIO_DARWIN_GAME_MODE_ON;
    } else {
        *priority = PRIO_DARWIN_GAME_MODE_OFF;
    }

out:
    kauth_cred_unref(&target_cred);
    return error;
}
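
/*
 * Illustrative sketch (not part of this file): Game Mode is toggled and
 * queried through the same MUX, gated by SET_GAME_MODE_ENTITLEMENT:
 *
 *     setpriority(PRIO_DARWIN_GAME_MODE, pid, PRIO_DARWIN_GAME_MODE_ON);
 *     errno = 0;
 *     int mode = getpriority(PRIO_DARWIN_GAME_MODE, pid);
 *
 * where mode comes back as PRIO_DARWIN_GAME_MODE_ON/OFF via *retval.
 */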

static int
get_background_proc(struct proc *curp, struct proc *targetp, int *priority)
{
    int external = 0;
    int error = 0;
    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

    *priority = proc_get_task_policy(current_task(), external, TASK_POLICY_DARWIN_BG);

out:
    kauth_cred_unref(&target_cred);
    return error;
}

static int
do_background_proc(struct proc *curp, struct proc *targetp, int priority)
{
#if !CONFIG_MACF
#pragma unused(curp)
#endif
    int error = 0;
    kauth_cred_t ucred;
    kauth_cred_t target_cred;
    int external;
    int enable;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

#if CONFIG_MACF
    error = mac_proc_check_sched(curp, targetp);
    if (error) {
        goto out;
    }
#endif

    external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

    switch (priority) {
    case PRIO_DARWIN_BG:
        enable = TASK_POLICY_ENABLE;
        break;
    case PRIO_DARWIN_NONUI:
        /* ignored for compatibility */
        goto out;
    default:
        /* TODO: EINVAL if priority != 0 */
        enable = TASK_POLICY_DISABLE;
        break;
    }

    proc_set_task_policy(proc_task(targetp), external, TASK_POLICY_DARWIN_BG, enable);

out:
    kauth_cred_unref(&target_cred);
    return error;
}

static void
do_background_socket(struct proc *p, thread_t thread)
{
#if SOCKETS
    struct fileproc *fp;
    int background = false;
#if NECP
    int update_necp = false;
#endif /* NECP */

    if (thread != THREAD_NULL &&
        get_threadtask(thread) != proc_task(p)) {
        return;
    }

    proc_fdlock(p);

    if (thread != THREAD_NULL) {
        background = proc_get_effective_thread_policy(thread, TASK_POLICY_ALL_SOCKETS_BG);
    } else {
        background = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ALL_SOCKETS_BG);
    }

    if (background) {
        /*
         * For PRIO_DARWIN_PROCESS (thread is NULL), simply mark
         * the sockets with the background flag.  There's nothing
         * to do here for the PRIO_DARWIN_THREAD case.
         */
        if (thread == THREAD_NULL) {
            fdt_foreach(fp, p) {
                if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
                    struct socket *sockp = (struct socket *)fp_get_data(fp);
                    socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
                    sockp->so_background_thread = NULL;
                }
#if NECP
                else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) {
                    if (necp_set_client_as_background(p, fp, background)) {
                        update_necp = true;
                    }
                }
#endif /* NECP */
            }
        }
    } else {
        /*
         * Disable networking IO throttle.
         * NOTE - It is a known limitation of the current design that we
         * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for
         * sockets created by other threads within this process.
         */
        fdt_foreach(fp, p) {
            struct socket *sockp;

            if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
                sockp = (struct socket *)fp_get_data(fp);
                /* skip if only clearing this thread's sockets */
                if ((thread) && (sockp->so_background_thread != thread)) {
                    continue;
                }
                socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
                sockp->so_background_thread = NULL;
            }
#if NECP
            else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) {
                if (necp_set_client_as_background(p, fp, background)) {
                    update_necp = true;
                }
            }
#endif /* NECP */
        }
    }

    proc_fdunlock(p);

#if NECP
    if (update_necp) {
        necp_update_all_clients();
    }
#endif /* NECP */
#else
#pragma unused(p, thread)
#endif
}

/*
 * do_background_thread
 *
 * Requires: thread reference
 *
 * Returns:    0            Success
 *             EPERM        Tried to background while in vfork
 * XXX - todo - does this need a MACF hook?
 */
static int
do_background_thread(thread_t thread, int priority)
{
    int enable, external;
    int rv = 0;

    /* Backgrounding is unsupported for workq threads */
    if (thread_is_static_param(thread)) {
        return EPERM;
    }

    /* Not allowed to combine QoS and DARWIN_BG, doing so strips the QoS */
    if (thread_has_qos_policy(thread)) {
        thread_remove_qos_policy(thread);
        rv = EIDRM;
    }

    /* TODO: Fail if someone passes something besides 0 or PRIO_DARWIN_BG */
    enable = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE;
    external = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

    proc_set_thread_policy(thread, external, TASK_POLICY_DARWIN_BG, enable);

    return rv;
}
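
/*
 * Illustrative sketch (not part of this file): a thread backgrounds and
 * foregrounds itself with
 *
 *     setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);   // enable
 *     setpriority(PRIO_DARWIN_THREAD, 0, 0);                // disable
 *
 * Only who == 0 (the current thread) is accepted, and a -2 return value
 * signals that an existing QoS policy was removed (the EIDRM case above).
 */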

/*
 * Returns:    0            Success
 *      copyin:EFAULT
 *      dosetrlimit:
 */
/* ARGSUSED */
int
setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval)
{
    struct rlimit alim;
    int error;

    if ((error = copyin(uap->rlp, (caddr_t)&alim,
        sizeof(struct rlimit)))) {
        return error;
    }

    return dosetrlimit(p, uap->which, &alim);
}

/*
 * Returns:    0            Success
 *             EINVAL
 *       suser:EPERM
 *
 * Notes:      EINVAL is returned both for invalid arguments, and in the
 *             case that the current usage (e.g. RLIMIT_STACK) is already
 *             in excess of the requested limit.
 */
static int
dosetrlimit(struct proc *p, u_int which, struct rlimit *newrlim)
{
    struct rlimit rlim, stack_rlim = {.rlim_cur = 0, .rlim_max = 0};
    int error;
    kern_return_t kr;

    /* Mask out POSIX flag, saved above */
    which &= ~_RLIMIT_POSIX_FLAG;

    /* Unknown resource */
    if (which >= RLIM_NLIMITS) {
        return EINVAL;
    }

    proc_lock(p);

    /* Only one thread is able to change the current process's rlimit values */
    proc_limitblock(p);

    /*
     * Take a snapshot of the current rlimit values and read this throughout
     * this routine.  This minimizes the critical sections and allows other
     * processes in the system to access the plimit while we are in the
     * middle of this setrlimit call.
     */
    rlim = smr_serialized_load(&p->p_limit)->pl_rlimit[which];

    proc_unlock(p);

    error = 0;
    /* Sanity check: new soft limit cannot exceed new hard limit */
    if (newrlim->rlim_cur > newrlim->rlim_max) {
        error = EINVAL;
    }
    /*
     * Sanity check: only the super-user may raise the hard limit.
     * newrlim->rlim_cur > rlim.rlim_max implies that the call
     * is increasing the hard limit as well.
     */
    else if (newrlim->rlim_cur > rlim.rlim_max || newrlim->rlim_max > rlim.rlim_max) {
        /* suser() returns 0 if the calling thread is super user. */
        error = suser(kauth_cred_get(), &p->p_acflag);
    }

    if (error) {
        /* Invalid setrlimit request: EINVAL or EPERM */
        goto out;
    }

    /* We have the reader lock of the process's plimit so it's safe to read the rlimit values */
    switch (which) {
    case RLIMIT_CPU:
        if (newrlim->rlim_cur == RLIM_INFINITY) {
            task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);
            timerclear(&p->p_rlim_cpu);
        } else {
            task_absolutetime_info_data_t tinfo;
            mach_msg_type_number_t count;
            struct timeval ttv, tv;
            clock_sec_t tv_sec;
            clock_usec_t tv_usec;

            count = TASK_ABSOLUTETIME_INFO_COUNT;
            task_info(proc_task(p), TASK_ABSOLUTETIME_INFO, (task_info_t)&tinfo, &count);
            absolutetime_to_microtime(tinfo.total_user + tinfo.total_system, &tv_sec, &tv_usec);
            ttv.tv_sec = tv_sec;
            ttv.tv_usec = tv_usec;

            tv.tv_sec = (newrlim->rlim_cur > __INT_MAX__ ? __INT_MAX__ : (__darwin_time_t)newrlim->rlim_cur);
            tv.tv_usec = 0;
            timersub(&tv, &ttv, &p->p_rlim_cpu);

            timerclear(&tv);
            if (timercmp(&p->p_rlim_cpu, &tv, >)) {
                task_vtimer_set(proc_task(p), TASK_VTIMER_RLIM);
            } else {
                task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);

                timerclear(&p->p_rlim_cpu);

                psignal(p, SIGXCPU);
            }
        }
        break;

    case RLIMIT_DATA:
#if 00
        if (newrlim->rlim_cur > maxdmap) {
            newrlim->rlim_cur = maxdmap;
        }
        if (newrlim->rlim_max > maxdmap) {
            newrlim->rlim_max = maxdmap;
        }
#endif

        /* Over to Mach VM to validate the new data limit */
        if (vm_map_set_data_limit(current_map(), newrlim->rlim_cur) != KERN_SUCCESS) {
            /* The limit specified cannot be lowered because current usage is already higher than the limit. */
            error = EINVAL;
            goto out;
        }
        break;

    case RLIMIT_STACK:
        if (p->p_lflag & P_LCUSTOM_STACK) {
            /* Process has a custom stack set - rlimit cannot be used to change it */
            error = EINVAL;
            goto out;
        }

        /*
         * Note: the real stack size limit is enforced by maxsmap, not a process's RLIMIT_STACK.
         *
         * The kernel uses maxsmap to control the actual stack size limit.  While we allow
         * processes to set RLIMIT_STACK to RLIM_INFINITY (UNIX 03), accessing memory
         * beyond maxsmap will still trigger an exception.
         *
         * stack_rlim is used to store the user-defined RLIMIT_STACK values while we adjust
         * the stack size using the kernel limit (i.e. maxsmap).
         */
        if (newrlim->rlim_cur > maxsmap ||
            newrlim->rlim_max > maxsmap) {
            if (newrlim->rlim_cur > maxsmap) {
                stack_rlim.rlim_cur = newrlim->rlim_cur;
                newrlim->rlim_cur = maxsmap;
            }
            if (newrlim->rlim_max > maxsmap) {
                stack_rlim.rlim_max = newrlim->rlim_max;
                newrlim->rlim_max = maxsmap;
            }
        }

        /*
         * Stack is allocated to the max at exec time with only
         * "rlim_cur" bytes accessible.  If stack limit is going
         * up make more accessible, if going down make inaccessible.
         */
        if (newrlim->rlim_cur > rlim.rlim_cur) {
            mach_vm_offset_t addr;
            mach_vm_size_t size;

            /* grow stack */
            size = newrlim->rlim_cur;
            if (round_page_overflow(size, &size)) {
                error = EINVAL;
                goto out;
            }
            size -= round_page_64(rlim.rlim_cur);

            addr = (mach_vm_offset_t)(p->user_stack - round_page_64(newrlim->rlim_cur));
            kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_DEFAULT);
            if (kr != KERN_SUCCESS) {
                error = EINVAL;
                goto out;
            }
        } else if (newrlim->rlim_cur < rlim.rlim_cur) {
            mach_vm_offset_t addr;
            mach_vm_size_t size;
            uint64_t cur_sp;

            /* shrink stack */

            /*
             * First check if new stack limit would agree
             * with current stack usage.
             * Get the current thread's stack pointer...
             */
            cur_sp = thread_adjuserstack(current_thread(), 0);
            if (cur_sp <= p->user_stack &&
                cur_sp > (p->user_stack - round_page_64(rlim.rlim_cur))) {
                /* stack pointer is in main stack */
                if (cur_sp <= (p->user_stack - round_page_64(newrlim->rlim_cur))) {
                    /*
                     * New limit would cause current usage to be invalid:
                     * reject new limit.
                     */
                    error = EINVAL;
                    goto out;
                }
            } else {
                /* not on the main stack: reject */
                error = EINVAL;
                goto out;
            }

            size = round_page_64(rlim.rlim_cur);
            size -= round_page_64(newrlim->rlim_cur);

            addr = (mach_vm_offset_t)(p->user_stack - round_page_64(rlim.rlim_cur));

            kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_NONE);
            if (kr != KERN_SUCCESS) {
                error = EINVAL;
                goto out;
            }
        } else {
            /* no change ... */
        }

        /*
         * We've adjusted the process's stack region.  If the user-defined limit is greater
         * than maxsmap, we need to reflect this change in the rlimit interface.
         */
        if (stack_rlim.rlim_cur != 0) {
            newrlim->rlim_cur = stack_rlim.rlim_cur;
        }
        if (stack_rlim.rlim_max != 0) {
            newrlim->rlim_max = stack_rlim.rlim_max;
        }
        break;

    case RLIMIT_NOFILE:
        /*
         * Nothing to be done here; we already performed the sanity checks before entering this switch block.
         * The real NOFILE limit enforced by the kernel is capped at MIN(RLIMIT_NOFILE, maxfilesperproc).
         */
        break;

    case RLIMIT_AS:
        /* Over to Mach VM to validate the new address space limit */
        if (vm_map_set_size_limit(current_map(), newrlim->rlim_cur) != KERN_SUCCESS) {
            /* The limit specified cannot be lowered because current usage is already higher than the limit. */
            error = EINVAL;
            goto out;
        }
        break;

    case RLIMIT_NPROC:
        /*
         * Only root can set to the maxproc limits, as it is
         * systemwide resource; all others are limited to
         * maxprocperuid (presumably less than maxproc).
         */
        if (kauth_cred_issuser(kauth_cred_get())) {
            if (newrlim->rlim_cur > (rlim_t)maxproc) {
                newrlim->rlim_cur = maxproc;
            }
            if (newrlim->rlim_max > (rlim_t)maxproc) {
                newrlim->rlim_max = maxproc;
            }
        } else {
            if (newrlim->rlim_cur > (rlim_t)maxprocperuid) {
                newrlim->rlim_cur = maxprocperuid;
            }
            if (newrlim->rlim_max > (rlim_t)maxprocperuid) {
                newrlim->rlim_max = maxprocperuid;
            }
        }
        break;

    case RLIMIT_MEMLOCK:
        /*
         * Tell the Mach VM layer about the new limit value.
         */
        newrlim->rlim_cur = (vm_size_t)newrlim->rlim_cur;
        vm_map_set_user_wire_limit(current_map(), (vm_size_t)newrlim->rlim_cur);
        break;
    } /* switch... */

    /* Everything checks out and we are now ready to update the rlimit */
    error = 0;

out:

    if (error == 0) {
        /*
         * COW the current plimit if it's shared, otherwise update it in place.
         * Finally unblock other threads wishing to change plimit.
         */
        proc_limitupdate(p, true, ^(struct plimit *plim) {
            plim->pl_rlimit[which] = *newrlim;
        });
    } else {
        /*
         * This setrlimit has failed, just leave the plimit as is and unblock other
         * threads wishing to change plimit.
         */
        proc_lock(p);
        proc_limitunblock(p);
        proc_unlock(p);
    }

    return error;
}

/* ARGSUSED */
int
getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval)
{
    struct rlimit lim = {};

    /*
     * Take out flag now in case we need to use it to trigger variant
     * behaviour later.
     */
    uap->which &= ~_RLIMIT_POSIX_FLAG;

    if (uap->which >= RLIM_NLIMITS) {
        return EINVAL;
    }
    lim = proc_limitget(p, uap->which);
    return copyout((caddr_t)&lim,
        uap->rlp, sizeof(struct rlimit));
}
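
/*
 * Illustrative sketch (not part of this file): the typical userland
 * round trip, e.g. raising the soft open-file limit to the hard limit:
 *
 *     struct rlimit rl;
 *     if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *         rl.rlim_cur = rl.rlim_max;
 *         if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
 *             err(1, "setrlimit");
 *     }
 *
 * Even then, as noted in dosetrlimit(), the effective NOFILE limit is
 * capped at MIN(rlim_cur, maxfilesperproc).
 */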

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
/* No lock on proc is held for this.. */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip)
{
    task_t task;

    timerclear(up);
    timerclear(sp);
    if (ip != NULL) {
        timerclear(ip);
    }

    task = proc_task(p);
    if (task) {
        mach_task_basic_info_data_t tinfo;
        task_thread_times_info_data_t ttimesinfo;
        task_events_info_data_t teventsinfo;
        mach_msg_type_number_t task_info_count, task_ttimes_count;
        mach_msg_type_number_t task_events_count;
        struct timeval ut, st;

        task_info_count = MACH_TASK_BASIC_INFO_COUNT;
        task_info(task, MACH_TASK_BASIC_INFO,
            (task_info_t)&tinfo, &task_info_count);
        ut.tv_sec = tinfo.user_time.seconds;
        ut.tv_usec = tinfo.user_time.microseconds;
        st.tv_sec = tinfo.system_time.seconds;
        st.tv_usec = tinfo.system_time.microseconds;
        timeradd(&ut, up, up);
        timeradd(&st, sp, sp);

        task_ttimes_count = TASK_THREAD_TIMES_INFO_COUNT;
        task_info(task, TASK_THREAD_TIMES_INFO,
            (task_info_t)&ttimesinfo, &task_ttimes_count);

        ut.tv_sec = ttimesinfo.user_time.seconds;
        ut.tv_usec = ttimesinfo.user_time.microseconds;
        st.tv_sec = ttimesinfo.system_time.seconds;
        st.tv_usec = ttimesinfo.system_time.microseconds;
        timeradd(&ut, up, up);
        timeradd(&st, sp, sp);

        task_events_count = TASK_EVENTS_INFO_COUNT;
        task_info(task, TASK_EVENTS_INFO,
            (task_info_t)&teventsinfo, &task_events_count);

        /*
         * No need to lock "p": this does not need to be
         * completely consistent, right?
         */
        p->p_stats->p_ru.ru_minflt = (teventsinfo.faults -
            teventsinfo.pageins);
        p->p_stats->p_ru.ru_majflt = teventsinfo.pageins;
        p->p_stats->p_ru.ru_nivcsw = (teventsinfo.csw -
            p->p_stats->p_ru.ru_nvcsw);
        if (p->p_stats->p_ru.ru_nivcsw < 0) {
            p->p_stats->p_ru.ru_nivcsw = 0;
        }

        p->p_stats->p_ru.ru_maxrss = (long)tinfo.resident_size_max;
    }
}

__private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
__private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);

/* ARGSUSED */
int
getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval)
{
    struct rusage *rup, rubuf;
    struct user64_rusage rubuf64 = {};
    struct user32_rusage rubuf32 = {};
    size_t retsize = sizeof(rubuf);      /* default: 32 bits */
    caddr_t retbuf = (caddr_t)&rubuf;    /* default: 32 bits */
    struct timeval utime;
    struct timeval stime;

    switch (uap->who) {
    case RUSAGE_SELF:
        calcru(p, &utime, &stime, NULL);
        proc_lock(p);
        rup = &p->p_stats->p_ru;
        rup->ru_utime = utime;
        rup->ru_stime = stime;

        rubuf = *rup;
        proc_unlock(p);

        break;

    case RUSAGE_CHILDREN:
        proc_lock(p);
        rup = &p->p_stats->p_cru;
        rubuf = *rup;
        proc_unlock(p);
        break;

    default:
        return EINVAL;
    }
    if (IS_64BIT_PROCESS(p)) {
        retsize = sizeof(rubuf64);
        retbuf = (caddr_t)&rubuf64;
        munge_user64_rusage(&rubuf, &rubuf64);
    } else {
        retsize = sizeof(rubuf32);
        retbuf = (caddr_t)&rubuf32;
        munge_user32_rusage(&rubuf, &rubuf32);
    }

    return copyout(retbuf, uap->rusage, retsize);
}
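
/*
 * Illustrative sketch (not part of this file):
 *
 *     struct rusage ru;
 *     if (getrusage(RUSAGE_SELF, &ru) == 0)
 *         printf("user %ld.%06lds\n",
 *             (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
 */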

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
    long *ip, *ip2;
    long i;

    timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
    timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
    if (ru->ru_maxrss < ru2->ru_maxrss) {
        ru->ru_maxrss = ru2->ru_maxrss;
    }
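    /*
     * struct rusage keeps its remaining long-typed counters contiguous;
     * ru_first/ru_last bracket them (ru_ixrss .. ru_nivcsw), so a single
     * pointer walk sums every remaining field.
     */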
    ip = &ru->ru_first; ip2 = &ru2->ru_first;
    for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) {
        *ip++ += *ip2++;
    }
}

/*
 * Add the rusage stats of a child to its parent.
 *
 * Adds the child process's own rusage statistics, plus those accumulated
 * from all of the child's children, into the parent's totals.
 *
 * Note: the parent's proc lock should be held while calling this function.
 */
void
update_rusage_info_child(struct rusage_info_child *ri, rusage_info_current *ri_current)
{
    ri->ri_child_user_time += (ri_current->ri_user_time +
        ri_current->ri_child_user_time);
    ri->ri_child_system_time += (ri_current->ri_system_time +
        ri_current->ri_child_system_time);
    ri->ri_child_pkg_idle_wkups += (ri_current->ri_pkg_idle_wkups +
        ri_current->ri_child_pkg_idle_wkups);
    ri->ri_child_interrupt_wkups += (ri_current->ri_interrupt_wkups +
        ri_current->ri_child_interrupt_wkups);
    ri->ri_child_pageins += (ri_current->ri_pageins +
        ri_current->ri_child_pageins);
    ri->ri_child_elapsed_abstime += ((ri_current->ri_proc_exit_abstime -
        ri_current->ri_proc_start_abstime) + ri_current->ri_child_elapsed_abstime);
}

static void
proc_limit_free(smr_node_t node)
{
    struct plimit *plimit = __container_of(node, struct plimit, pl_node);

    zfree(plimit_zone, plimit);
}

static void
proc_limit_release(struct plimit *plimit)
{
    if (os_ref_release(&plimit->pl_refcnt) == 0) {
        smr_proc_task_call(&plimit->pl_node, sizeof(*plimit), proc_limit_free);
    }
}

/*
 * Read the soft limit of the specified resource.
 */
rlim_t
proc_limitgetcur(proc_t p, int which)
{
    rlim_t rlim_cur;

    assert(p);
    assert(which < RLIM_NLIMITS);

    smr_proc_task_enter();
    rlim_cur = smr_entered_load(&p->p_limit)->pl_rlimit[which].rlim_cur;
    smr_proc_task_leave();

    return rlim_cur;
}

/*
 * Handle the commonly requested NOFILE limit, which needs to be clamped
 * with maxfilesperproc.
 */
int
proc_limitgetcur_nofile(struct proc *p)
{
    rlim_t lim = proc_limitgetcur(p, RLIMIT_NOFILE);

    return (int)MIN(lim, maxfilesperproc);
}

/*
 * Write the soft limit of the specified resource.  This is an internal
 * function used only by proc_exit to update RLIMIT_FSIZE in
 * place without invoking setrlimit.
 */
void
proc_limitsetcur_fsize(proc_t p, rlim_t value)
{
    proc_limitupdate(p, false, ^(struct plimit *plimit) {
        plimit->pl_rlimit[RLIMIT_FSIZE].rlim_cur = value;
    });
}

struct rlimit
proc_limitget(proc_t p, int which)
{
    struct rlimit lim;

    assert(which < RLIM_NLIMITS);

    smr_proc_task_enter();
    lim = smr_entered_load(&p->p_limit)->pl_rlimit[which];
    smr_proc_task_leave();

    return lim;
}

void
proc_limitfork(proc_t parent, proc_t child)
{
    struct plimit *plim;

    proc_lock(parent);
    plim = smr_serialized_load(&parent->p_limit);
    os_ref_retain(&plim->pl_refcnt);
    proc_unlock(parent);

    smr_init_store(&child->p_limit, plim);
}

void
proc_limitdrop(proc_t p)
{
    struct plimit *plimit = NULL;

    proc_lock(p);
    plimit = smr_serialized_load(&p->p_limit);
    smr_clear_store(&p->p_limit);
    proc_unlock(p);

    proc_limit_release(plimit);
}

/*
 * proc_limitblock/unblock are used to serialize access to plimit
 * from concurrent threads within the same process.
 * Callers must hold the proc lock on entry; the lock is still held
 * on return.
 */
static void
proc_limitblock(proc_t p)
{
    lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

    while (p->p_lflag & P_LLIMCHANGE) {
        p->p_lflag |= P_LLIMWAIT;
        msleep(&p->p_limit, &p->p_mlock, 0, "proc_limitblock", NULL);
    }
    p->p_lflag |= P_LLIMCHANGE;
}

/*
 * Callers must hold the proc lock on entry; the lock is still held
 * on return.
 */
static void
proc_limitunblock(proc_t p)
{
    lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

    p->p_lflag &= ~P_LLIMCHANGE;
    if (p->p_lflag & P_LLIMWAIT) {
        p->p_lflag &= ~P_LLIMWAIT;
        wakeup(&p->p_limit);
    }
}

/*
 * Perform an rlimit update (as defined by the arbitrary `update` function).
 *
 * Because plimits are accessed without holding any locks,
 * with only a hazard reference, the struct plimit is always
 * copied, updated, and replaced, to implement a const value type.
 */
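/*
 * Readers pair with this via smr_proc_task_enter()/smr_proc_task_leave()
 * (see proc_limitget()/proc_limitgetcur() above); setrlimit-path writers
 * additionally serialize with each other through proc_limitblock()/
 * proc_limitunblock().
 */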
static void
proc_limitupdate(proc_t p, bool unblock, void (^update)(struct plimit *))
{
    struct plimit *cur_plim;
    struct plimit *copy_plim;

    copy_plim = zalloc_flags(plimit_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);

    proc_lock(p);

    cur_plim = smr_serialized_load(&p->p_limit);

    os_ref_init_count(&copy_plim->pl_refcnt, &rlimit_refgrp, 1);
    bcopy(cur_plim->pl_rlimit, copy_plim->pl_rlimit,
        sizeof(struct rlimit) * RLIM_NLIMITS);

    update(copy_plim);

    smr_serialized_store(&p->p_limit, copy_plim);

    if (unblock) {
        proc_limitunblock(p);
    }
    proc_unlock(p);

    proc_limit_release(cur_plim);
}
1702 | |
1703 | static int |
1704 | iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1705 | static int |
1706 | iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1707 | static int |
1708 | iopolicysys_vfs_atime_updates(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1709 | static int |
1710 | iopolicysys_vfs_statfs_no_data_volume(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1711 | static int |
1712 | iopolicysys_vfs_trigger_resolve(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1713 | static int |
1714 | iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1715 | static int |
1716 | iopolicysys_vfs_ignore_node_permissions(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *ipo_param); |
1717 | static int |
1718 | iopolicysys_vfs_skip_mtime_update(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1719 | static int |
1720 | iopolicysys_vfs_allow_lowspace_writes(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1721 | static int |
1722 | iopolicysys_vfs_disallow_rw_for_o_evtonly(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1723 | static int iopolicysys_vfs_altlink(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1724 | static int iopolicysys_vfs_nocache_write_fs_blksize(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); |
1725 | |
1726 | /* |
1727 | * iopolicysys |
1728 | * |
1729 | * Description: System call MUX for use in manipulating I/O policy attributes of the current process or thread |
1730 | * |
1731 | * Parameters: cmd Policy command |
1732 | * arg Pointer to policy arguments |
1733 | * |
1734 | * Returns: 0 Success |
1735 | * EINVAL Invalid command or invalid policy arguments |
1736 | * |
1737 | */ |
1738 | int |
1739 | iopolicysys(struct proc *p, struct iopolicysys_args *uap, int32_t *retval) |
1740 | { |
1741 | int error = 0; |
1742 | struct _iopol_param_t iop_param; |
1743 | |
1744 | if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0) { |
1745 | goto out; |
1746 | } |
1747 | |
1748 | switch (iop_param.iop_iotype) { |
1749 | case IOPOL_TYPE_DISK: |
		error = iopolicysys_disk(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error == EIDRM) {
			*retval = -2;
			error = 0;
		}
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY:
		error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_ATIME_UPDATES:
		error = iopolicysys_vfs_atime_updates(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES:
		error = iopolicysys_vfs_materialize_dataless_files(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_STATFS_NO_DATA_VOLUME:
		error = iopolicysys_vfs_statfs_no_data_volume(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_TRIGGER_RESOLVE:
		error = iopolicysys_vfs_trigger_resolve(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_IGNORE_CONTENT_PROTECTION:
		error = iopolicysys_vfs_ignore_content_protection(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_IGNORE_PERMISSIONS:
		error = iopolicysys_vfs_ignore_node_permissions(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_SKIP_MTIME_UPDATE:
		error = iopolicysys_vfs_skip_mtime_update(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_ALLOW_LOW_SPACE_WRITES:
		error = iopolicysys_vfs_allow_lowspace_writes(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_DISALLOW_RW_FOR_O_EVTONLY:
		error = iopolicysys_vfs_disallow_rw_for_o_evtonly(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_ALTLINK:
		error = iopolicysys_vfs_altlink(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_NOCACHE_WRITE_FS_BLKSIZE:
		error = iopolicysys_vfs_nocache_write_fs_blksize(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1827 | if (error) { |
1828 | goto out; |
1829 | } |
1830 | break; |
1831 | |
1832 | default: |
1833 | error = EINVAL; |
1834 | goto out; |
1835 | } |
1836 | |
	/* Individual iotype handlers are expected to update iop_param when invoked with a GET command */
1838 | if (uap->cmd == IOPOL_CMD_GET) { |
1839 | error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param)); |
1840 | if (error) { |
1841 | goto out; |
1842 | } |
1843 | } |
1844 | |
1845 | out: |
1846 | return error; |
1847 | } |
1848 | |
1849 | static int |
1850 | iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) |
1851 | { |
1852 | int error = 0; |
1853 | thread_t thread; |
1854 | int policy_flavor; |
1855 | |
1856 | /* Validate scope */ |
1857 | switch (scope) { |
1858 | case IOPOL_SCOPE_PROCESS: |
1859 | thread = THREAD_NULL; |
1860 | policy_flavor = TASK_POLICY_IOPOL; |
1861 | break; |
1862 | |
1863 | case IOPOL_SCOPE_THREAD: |
1864 | thread = current_thread(); |
1865 | policy_flavor = TASK_POLICY_IOPOL; |
1866 | |
		/* Combining QoS with a (non-PASSIVE) IO policy is not allowed; doing so strips the QoS */
1868 | if (cmd == IOPOL_CMD_SET && thread_has_qos_policy(thread)) { |
1869 | switch (policy) { |
1870 | case IOPOL_DEFAULT: |
1871 | case IOPOL_PASSIVE: |
1872 | break; |
1873 | case IOPOL_UTILITY: |
1874 | case IOPOL_THROTTLE: |
1875 | case IOPOL_IMPORTANT: |
1876 | case IOPOL_STANDARD: |
1877 | if (!thread_is_static_param(thread)) { |
1878 | thread_remove_qos_policy(thread); |
1879 | /* |
1880 | * This is not an error case, this is to return a marker to user-space that |
1881 | * we stripped the thread of its QoS class. |
1882 | */ |
1883 | error = EIDRM; |
1884 | break; |
1885 | } |
1886 | OS_FALLTHROUGH; |
1887 | default: |
1888 | error = EINVAL; |
1889 | goto out; |
1890 | } |
1891 | } |
1892 | break; |
1893 | |
1894 | case IOPOL_SCOPE_DARWIN_BG: |
1895 | #if !defined(XNU_TARGET_OS_OSX) |
1896 | /* We don't want this on platforms outside of macOS as BG is always IOPOL_THROTTLE */ |
1897 | error = ENOTSUP; |
1898 | goto out; |
1899 | #else /* !defined(XNU_TARGET_OS_OSX) */ |
1900 | thread = THREAD_NULL; |
1901 | policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL; |
1902 | break; |
1903 | #endif /* !defined(XNU_TARGET_OS_OSX) */ |
1904 | |
1905 | default: |
1906 | error = EINVAL; |
1907 | goto out; |
1908 | } |
1909 | |
1910 | /* Validate policy */ |
1911 | if (cmd == IOPOL_CMD_SET) { |
1912 | switch (policy) { |
1913 | case IOPOL_DEFAULT: |
1914 | if (scope == IOPOL_SCOPE_DARWIN_BG) { |
1915 | /* the current default BG throttle level is UTILITY */ |
1916 | policy = IOPOL_UTILITY; |
1917 | } else { |
1918 | policy = IOPOL_IMPORTANT; |
1919 | } |
1920 | break; |
1921 | case IOPOL_UTILITY: |
1922 | /* fall-through */ |
1923 | case IOPOL_THROTTLE: |
1924 | /* These levels are OK */ |
1925 | break; |
1926 | case IOPOL_IMPORTANT: |
1927 | /* fall-through */ |
1928 | case IOPOL_STANDARD: |
1929 | /* fall-through */ |
1930 | case IOPOL_PASSIVE: |
1931 | if (scope == IOPOL_SCOPE_DARWIN_BG) { |
1932 | /* These levels are invalid for BG */ |
1933 | error = EINVAL; |
1934 | goto out; |
1935 | } else { |
1936 | /* OK for other scopes */ |
1937 | } |
1938 | break; |
1939 | default: |
1940 | error = EINVAL; |
1941 | goto out; |
1942 | } |
1943 | } |
1944 | |
1945 | /* Perform command */ |
1946 | switch (cmd) { |
1947 | case IOPOL_CMD_SET: |
1948 | if (thread != THREAD_NULL) { |
			proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor, policy);
		} else {
			proc_set_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = proc_get_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor);
		} else {
			policy = proc_get_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor);
1959 | } |
1960 | iop_param->iop_policy = policy; |
1961 | break; |
1962 | default: |
1963 | error = EINVAL; /* unknown command */ |
1964 | break; |
1965 | } |
1966 | |
1967 | out: |
1968 | return error; |
1969 | } |
1970 | |
1971 | static int |
1972 | iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) |
1973 | { |
1974 | int error = 0; |
1975 | |
1976 | /* Validate scope */ |
1977 | switch (scope) { |
1978 | case IOPOL_SCOPE_PROCESS: |
1979 | /* Only process OK */ |
1980 | break; |
1981 | default: |
1982 | error = EINVAL; |
1983 | goto out; |
1984 | } |
1985 | |
1986 | /* Validate policy */ |
1987 | if (cmd == IOPOL_CMD_SET) { |
1988 | switch (policy) { |
1989 | case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT: |
1990 | /* fall-through */ |
1991 | case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE: |
1992 | /* These policies are OK */ |
1993 | break; |
1994 | default: |
1995 | error = EINVAL; |
1996 | goto out; |
1997 | } |
1998 | } |
1999 | |
2000 | /* Perform command */ |
2001 | switch (cmd) { |
2002 | case IOPOL_CMD_SET: |
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/* If it's a non-root process, it needs to have the entitlement to set the policy */
			boolean_t entitled = FALSE;
			entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
2007 | if (!entitled) { |
2008 | error = EPERM; |
2009 | goto out; |
2010 | } |
2011 | } |
2012 | |
2013 | switch (policy) { |
2014 | case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT: |
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY, &p->p_vfs_iopolicy);
2019 | break; |
2020 | default: |
2021 | error = EINVAL; |
2022 | goto out; |
2023 | } |
2024 | |
2025 | break; |
2026 | case IOPOL_CMD_GET: |
2027 | iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) |
2028 | ? IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE |
2029 | : IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT; |
2030 | break; |
2031 | default: |
2032 | error = EINVAL; /* unknown command */ |
2033 | break; |
2034 | } |
2035 | |
2036 | out: |
2037 | return error; |
2038 | } |
2039 | |
2040 | static inline int |
2041 | get_thread_atime_policy(struct uthread *ut) |
2042 | { |
2043 | return (ut->uu_flag & UT_ATIME_UPDATE) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT; |
2044 | } |
2045 | |
2046 | static inline void |
2047 | set_thread_atime_policy(struct uthread *ut, int policy) |
2048 | { |
2049 | if (policy == IOPOL_ATIME_UPDATES_OFF) { |
2050 | ut->uu_flag |= UT_ATIME_UPDATE; |
2051 | } else { |
2052 | ut->uu_flag &= ~UT_ATIME_UPDATE; |
2053 | } |
2054 | } |
2055 | |
2056 | static inline void |
2057 | set_task_atime_policy(struct proc *p, int policy) |
2058 | { |
2059 | if (policy == IOPOL_ATIME_UPDATES_OFF) { |
		OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES, &p->p_vfs_iopolicy);
	} else {
		OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES), &p->p_vfs_iopolicy);
2063 | } |
2064 | } |
2065 | |
2066 | static inline int |
2067 | get_task_atime_policy(struct proc *p) |
2068 | { |
2069 | return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT; |
2070 | } |
2071 | |
2072 | static int |
2073 | iopolicysys_vfs_atime_updates(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) |
2074 | { |
2075 | int error = 0; |
2076 | thread_t thread; |
2077 | |
2078 | /* Validate scope */ |
2079 | switch (scope) { |
2080 | case IOPOL_SCOPE_THREAD: |
2081 | thread = current_thread(); |
2082 | break; |
2083 | case IOPOL_SCOPE_PROCESS: |
2084 | thread = THREAD_NULL; |
2085 | break; |
2086 | default: |
2087 | error = EINVAL; |
2088 | goto out; |
2089 | } |
2090 | |
2091 | /* Validate policy */ |
2092 | if (cmd == IOPOL_CMD_SET) { |
2093 | switch (policy) { |
2094 | case IOPOL_ATIME_UPDATES_DEFAULT: |
2095 | case IOPOL_ATIME_UPDATES_OFF: |
2096 | break; |
2097 | default: |
2098 | error = EINVAL; |
2099 | goto out; |
2100 | } |
2101 | } |
2102 | |
2103 | /* Perform command */ |
2104 | switch (cmd) { |
2105 | case IOPOL_CMD_SET: |
2106 | if (thread != THREAD_NULL) { |
			set_thread_atime_policy(get_bsdthread_info(thread), policy);
		} else {
			set_task_atime_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_atime_policy(get_bsdthread_info(thread));
2115 | } else { |
2116 | policy = get_task_atime_policy(p); |
2117 | } |
2118 | iop_param->iop_policy = policy; |
2119 | break; |
2120 | default: |
2121 | error = EINVAL; /* unknown command */ |
2122 | break; |
2123 | } |
2124 | |
2125 | out: |
2126 | return error; |
2127 | } |
2128 | |
2129 | static inline int |
2130 | get_thread_materialize_policy(struct uthread *ut) |
2131 | { |
2132 | if (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) { |
2133 | return IOPOL_MATERIALIZE_DATALESS_FILES_OFF; |
2134 | } else if (ut->uu_flag & UT_NSPACE_FORCEDATALESSFAULTS) { |
2135 | return IOPOL_MATERIALIZE_DATALESS_FILES_ON; |
2136 | } |
2137 | /* Default thread behavior is "inherit process behavior". */ |
2138 | return IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT; |
2139 | } |
2140 | |
2141 | static inline void |
2142 | set_thread_materialize_policy(struct uthread *ut, int policy) |
2143 | { |
2144 | if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_OFF) { |
2145 | ut->uu_flag &= ~UT_NSPACE_FORCEDATALESSFAULTS; |
2146 | ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS; |
2147 | } else if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) { |
2148 | ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS; |
2149 | ut->uu_flag |= UT_NSPACE_FORCEDATALESSFAULTS; |
2150 | } else { |
2151 | ut->uu_flag &= ~(UT_NSPACE_NODATALESSFAULTS | UT_NSPACE_FORCEDATALESSFAULTS); |
2152 | } |
2153 | } |
2154 | |
2155 | static inline void |
2156 | set_proc_materialize_policy(struct proc *p, int policy) |
2157 | { |
2158 | if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT) { |
2159 | /* |
2160 | * Caller has specified "use the default policy". |
2161 | * The default policy is to NOT materialize dataless |
2162 | * files. |
2163 | */ |
2164 | policy = IOPOL_MATERIALIZE_DATALESS_FILES_OFF; |
2165 | } |
2166 | if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) { |
		OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES, &p->p_vfs_iopolicy);
	} else {
		OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES), &p->p_vfs_iopolicy);
2170 | } |
2171 | } |
2172 | |
2173 | static int |
2174 | get_proc_materialize_policy(struct proc *p) |
2175 | { |
2176 | return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES) ? IOPOL_MATERIALIZE_DATALESS_FILES_ON : IOPOL_MATERIALIZE_DATALESS_FILES_OFF; |
2177 | } |
2178 | |
2179 | int |
2180 | iopolicysys_vfs_materialize_dataless_files(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) |
2181 | { |
2182 | int error = 0; |
2183 | thread_t thread; |
2184 | |
2185 | /* Validate scope */ |
2186 | switch (scope) { |
2187 | case IOPOL_SCOPE_THREAD: |
2188 | thread = current_thread(); |
2189 | break; |
2190 | case IOPOL_SCOPE_PROCESS: |
2191 | thread = THREAD_NULL; |
2192 | break; |
2193 | default: |
2194 | error = EINVAL; |
2195 | goto out; |
2196 | } |
2197 | |
2198 | /* Validate policy */ |
2199 | if (cmd == IOPOL_CMD_SET) { |
2200 | switch (policy) { |
2201 | case IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT: |
2202 | case IOPOL_MATERIALIZE_DATALESS_FILES_OFF: |
2203 | case IOPOL_MATERIALIZE_DATALESS_FILES_ON: |
2204 | break; |
2205 | default: |
2206 | error = EINVAL; |
2207 | goto out; |
2208 | } |
2209 | } |
2210 | |
2211 | /* Perform command */ |
2212 | switch (cmd) { |
2213 | case IOPOL_CMD_SET: |
2214 | if (thread != THREAD_NULL) { |
			set_thread_materialize_policy(get_bsdthread_info(thread), policy);
		} else {
			set_proc_materialize_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_materialize_policy(get_bsdthread_info(thread));
2223 | } else { |
2224 | policy = get_proc_materialize_policy(p); |
2225 | } |
2226 | iop_param->iop_policy = policy; |
2227 | break; |
2228 | default: |
2229 | error = EINVAL; /* unknown command */ |
2230 | break; |
2231 | } |
2232 | |
2233 | out: |
2234 | return error; |
2235 | } |
2236 | |
2237 | static int |
2238 | iopolicysys_vfs_statfs_no_data_volume(struct proc *p __unused, int cmd, |
2239 | int scope, int policy, struct _iopol_param_t *iop_param) |
2240 | { |
2241 | int error = 0; |
2242 | |
2243 | /* Validate scope */ |
2244 | switch (scope) { |
2245 | case IOPOL_SCOPE_PROCESS: |
2246 | /* Only process OK */ |
2247 | break; |
2248 | default: |
2249 | error = EINVAL; |
2250 | goto out; |
2251 | } |
2252 | |
2253 | /* Validate policy */ |
2254 | if (cmd == IOPOL_CMD_SET) { |
2255 | switch (policy) { |
2256 | case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT: |
2257 | /* fall-through */ |
2258 | case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME: |
2259 | /* These policies are OK */ |
2260 | break; |
2261 | default: |
2262 | error = EINVAL; |
2263 | goto out; |
2264 | } |
2265 | } |
2266 | |
2267 | /* Perform command */ |
2268 | switch (cmd) { |
2269 | case IOPOL_CMD_SET: |
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/* If it's a non-root process, it needs to have the entitlement to set the policy */
			boolean_t entitled = FALSE;
			entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
2274 | if (!entitled) { |
2275 | error = EPERM; |
2276 | goto out; |
2277 | } |
2278 | } |
2279 | |
2280 | switch (policy) { |
2281 | case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT: |
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME:
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME, &p->p_vfs_iopolicy);
2286 | break; |
2287 | default: |
2288 | error = EINVAL; |
2289 | goto out; |
2290 | } |
2291 | |
2292 | break; |
2293 | case IOPOL_CMD_GET: |
2294 | iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME) |
2295 | ? IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME |
2296 | : IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT; |
2297 | break; |
2298 | default: |
2299 | error = EINVAL; /* unknown command */ |
2300 | break; |
2301 | } |
2302 | |
2303 | out: |
2304 | return error; |
2305 | } |
2306 | |
2307 | static int |
2308 | iopolicysys_vfs_trigger_resolve(struct proc *p __unused, int cmd, |
2309 | int scope, int policy, struct _iopol_param_t *iop_param) |
2310 | { |
2311 | int error = 0; |
2312 | |
2313 | /* Validate scope */ |
2314 | switch (scope) { |
2315 | case IOPOL_SCOPE_PROCESS: |
2316 | /* Only process OK */ |
2317 | break; |
2318 | default: |
2319 | error = EINVAL; |
2320 | goto out; |
2321 | } |
2322 | |
2323 | /* Validate policy */ |
2324 | if (cmd == IOPOL_CMD_SET) { |
2325 | switch (policy) { |
2326 | case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT: |
2327 | /* fall-through */ |
2328 | case IOPOL_VFS_TRIGGER_RESOLVE_OFF: |
2329 | /* These policies are OK */ |
2330 | break; |
2331 | default: |
2332 | error = EINVAL; |
2333 | goto out; |
2334 | } |
2335 | } |
2336 | |
2337 | /* Perform command */ |
2338 | switch (cmd) { |
2339 | case IOPOL_CMD_SET: |
2340 | switch (policy) { |
2341 | case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT: |
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_TRIGGER_RESOLVE_OFF:
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE, &p->p_vfs_iopolicy);
2346 | break; |
2347 | default: |
2348 | error = EINVAL; |
2349 | goto out; |
2350 | } |
2351 | |
2352 | break; |
2353 | case IOPOL_CMD_GET: |
2354 | iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) |
2355 | ? IOPOL_VFS_TRIGGER_RESOLVE_OFF |
2356 | : IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT; |
2357 | break; |
2358 | default: |
2359 | error = EINVAL; /* unknown command */ |
2360 | break; |
2361 | } |
2362 | |
2363 | out: |
2364 | return error; |
2365 | } |
2366 | |
2367 | static int |
2368 | iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope, |
2369 | int policy, struct _iopol_param_t *iop_param) |
2370 | { |
2371 | int error = 0; |
2372 | |
2373 | /* Validate scope */ |
2374 | switch (scope) { |
2375 | case IOPOL_SCOPE_PROCESS: |
2376 | /* Only process OK */ |
2377 | break; |
2378 | default: |
2379 | error = EINVAL; |
2380 | goto out; |
2381 | } |
2382 | |
2383 | /* Validate policy */ |
2384 | if (cmd == IOPOL_CMD_SET) { |
2385 | switch (policy) { |
2386 | case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT: |
2387 | OS_FALLTHROUGH; |
2388 | case IOPOL_VFS_CONTENT_PROTECTION_IGNORE: |
2389 | /* These policies are OK */ |
2390 | break; |
2391 | default: |
2392 | error = EINVAL; |
2393 | goto out; |
2394 | } |
2395 | } |
2396 | |
2397 | /* Perform command */ |
2398 | switch (cmd) { |
2399 | case IOPOL_CMD_SET: |
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/* If it's a non-root process, it needs to have the entitlement to set the policy */
			boolean_t entitled = FALSE;
			entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
2404 | if (!entitled) { |
2405 | error = EPERM; |
2406 | goto out; |
2407 | } |
2408 | } |
2409 | |
2410 | switch (policy) { |
2411 | case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT: |
2412 | os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed); |
2413 | break; |
2414 | case IOPOL_VFS_CONTENT_PROTECTION_IGNORE: |
2415 | os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed); |
2416 | break; |
2417 | default: |
2418 | error = EINVAL; |
2419 | goto out; |
2420 | } |
2421 | |
2422 | break; |
2423 | case IOPOL_CMD_GET: |
2424 | iop_param->iop_policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION) |
2425 | ? IOPOL_VFS_CONTENT_PROTECTION_IGNORE |
2426 | : IOPOL_VFS_CONTENT_PROTECTION_DEFAULT; |
2427 | break; |
2428 | default: |
2429 | error = EINVAL; /* unknown command */ |
2430 | break; |
2431 | } |
2432 | |
2433 | out: |
2434 | return error; |
2435 | } |
2436 | |
2437 | #define AUTHORIZED_ACCESS_ENTITLEMENT \ |
2438 | "com.apple.private.vfs.authorized-access" |
2439 | int |
2440 | iopolicysys_vfs_ignore_node_permissions(struct proc *p, int cmd, int scope, |
2441 | int policy, __unused struct _iopol_param_t *iop_param) |
2442 | { |
2443 | int error = EINVAL; |
2444 | |
2445 | switch (scope) { |
2446 | case IOPOL_SCOPE_PROCESS: |
2447 | break; |
2448 | default: |
2449 | goto out; |
2450 | } |
2451 | |
2452 | switch (cmd) { |
2453 | case IOPOL_CMD_GET: |
2454 | policy = os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS ? |
2455 | IOPOL_VFS_IGNORE_PERMISSIONS_ON : IOPOL_VFS_IGNORE_PERMISSIONS_OFF; |
2456 | iop_param->iop_policy = policy; |
2457 | goto out_ok; |
2458 | case IOPOL_CMD_SET: |
2459 | /* SET is handled after the switch */ |
2460 | break; |
2461 | default: |
2462 | goto out; |
2463 | } |
2464 | |
2465 | if (!IOCurrentTaskHasEntitlement(AUTHORIZED_ACCESS_ENTITLEMENT)) { |
2466 | error = EPERM; |
2467 | goto out; |
2468 | } |
2469 | |
2470 | switch (policy) { |
2471 | case IOPOL_VFS_IGNORE_PERMISSIONS_OFF: |
2472 | os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS, relaxed); |
2473 | break; |
2474 | case IOPOL_VFS_IGNORE_PERMISSIONS_ON: |
2475 | os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS, relaxed); |
2476 | break; |
2477 | default: |
2478 | break; |
2479 | } |
2480 | |
2481 | out_ok: |
2482 | error = 0; |
2483 | out: |
2484 | return error; |
2485 | } |
2486 | |
2487 | #define SKIP_MTIME_UPDATE_ENTITLEMENT \ |
2488 | "com.apple.private.vfs.skip-mtime-updates" |
2489 | int |
2490 | iopolicysys_vfs_skip_mtime_update(struct proc *p, int cmd, int scope, |
2491 | int policy, __unused struct _iopol_param_t *iop_param) |
2492 | { |
2493 | int error = EINVAL; |
2494 | |
2495 | switch (scope) { |
2496 | case IOPOL_SCOPE_PROCESS: |
2497 | break; |
2498 | default: |
2499 | goto out; |
2500 | } |
2501 | |
2502 | switch (cmd) { |
2503 | case IOPOL_CMD_GET: |
2504 | policy = os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SKIP_MTIME_UPDATE ? |
2505 | IOPOL_VFS_SKIP_MTIME_UPDATE_ON : IOPOL_VFS_SKIP_MTIME_UPDATE_OFF; |
2506 | iop_param->iop_policy = policy; |
2507 | goto out_ok; |
2508 | case IOPOL_CMD_SET: |
2509 | break; |
2510 | default: |
2511 | break; |
2512 | } |
2513 | |
2514 | if (!IOCurrentTaskHasEntitlement(SKIP_MTIME_UPDATE_ENTITLEMENT)) { |
2515 | error = EPERM; |
2516 | goto out; |
2517 | } |
2518 | |
2519 | switch (policy) { |
2520 | case IOPOL_VFS_SKIP_MTIME_UPDATE_OFF: |
2521 | os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SKIP_MTIME_UPDATE, relaxed); |
2522 | break; |
2523 | case IOPOL_VFS_SKIP_MTIME_UPDATE_ON: |
2524 | os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SKIP_MTIME_UPDATE, relaxed); |
2525 | break; |
2526 | default: |
2527 | break; |
2528 | } |
2529 | |
2530 | out_ok: |
2531 | error = 0; |
2532 | out: |
2533 | return error; |
2534 | } |
2535 | |
2536 | #define ALLOW_LOW_SPACE_WRITES_ENTITLEMENT \ |
2537 | "com.apple.private.vfs.allow-low-space-writes" |
2538 | static int |
2539 | iopolicysys_vfs_allow_lowspace_writes(struct proc *p, int cmd, int scope, |
2540 | int policy, __unused struct _iopol_param_t *iop_param) |
2541 | { |
2542 | int error = EINVAL; |
2543 | |
2544 | switch (scope) { |
2545 | case IOPOL_SCOPE_PROCESS: |
2546 | break; |
2547 | default: |
2548 | goto out; |
2549 | } |
2550 | |
2551 | switch (cmd) { |
2552 | case IOPOL_CMD_GET: |
2553 | policy = os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES ? |
2554 | IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_ON : IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_OFF; |
2555 | iop_param->iop_policy = policy; |
2556 | goto out_ok; |
2557 | case IOPOL_CMD_SET: |
2558 | break; |
2559 | default: |
2560 | break; |
2561 | } |
2562 | |
2563 | if (!IOCurrentTaskHasEntitlement(ALLOW_LOW_SPACE_WRITES_ENTITLEMENT)) { |
2564 | error = EPERM; |
2565 | goto out; |
2566 | } |
2567 | |
2568 | switch (policy) { |
2569 | case IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_OFF: |
2570 | os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES, relaxed); |
2571 | break; |
2572 | case IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_ON: |
2573 | os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES, relaxed); |
2574 | break; |
2575 | default: |
2576 | break; |
2577 | } |
2578 | |
2579 | out_ok: |
2580 | error = 0; |
2581 | out: |
2582 | return error; |
2583 | } |
2584 | |
2585 | static int |
2586 | iopolicysys_vfs_disallow_rw_for_o_evtonly(struct proc *p, int cmd, int scope, |
2587 | int policy, __unused struct _iopol_param_t *iop_param) |
2588 | { |
2589 | int error = EINVAL; |
2590 | |
2591 | switch (scope) { |
2592 | case IOPOL_SCOPE_PROCESS: |
2593 | break; |
2594 | default: |
2595 | goto out; |
2596 | } |
2597 | |
2598 | switch (cmd) { |
2599 | case IOPOL_CMD_GET: |
2600 | policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & |
2601 | P_VFS_IOPOLICY_DISALLOW_RW_FOR_O_EVTONLY) ? |
2602 | IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_ON : |
2603 | IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_DEFAULT; |
2604 | iop_param->iop_policy = policy; |
2605 | goto out_ok; |
2606 | case IOPOL_CMD_SET: |
2607 | break; |
2608 | default: |
2609 | goto out; |
2610 | } |
2611 | |
2612 | /* Once set, we don't allow the process to clear it. */ |
2613 | switch (policy) { |
2614 | case IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_ON: |
2615 | os_atomic_or(&p->p_vfs_iopolicy, |
2616 | P_VFS_IOPOLICY_DISALLOW_RW_FOR_O_EVTONLY, relaxed); |
2617 | break; |
2618 | default: |
2619 | goto out; |
2620 | } |
2621 | |
2622 | out_ok: |
2623 | error = 0; |
2624 | out: |
2625 | return error; |
2626 | } |
2627 | |
2628 | static int |
2629 | iopolicysys_vfs_altlink(struct proc *p, int cmd, int scope, int policy, |
2630 | struct _iopol_param_t *iop_param) |
2631 | { |
2632 | if (scope != IOPOL_SCOPE_PROCESS) { |
2633 | return EINVAL; |
2634 | } |
2635 | |
2636 | if (cmd == IOPOL_CMD_GET) { |
2637 | policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ALTLINK) ? |
2638 | IOPOL_VFS_ALTLINK_ENABLED : IOPOL_VFS_ALTLINK_DISABLED; |
2639 | iop_param->iop_policy = policy; |
2640 | return 0; |
2641 | } |
2642 | |
2643 | /* Once set, we don't allow the process to clear it. */ |
2644 | if (policy == IOPOL_VFS_ALTLINK_ENABLED) { |
2645 | os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALTLINK, relaxed); |
2646 | return 0; |
2647 | } |
2648 | |
2649 | return EINVAL; |
2650 | } |
2651 | |
2652 | static int |
2653 | iopolicysys_vfs_nocache_write_fs_blksize(struct proc *p, int cmd, int scope, int policy, |
2654 | struct _iopol_param_t *iop_param) |
2655 | { |
2656 | thread_t thread; |
2657 | |
2658 | switch (scope) { |
2659 | case IOPOL_SCOPE_THREAD: |
2660 | thread = current_thread(); |
2661 | break; |
2662 | case IOPOL_SCOPE_PROCESS: |
2663 | thread = THREAD_NULL; |
2664 | break; |
2665 | default: |
2666 | return EINVAL; |
2667 | } |
2668 | |
2669 | if (cmd == IOPOL_CMD_GET) { |
2670 | if (thread != THREAD_NULL) { |
2671 | struct uthread *ut = get_bsdthread_info(thread); |
2672 | policy = ut->uu_flag & UT_FS_BLKSIZE_NOCACHE_WRITES ? |
2673 | IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON : IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_DEFAULT; |
2674 | } else { |
2675 | policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE) ? |
2676 | IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON : IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_DEFAULT; |
2677 | } |
2678 | iop_param->iop_policy = policy; |
2679 | return 0; |
2680 | } |
2681 | |
2682 | /* Once set, we don't allow the process or thread to clear it. */ |
2683 | if ((cmd == IOPOL_CMD_SET) && (policy == IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON)) { |
2684 | #if 0 |
2685 | if (thread != THREAD_NULL) { |
2686 | struct uthread *ut = get_bsdthread_info(thread); |
2687 | ut->uu_flag |= UT_FS_BLKSIZE_NOCACHE_WRITES; |
2688 | } else { |
2689 | os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE, relaxed); |
2690 | } |
2691 | #endif |
2692 | return 0; |
2693 | } |
2694 | |
2695 | return EINVAL; |
2696 | } |
2697 | |
2698 | void |
2699 | proc_apply_task_networkbg(int pid, thread_t thread) |
2700 | { |
2701 | proc_t p = proc_find(pid); |
2702 | |
2703 | if (p != PROC_NULL) { |
2704 | do_background_socket(p, thread); |
2705 | proc_rele(p); |
2706 | } |
2707 | } |
2708 | |
2709 | void |
2710 | gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor) |
2711 | { |
2712 | struct rusage_info_child *ri_child; |
2713 | |
2714 | assert(p->p_stats != NULL); |
	memset(ru, 0, sizeof(*ru));
2716 | switch (flavor) { |
2717 | case RUSAGE_INFO_V6: |
2718 | /* Any P-specific resource counters are captured in fill_task_rusage. */ |
2719 | OS_FALLTHROUGH; |
2720 | |
2721 | case RUSAGE_INFO_V5: |
2722 | #if __has_feature(ptrauth_calls) |
2723 | if (vm_shared_region_is_reslide(proc_task(p))) { |
2724 | ru->ri_flags |= RU_PROC_RUNS_RESLIDE; |
2725 | } |
2726 | #endif /* __has_feature(ptrauth_calls) */ |
2727 | OS_FALLTHROUGH; |
2728 | |
2729 | case RUSAGE_INFO_V4: |
		ru->ri_logical_writes = get_task_logical_writes(proc_task(p), false);
2731 | ru->ri_lifetime_max_phys_footprint = get_task_phys_footprint_lifetime_max(proc_task(p)); |
2732 | #if CONFIG_LEDGER_INTERVAL_MAX |
2733 | ru->ri_interval_max_phys_footprint = get_task_phys_footprint_interval_max(proc_task(p), FALSE); |
2734 | #endif |
2735 | OS_FALLTHROUGH; |
2736 | |
2737 | case RUSAGE_INFO_V3: |
		fill_task_qos_rusage(proc_task(p), ru);
		fill_task_billed_usage(proc_task(p), ru);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V2:
		fill_task_io_rusage(proc_task(p), ru);
2744 | OS_FALLTHROUGH; |
2745 | |
2746 | case RUSAGE_INFO_V1: |
2747 | /* |
2748 | * p->p_stats->ri_child statistics are protected under proc lock. |
2749 | */ |
2750 | proc_lock(p); |
2751 | |
2752 | ri_child = &(p->p_stats->ri_child); |
2753 | ru->ri_child_user_time = ri_child->ri_child_user_time; |
2754 | ru->ri_child_system_time = ri_child->ri_child_system_time; |
2755 | ru->ri_child_pkg_idle_wkups = ri_child->ri_child_pkg_idle_wkups; |
2756 | ru->ri_child_interrupt_wkups = ri_child->ri_child_interrupt_wkups; |
2757 | ru->ri_child_pageins = ri_child->ri_child_pageins; |
2758 | ru->ri_child_elapsed_abstime = ri_child->ri_child_elapsed_abstime; |
2759 | |
2760 | proc_unlock(p); |
2761 | OS_FALLTHROUGH; |
2762 | |
2763 | case RUSAGE_INFO_V0: |
2764 | proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof(ru->ri_uuid)); |
		fill_task_rusage(proc_task(p), ru);
2766 | ru->ri_proc_start_abstime = p->p_stats->ps_start; |
2767 | } |
2768 | } |
2769 | |
2770 | int |
2771 | proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie) |
2772 | { |
2773 | rusage_info_current ri_current = {}; |
2774 | |
2775 | size_t size = 0; |
2776 | |
2777 | switch (flavor) { |
2778 | case RUSAGE_INFO_V0: |
2779 | size = sizeof(struct rusage_info_v0); |
2780 | break; |
2781 | |
2782 | case RUSAGE_INFO_V1: |
2783 | size = sizeof(struct rusage_info_v1); |
2784 | break; |
2785 | |
2786 | case RUSAGE_INFO_V2: |
2787 | size = sizeof(struct rusage_info_v2); |
2788 | break; |
2789 | |
2790 | case RUSAGE_INFO_V3: |
2791 | size = sizeof(struct rusage_info_v3); |
2792 | break; |
2793 | |
2794 | case RUSAGE_INFO_V4: |
2795 | size = sizeof(struct rusage_info_v4); |
2796 | break; |
2797 | |
2798 | case RUSAGE_INFO_V5: |
2799 | size = sizeof(struct rusage_info_v5); |
2800 | break; |
2801 | |
2802 | case RUSAGE_INFO_V6: |
2803 | size = sizeof(struct rusage_info_v6); |
2804 | break; |
2805 | default: |
2806 | return EINVAL; |
2807 | } |
2808 | |
2809 | if (size == 0) { |
2810 | return EINVAL; |
2811 | } |
2812 | |
2813 | /* |
2814 | * If task is still alive, collect info from the live task itself. |
2815 | * Otherwise, look to the cached info in the zombie proc. |
2816 | */ |
2817 | if (p->p_ru) { |
2818 | return copyout(&p->p_ru->ri, buffer, size); |
2819 | } else { |
		gather_rusage_info(p, &ri_current, flavor);
2821 | ri_current.ri_proc_exit_abstime = 0; |
2822 | return copyout(&ri_current, buffer, size); |
2823 | } |
2824 | } |
2825 | |
2826 | static int |
2827 | mach_to_bsd_rv(int mach_rv) |
2828 | { |
2829 | int bsd_rv = 0; |
2830 | |
2831 | switch (mach_rv) { |
2832 | case KERN_SUCCESS: |
2833 | bsd_rv = 0; |
2834 | break; |
2835 | case KERN_INVALID_ARGUMENT: |
2836 | bsd_rv = EINVAL; |
2837 | break; |
2838 | default: |
2839 | panic("unknown error %#x" , mach_rv); |
2840 | } |
2841 | |
2842 | return bsd_rv; |
2843 | } |
2844 | |
2845 | /* |
2846 | * Resource limit controls |
2847 | * |
2848 | * uap->flavor available flavors: |
2849 | * |
2850 | * RLIMIT_WAKEUPS_MONITOR |
2851 | * RLIMIT_CPU_USAGE_MONITOR |
2852 | * RLIMIT_THREAD_CPULIMITS |
2853 | * RLIMIT_FOOTPRINT_INTERVAL |
2854 | */ |
2855 | int |
2856 | proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *uap, __unused int32_t *retval) |
2857 | { |
2858 | proc_t targetp; |
2859 | int error = 0; |
2860 | struct proc_rlimit_control_wakeupmon wakeupmon_args; |
2861 | uint32_t cpumon_flags; |
2862 | uint32_t cpulimits_flags; |
2863 | kauth_cred_t my_cred, target_cred; |
2864 | #if CONFIG_LEDGER_INTERVAL_MAX |
	uint32_t footprint_interval_flags;
	uint64_t interval_max_footprint;
2867 | #endif /* CONFIG_LEDGER_INTERVAL_MAX */ |
2868 | |
2869 | /* -1 implicitly means our own process (perhaps even the current thread for per-thread attributes) */ |
2870 | if (uap->pid == -1) { |
2871 | targetp = proc_self(); |
2872 | } else { |
		targetp = proc_find(uap->pid);
2874 | } |
2875 | |
2876 | /* proc_self() can return NULL for an exiting process */ |
2877 | if (targetp == PROC_NULL) { |
2878 | return ESRCH; |
2879 | } |
2880 | |
2881 | my_cred = kauth_cred_get(); |
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
	    kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred)) {
		proc_rele(targetp);
2888 | kauth_cred_unref(&target_cred); |
2889 | return EACCES; |
2890 | } |
2891 | |
2892 | switch (uap->flavor) { |
2893 | case RLIMIT_WAKEUPS_MONITOR: |
2894 | if ((error = copyin(uap->arg, &wakeupmon_args, sizeof(wakeupmon_args))) != 0) { |
2895 | break; |
2896 | } |
		if ((error = mach_to_bsd_rv(task_wakeups_monitor_ctl(proc_task(targetp), &wakeupmon_args.wm_flags,
		    &wakeupmon_args.wm_rate))) != 0) {
2899 | break; |
2900 | } |
2901 | error = copyout(&wakeupmon_args, uap->arg, sizeof(wakeupmon_args)); |
2902 | break; |
2903 | case RLIMIT_CPU_USAGE_MONITOR: |
2904 | cpumon_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127) |
		error = mach_to_bsd_rv(task_cpu_usage_monitor_ctl(proc_task(targetp), &cpumon_flags));
2906 | break; |
2907 | case RLIMIT_THREAD_CPULIMITS: |
2908 | cpulimits_flags = (uint32_t)uap->arg; // only need a limited set of bits, pass in void * argument |
2909 | |
2910 | if (uap->pid != -1) { |
2911 | error = EINVAL; |
2912 | break; |
2913 | } |
2914 | |
2915 | uint8_t percent = 0; |
2916 | uint32_t ms_refill = 0; |
2917 | uint64_t ns_refill; |
2918 | |
2919 | percent = (uint8_t)(cpulimits_flags & 0xffU); /* low 8 bits for percent */ |
2920 | ms_refill = (cpulimits_flags >> 8) & 0xffffff; /* next 24 bits represent ms refill value */ |
2921 | if (percent >= 100 || percent == 0) { |
2922 | error = EINVAL; |
2923 | break; |
2924 | } |
2925 | |
2926 | ns_refill = ((uint64_t)ms_refill) * NSEC_PER_MSEC; |
2927 | |
		error = mach_to_bsd_rv(thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ns_refill));
2929 | break; |
2930 | |
2931 | #if CONFIG_LEDGER_INTERVAL_MAX |
2932 | case RLIMIT_FOOTPRINT_INTERVAL: |
2933 | footprint_interval_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127) |
2934 | /* |
2935 | * There is currently only one option for this flavor. |
2936 | */ |
2937 | if ((footprint_interval_flags & FOOTPRINT_INTERVAL_RESET) == 0) { |
2938 | error = EINVAL; |
2939 | break; |
2940 | } |
2941 | interval_max_footprint = get_task_phys_footprint_interval_max(proc_task(targetp), TRUE); |
2942 | break; |
2943 | #endif /* CONFIG_LEDGER_INTERVAL_MAX */ |
2944 | default: |
2945 | error = EINVAL; |
2946 | break; |
2947 | } |
2948 | |
	proc_rele(targetp);
2950 | kauth_cred_unref(&target_cred); |
2951 | |
2952 | /* |
2953 | * Return value from this function becomes errno to userland caller. |
2954 | */ |
2955 | return error; |
2956 | } |
2957 | |
2958 | /* |
2959 | * Return the current amount of CPU consumed by this thread (in either user or kernel mode) |
2960 | */ |
2961 | int |
2962 | thread_selfusage(struct proc *p __unused, struct thread_selfusage_args *uap __unused, uint64_t *retval) |
2963 | { |
2964 | uint64_t runtime; |
2965 | |
2966 | runtime = thread_get_runtime_self(); |
2967 | *retval = runtime; |
2968 | |
2969 | return 0; |
2970 | } |
2971 | |