1/*
2 * Copyright (c) 2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <sys/param.h>
30#include <sys/systm.h>
31#include <sys/filedesc.h>
32#include <sys/kernel.h>
33#include <sys/file_internal.h>
34#include <sys/guarded.h>
35#include <sys/sysproto.h>
36#include <sys/vnode.h>
37#include <sys/vnode_internal.h>
38#include <sys/uio_internal.h>
39#include <sys/ubc_internal.h>
40#include <vfs/vfs_support.h>
41#include <security/audit/audit.h>
42#include <sys/syscall.h>
43#include <sys/kauth.h>
44#include <sys/kdebug.h>
45#include <stdbool.h>
46#include <vm/vm_protos.h>
47#include <libkern/section_keywords.h>
48
49#include <kern/kalloc.h>
50#include <kern/task.h>
51#include <kern/exc_guard.h>
52
53#if CONFIG_MACF && CONFIG_VNGUARD
54#include <security/mac.h>
55#include <security/mac_framework.h>
56#include <security/mac_policy.h>
57#include <pexpert/pexpert.h>
58#include <sys/sysctl.h>
59#include <sys/reason.h>
60#endif
61
/* Convenience accessor: a fileproc's open-mode flags live on its fileglob. */
#define f_flag fp_glob->fg_flag
/* Guarded write entry points implemented by the generic read/write code. */
extern int writev_uio(struct proc *p, int fd, user_addr_t user_iovp,
    int iovcnt, off_t offset, int flags, guardid_t *puguard,
    user_ssize_t *retval);
extern int write_internal(struct proc *p, int fd, user_addr_t buf,
    user_size_t nbyte, off_t offset, int flags, guardid_t *puguard,
    user_ssize_t *retval);
/* Terminates the process, attaching the guard exception code/subcode. */
extern int exit_with_guard_exception(void *p, mach_exception_data_type_t code,
    mach_exception_data_type_t subcode);
/*
 * Experimental guarded file descriptor support.
 */

/* Delivers EXC_GUARD to a registered handler; see fd_guard_ast() below. */
kern_return_t task_exception_notify(exception_type_t exception,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode, const bool fatal);

/* GUARD_DUP is mandatory for every guarded fd; the remaining bits are optional. */
#define GUARD_REQUIRED (GUARD_DUP)
#define GUARD_ALL (GUARD_REQUIRED | \
    (GUARD_CLOSE | GUARD_SOCKET_IPC | GUARD_FILEPORT | GUARD_WRITE))

/* Typed zone backing all struct fileproc_guard allocations. */
static KALLOC_TYPE_DEFINE(fp_guard_zone, struct fileproc_guard, KT_DEFAULT);

/* Creation arguments threaded through fp_init callbacks (open1() et al.). */
struct gfp_crarg {
	guardid_t gca_guard;    /* caller-chosen guard value; must be non-zero */
	uint16_t gca_attrs;     /* GUARD_* attribute bits; must be non-zero */
};
88
89static struct fileproc_guard *
90guarded_fileproc_alloc(guardid_t guard)
91{
92 struct fileproc_guard *fpg;
93
94 fpg = zalloc_flags(fp_guard_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
95 fpg->fpg_guard = guard;
96 return fpg;
97}
98
99static void
100guarded_fileproc_init(struct fileproc *fp, void *initarg)
101{
102 struct gfp_crarg *arg = initarg;
103
104 assert(arg->gca_attrs);
105 fp->fp_guard = guarded_fileproc_alloc(guard: arg->gca_guard);
106 fp->fp_guard_attrs = arg->gca_attrs;
107}
108
109/*
110 * This is called from fdt_fork(),
111 * where it needs to copy a guarded
112 * fd to the new shadow proc.
113 */
114void
115guarded_fileproc_copy_guard(struct fileproc *ofp, struct fileproc *nfp)
116{
117 struct gfp_crarg arg = {
118 .gca_guard = ofp->fp_guard->fpg_guard,
119 .gca_attrs = ofp->fp_guard_attrs
120 };
121 guarded_fileproc_init(fp: nfp, initarg: &arg);
122}
123
124/*
125 * This is called from fileproc_free(),
126 * which is why it is safe to call
127 * without holding the proc_fdlock.
128 */
129void
130guarded_fileproc_unguard(struct fileproc *fp)
131{
132 struct fileproc_guard *fpg = fp->fp_guard;
133
134 fp->fp_guard_attrs = 0;
135 fp->fp_wset = fpg->fpg_wset;
136
137 zfree(fp_guard_zone, fpg);
138}
139
140static int
141fp_lookup_guarded_locked(proc_t p, int fd, guardid_t guard,
142 struct fileproc **fpp)
143{
144 int error;
145 struct fileproc *fp;
146
147 if ((error = fp_lookup(p, fd, resultfp: &fp, locked: 1)) != 0) {
148 return error;
149 }
150
151 if (fp->fp_guard_attrs == 0) {
152 (void) fp_drop(p, fd, fp, locked: 1);
153 return EINVAL;
154 }
155
156 if (guard != fp->fp_guard->fpg_guard) {
157 (void) fp_drop(p, fd, fp, locked: 1);
158 return EPERM; /* *not* a mismatch exception */
159 }
160
161 *fpp = fp;
162 return 0;
163}
164
165int
166fp_lookup_guarded(proc_t p, int fd, guardid_t guard,
167 struct fileproc **fpp, int locked)
168{
169 int error;
170
171 if (!locked) {
172 proc_fdlock_spin(p);
173 }
174
175 error = fp_lookup_guarded_locked(p, fd, guard, fpp);
176
177 if (!locked) {
178 proc_fdunlock(p);
179 }
180
181 return error;
182}
183
184/*
185 * Expected use pattern:
186 *
187 * if (fp_isguarded(fp, GUARD_CLOSE)) {
188 * error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
189 * proc_fdunlock(p);
190 * return error;
191 * }
192 */
193int
194fp_isguarded(struct fileproc *fp, u_int attrs)
195{
196 return fp->fp_guard_attrs && (fp->fp_guard_attrs & attrs) == attrs;
197}
198
199extern char *proc_name_address(void *p);
200
201int
202fp_guard_exception(proc_t p, int fd, struct fileproc *fp, u_int flavor)
203{
204 /* all fp guard fields protected via proc_fdlock() */
205 proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
206
207 mach_exception_code_t code = 0;
208 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_FD);
209 EXC_GUARD_ENCODE_FLAVOR(code, flavor);
210 EXC_GUARD_ENCODE_TARGET(code, fd);
211 mach_exception_subcode_t subcode = fp->fp_guard->fpg_guard;
212
213 assert(fp->fp_guard_attrs);
214
215 thread_t t = current_thread();
216 thread_guard_violation(t, code, subcode, TRUE);
217 return EPERM;
218}
219
220/*
221 * (Invoked before returning to userland from the syscall handler.)
222 */
223void
224fd_guard_ast(
225 thread_t __unused t,
226 mach_exception_code_t code,
227 mach_exception_subcode_t subcode)
228{
229 const bool fatal = true;
230 /*
231 * Check if anyone has registered for Synchronous EXC_GUARD, if yes then,
232 * deliver it synchronously and then kill the process, else kill the process
233 * and deliver the exception via EXC_CORPSE_NOTIFY. Always kill the process if we are not in dev mode.
234 */
235 if (task_exception_notify(EXC_GUARD, code, subcode, fatal) == KERN_SUCCESS) {
236 psignal(p: current_proc(), SIGKILL);
237 } else {
238 exit_with_guard_exception(p: current_proc(), code, subcode);
239 }
240}
241
242/*
243 * Experimental guarded file descriptor SPIs
244 */
245
246/*
247 * int guarded_open_np(const char *pathname, int flags,
248 * const guardid_t *guard, u_int guardflags, ...);
249 *
250 * In this initial implementation, GUARD_DUP must be specified.
251 * GUARD_CLOSE, GUARD_SOCKET_IPC and GUARD_FILEPORT are optional.
252 *
253 * If GUARD_DUP wasn't specified, then we'd have to do the (extra) work
254 * to allow dup-ing a descriptor to inherit the guard onto the new
255 * descriptor. (Perhaps GUARD_DUP behaviours should just always be true
256 * for a guarded fd? Or, more sanely, all the dup operations should
257 * just always propagate the guard?)
258 *
259 * Guarded descriptors are always close-on-exec, and GUARD_CLOSE
260 * requires close-on-fork; O_CLOEXEC must be set in flags.
261 * This setting is immutable; attempts to clear the flag will
262 * cause a guard exception.
263 *
264 * XXX It's somewhat broken that change_fdguard_np() can completely
265 * remove the guard and thus revoke down the immutability
266 * promises above. Ick.
267 */
268int
269guarded_open_np(proc_t p, struct guarded_open_np_args *uap, int32_t *retval)
270{
271 if ((uap->flags & O_CLOEXEC) == 0) {
272 return EINVAL;
273 }
274
275 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
276 ((uap->guardflags & ~GUARD_ALL) != 0)) {
277 return EINVAL;
278 }
279
280 int error;
281 struct gfp_crarg crarg = {
282 .gca_attrs = (uint16_t)uap->guardflags
283 };
284
285 if ((error = copyin(uap->guard,
286 &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) {
287 return error;
288 }
289
290 /*
291 * Disallow certain guard values -- is zero enough?
292 */
293 if (crarg.gca_guard == 0) {
294 return EINVAL;
295 }
296
297 struct vnode_attr va;
298 struct nameidata nd;
299 vfs_context_t ctx = vfs_context_current();
300 int cmode;
301
302 VATTR_INIT(&va);
303 cmode = ((uap->mode & ~p->p_fd.fd_cmask) & ALLPERMS) & ~S_ISTXT;
304 VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
305
306 NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
307 uap->path, ctx);
308
309 return open1(ctx, ndp: &nd, uflags: uap->flags | O_CLOFORK, vap: &va,
310 fp_init: guarded_fileproc_init, initarg: &crarg, retval, AUTH_OPEN_NOAUTHFD);
311}
312
313/*
314 * int guarded_open_dprotected_np(const char *pathname, int flags,
315 * const guardid_t *guard, u_int guardflags, int dpclass, int dpflags, ...);
316 *
317 * This SPI is extension of guarded_open_np() to include dataprotection class on creation
318 * in "dpclass" and dataprotection flags 'dpflags'. Otherwise behaviors are same as in
319 * guarded_open_np()
320 */
321int
322guarded_open_dprotected_np(proc_t p, struct guarded_open_dprotected_np_args *uap, int32_t *retval)
323{
324 if ((uap->flags & O_CLOEXEC) == 0) {
325 return EINVAL;
326 }
327
328 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
329 ((uap->guardflags & ~GUARD_ALL) != 0)) {
330 return EINVAL;
331 }
332
333 int error;
334 struct gfp_crarg crarg = {
335 .gca_attrs = (uint16_t)uap->guardflags
336 };
337
338 if ((error = copyin(uap->guard,
339 &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) {
340 return error;
341 }
342
343 /*
344 * Disallow certain guard values -- is zero enough?
345 */
346 if (crarg.gca_guard == 0) {
347 return EINVAL;
348 }
349
350 struct vnode_attr va;
351 struct nameidata nd;
352 vfs_context_t ctx = vfs_context_current();
353 int cmode;
354
355 VATTR_INIT(&va);
356 cmode = ((uap->mode & ~p->p_fd.fd_cmask) & ALLPERMS) & ~S_ISTXT;
357 VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
358
359 NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
360 uap->path, ctx);
361
362 /*
363 * Initialize the extra fields in vnode_attr to pass down dataprotection
364 * extra fields.
365 * 1. target cprotect class.
366 * 2. set a flag to mark it as requiring open-raw-encrypted semantics.
367 */
368 if (uap->flags & O_CREAT) {
369 VATTR_SET(&va, va_dataprotect_class, uap->dpclass);
370 }
371
372 if (uap->dpflags & (O_DP_GETRAWENCRYPTED | O_DP_GETRAWUNENCRYPTED)) {
373 if (uap->flags & (O_RDWR | O_WRONLY)) {
374 /* Not allowed to write raw encrypted bytes */
375 return EINVAL;
376 }
377 if (uap->dpflags & O_DP_GETRAWENCRYPTED) {
378 VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
379 }
380 if (uap->dpflags & O_DP_GETRAWUNENCRYPTED) {
381 VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED);
382 }
383 }
384
385 return open1(ctx, ndp: &nd, uflags: uap->flags | O_CLOFORK, vap: &va,
386 fp_init: guarded_fileproc_init, initarg: &crarg, retval, AUTH_OPEN_NOAUTHFD);
387}
388
389/*
390 * int guarded_kqueue_np(const guardid_t *guard, u_int guardflags);
391 *
392 * Create a guarded kqueue descriptor with guardid and guardflags.
393 *
394 * Same restrictions on guardflags as for guarded_open_np().
395 * All kqueues are -always- close-on-exec and close-on-fork by themselves
396 * and are not sendable.
397 */
398int
399guarded_kqueue_np(proc_t p, struct guarded_kqueue_np_args *uap, int32_t *retval)
400{
401 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
402 ((uap->guardflags & ~GUARD_ALL) != 0)) {
403 return EINVAL;
404 }
405
406 int error;
407 struct gfp_crarg crarg = {
408 .gca_attrs = (uint16_t)uap->guardflags
409 };
410
411 if ((error = copyin(uap->guard,
412 &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) {
413 return error;
414 }
415
416 if (crarg.gca_guard == 0) {
417 return EINVAL;
418 }
419
420 return kqueue_internal(p, guarded_fileproc_init, initarg: &crarg, retval);
421}
422
423/*
424 * int guarded_close_np(int fd, const guardid_t *guard);
425 */
426int
427guarded_close_np(proc_t p, struct guarded_close_np_args *uap,
428 __unused int32_t *retval)
429{
430 struct fileproc *fp;
431 kauth_cred_t p_cred;
432 int fd = uap->fd;
433 int error;
434 guardid_t uguard;
435
436 AUDIT_SYSCLOSE(p, fd);
437
438 if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
439 return error;
440 }
441
442 proc_fdlock(p);
443 if ((error = fp_lookup_guarded(p, fd, guard: uguard, fpp: &fp, locked: 1)) != 0) {
444 proc_fdunlock(p);
445 return error;
446 }
447 fp_drop(p, fd, fp, locked: 1);
448
449 p_cred = current_cached_proc_cred(p);
450 return fp_close_and_unlock(p, p_cred, fd, fp, flags: 0);
451}
452
453/*
454 * int
455 * change_fdguard_np(int fd, const guardid_t *guard, u_int guardflags,
456 * const guardid_t *nguard, u_int nguardflags, int *fdflagsp);
457 *
458 * Given a file descriptor, atomically exchange <guard, guardflags> for
459 * a new guard <nguard, nguardflags>, returning the previous fd
460 * flags (see fcntl:F_SETFD) in *fdflagsp.
461 *
462 * This syscall can be used to either (a) add a new guard to an existing
463 * unguarded file descriptor (b) remove the old guard from an existing
464 * guarded file descriptor or (c) change the guard (guardid and/or
465 * guardflags) on a guarded file descriptor.
466 *
467 * If 'guard' is NULL, fd must be unguarded at entry. If the call completes
468 * successfully the fd will be guarded with <nguard, nguardflags>.
469 *
470 * Guarding a file descriptor has some side-effects on the "fp_flags"
471 * associated with the descriptor - in particular FD_CLOEXEC is
472 * forced ON unconditionally, and FD_CLOFORK is forced ON by GUARD_CLOSE.
473 * Callers who wish to subsequently restore the state of the fd should save
474 * the value of *fdflagsp after a successful invocation.
475 *
476 * If 'nguard' is NULL, fd must be guarded at entry, <guard, guardflags>
477 * must match with what's already guarding the descriptor, and the
478 * result will be to completely remove the guard.
479 *
480 * If the descriptor is guarded, and neither 'guard' nor 'nguard' is NULL
481 * and <guard, guardflags> matches what's already guarding the descriptor,
482 * then <nguard, nguardflags> becomes the new guard. In this case, even if
483 * the GUARD_CLOSE flag is being cleared, it is still possible to continue
484 * to keep FD_CLOFORK on the descriptor by passing FD_CLOFORK via fdflagsp.
485 *
486 * (File descriptors whose underlying fileglobs are marked FG_CONFINED are
487 * still close-on-fork, regardless of the setting of FD_CLOFORK.)
488 *
489 * Example 1: Guard an unguarded descriptor during a set of operations,
490 * then restore the original state of the descriptor.
491 *
492 * int sav_flags = 0;
493 * change_fdguard_np(fd, NULL, 0, &myguard, GUARD_CLOSE, &sav_flags);
494 * // do things with now guarded 'fd'
495 * change_fdguard_np(fd, &myguard, GUARD_CLOSE, NULL, 0, &sav_flags);
496 * // fd now unguarded.
497 *
498 * Example 2: Change the guard of a guarded descriptor during a set of
499 * operations, then restore the original state of the descriptor.
500 *
501 * int sav_flags = (gdflags & GUARD_CLOSE) ? FD_CLOFORK : 0;
502 * change_fdguard_np(fd, &gd, gdflags, &myguard, GUARD_CLOSE, &sav_flags);
503 * // do things with 'fd' with a different guard
504 * change_fdguard_np(fd, &myg, GUARD_CLOSE, &gd, gdflags, &sav_flags);
505 * // back to original guarded state
506 *
507 * XXX This SPI is too much of a chainsaw and should be revised.
508 */
509
510int
511change_fdguard_np(proc_t p, struct change_fdguard_np_args *uap,
512 __unused int32_t *retval)
513{
514 struct fileproc_guard *fpg = NULL;
515 struct fileproc *fp;
516 int fd = uap->fd;
517 int error;
518 guardid_t oldg = 0, newg = 0;
519 int nfdflags = 0;
520
521 if (0 != uap->guard &&
522 0 != (error = copyin(uap->guard, &oldg, sizeof(oldg)))) {
523 return error; /* can't copyin current guard */
524 }
525 if (0 != uap->nguard &&
526 0 != (error = copyin(uap->nguard, &newg, sizeof(newg)))) {
527 return error; /* can't copyin new guard */
528 }
529 if (0 != uap->fdflagsp &&
530 0 != (error = copyin(uap->fdflagsp, &nfdflags, sizeof(nfdflags)))) {
531 return error; /* can't copyin new fdflags */
532 }
533
534 if (oldg == 0 && newg) {
535 fpg = guarded_fileproc_alloc(guard: newg);
536 }
537
538 proc_fdlock(p);
539
540 if ((error = fp_lookup(p, fd, resultfp: &fp, locked: 1)) != 0) {
541 proc_fdunlock(p);
542 return error;
543 }
544
545 if (0 != uap->fdflagsp) {
546 int ofl = 0;
547 if (fp->fp_flags & FP_CLOEXEC) {
548 ofl |= FD_CLOEXEC;
549 }
550 if (fp->fp_flags & FP_CLOFORK) {
551 ofl |= FD_CLOFORK;
552 }
553 proc_fdunlock(p);
554 if (0 != (error = copyout(&ofl, uap->fdflagsp, sizeof(ofl)))) {
555 proc_fdlock(p);
556 goto dropout; /* can't copyout old fdflags */
557 }
558 proc_fdlock(p);
559 }
560
561 if (fp->fp_guard_attrs) {
562 if (0 == uap->guard || 0 == uap->guardflags) {
563 error = EINVAL; /* missing guard! */
564 } else if (0 == oldg) {
565 error = EPERM; /* guardids cannot be zero */
566 }
567 } else {
568 if (0 != uap->guard || 0 != uap->guardflags) {
569 error = EINVAL; /* guard provided, but none needed! */
570 }
571 }
572
573 if (0 != error) {
574 goto dropout;
575 }
576
577 if (0 != uap->nguard) {
578 /*
579 * There's a new guard in town.
580 */
581 if (0 == newg) {
582 error = EINVAL; /* guards cannot contain zero */
583 } else if (((uap->nguardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
584 ((uap->nguardflags & ~GUARD_ALL) != 0)) {
585 error = EINVAL; /* must have valid attributes too */
586 }
587 if (0 != error) {
588 goto dropout;
589 }
590
591 if (fp->fp_guard_attrs) {
592 /*
593 * Replace old guard with new guard
594 */
595 if (oldg == fp->fp_guard->fpg_guard &&
596 uap->guardflags == fp->fp_guard_attrs) {
597 /*
598 * Must match existing guard + attributes
599 * before we'll swap them to new ones, managing
600 * fdflags "side-effects" as we go. Note that
601 * userland can request FD_CLOFORK semantics.
602 */
603 if (fp->fp_guard_attrs & GUARD_CLOSE) {
604 fp->fp_flags &= ~FP_CLOFORK;
605 }
606 fp->fp_guard->fpg_guard = newg;
607 fp->fp_guard_attrs = (uint16_t)uap->nguardflags;
608 if ((fp->fp_guard_attrs & GUARD_CLOSE) ||
609 (nfdflags & FD_CLOFORK)) {
610 fp->fp_flags |= FP_CLOFORK;
611 }
612 /* FG_CONFINED enforced regardless */
613 } else {
614 error = EPERM;
615 }
616 } else {
617 /*
618 * Add a guard to a previously unguarded descriptor
619 */
620 switch (FILEGLOB_DTYPE(fp->fp_glob)) {
621 case DTYPE_VNODE:
622 case DTYPE_PIPE:
623 case DTYPE_SOCKET:
624 case DTYPE_KQUEUE:
625 case DTYPE_NETPOLICY:
626 break;
627 default:
628 error = ENOTSUP;
629 goto dropout;
630 }
631
632 fp->fp_guard_attrs = (uint16_t)uap->nguardflags;
633 fpg->fpg_wset = fp->fp_wset;
634 fp->fp_guard = fpg;
635 fpg = NULL;
636 if (fp->fp_guard_attrs & GUARD_CLOSE) {
637 fp->fp_flags |= FP_CLOFORK;
638 }
639 fp->fp_flags |= FP_CLOEXEC;
640 }
641 } else {
642 if (fp->fp_guard_attrs) {
643 /*
644 * Remove the guard altogether.
645 */
646 if (0 != uap->nguardflags) {
647 error = EINVAL;
648 goto dropout;
649 }
650
651 if (oldg != fp->fp_guard->fpg_guard ||
652 uap->guardflags != fp->fp_guard_attrs) {
653 error = EPERM;
654 goto dropout;
655 }
656
657 assert(fpg == NULL);
658 fp->fp_guard_attrs = 0;
659 fpg = fp->fp_guard;
660 fp->fp_wset = fpg->fpg_wset;
661
662 fp->fp_flags &= ~(FP_CLOEXEC | FP_CLOFORK);
663 if (nfdflags & FD_CLOFORK) {
664 fp->fp_flags |= FP_CLOFORK;
665 }
666 if (nfdflags & FD_CLOEXEC) {
667 fp->fp_flags |= FP_CLOEXEC;
668 }
669 } else {
670 /*
671 * Not already guarded, and no new guard?
672 */
673 error = EINVAL;
674 }
675 }
676
677dropout:
678 (void) fp_drop(p, fd, fp, locked: 1);
679 proc_fdunlock(p);
680
681 if (fpg) {
682 zfree(fp_guard_zone, fpg);
683 }
684 return error;
685}
686
687/*
688 * user_ssize_t guarded_write_np(int fd, const guardid_t *guard,
689 * user_addr_t cbuf, user_ssize_t nbyte);
690 *
691 * Initial implementation of guarded writes.
692 */
693int
694guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t *retval)
695{
696 int error;
697 guardid_t uguard;
698
699 AUDIT_ARG(fd, uap->fd);
700
701 if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
702 return error;
703 }
704
705 return write_internal(p, fd: uap->fd, buf: uap->cbuf, nbyte: uap->nbyte, offset: 0, flags: 0, puguard: &uguard, retval);
706}
707
708/*
709 * user_ssize_t guarded_pwrite_np(int fd, const guardid_t *guard,
710 * user_addr_t buf, user_size_t nbyte, off_t offset);
711 *
712 * Initial implementation of guarded pwrites.
713 */
714int
715guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize_t *retval)
716{
717 int error;
718 guardid_t uguard;
719
720 AUDIT_ARG(fd, uap->fd);
721
722 if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
723 return error;
724 }
725
726 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_guarded_pwrite_np) | DBG_FUNC_NONE),
727 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
728
729 return write_internal(p, fd: uap->fd, buf: uap->buf, nbyte: uap->nbyte, offset: uap->offset, FOF_OFFSET,
730 puguard: &uguard, retval);
731}
732
733/*
734 * user_ssize_t guarded_writev_np(int fd, const guardid_t *guard,
735 * struct iovec *iovp, u_int iovcnt);
736 *
737 * Initial implementation of guarded writev.
738 *
739 */
740int
741guarded_writev_np(struct proc *p, struct guarded_writev_np_args *uap, user_ssize_t *retval)
742{
743 int error;
744 guardid_t uguard;
745
746 AUDIT_ARG(fd, uap->fd);
747
748 if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
749 return error;
750 }
751
752 return writev_uio(p, fd: uap->fd, user_iovp: uap->iovp, iovcnt: uap->iovcnt, offset: 0, flags: 0, puguard: &uguard, retval);
753}
754
755/*
756 * int falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
757 * vfs_context_t ctx, const guardid_t *guard, u_int attrs);
758 *
759 * This SPI is the guarded variant of falloc(). It borrows the same
760 * restrictions as those used by the rest of the guarded_* routines.
761 */
762int
763falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
764 vfs_context_t ctx, const guardid_t *guard, u_int attrs)
765{
766 kauth_cred_t p_cred = current_cached_proc_cred(p);
767 struct gfp_crarg crarg;
768
769 if (((attrs & GUARD_REQUIRED) != GUARD_REQUIRED) ||
770 ((attrs & ~GUARD_ALL) != 0) || (*guard == 0)) {
771 return EINVAL;
772 }
773
774 bzero(s: &crarg, n: sizeof(crarg));
775 crarg.gca_guard = *guard;
776 crarg.gca_attrs = (uint16_t)attrs;
777
778 return falloc_withinit(p, p_cred, ctx, resultfp: fp, resultfd: fd, fp_init: guarded_fileproc_init, initarg: &crarg);
779}
780
781#if CONFIG_MACF && CONFIG_VNGUARD
782
783/*
784 * Guarded vnodes
785 *
786 * Uses MAC hooks to guard operations on vnodes in the system. Given an fd,
787 * add data to the label on the fileglob and the vnode it points at.
788 * The data contains a pointer to the fileglob, the set of attributes to
789 * guard, a guard value for uniquification, and the pid of the process
790 * who set the guard up in the first place.
791 *
792 * The fd must have been opened read/write, and the underlying
793 * fileglob is FG_CONFINED so that there's no ambiguity about the
794 * owning process.
795 *
796 * When there's a callback for a vnode operation of interest (rename, unlink,
797 * etc.) check to see if the guard permits that operation, and if not
798 * take an action e.g. log a message or generate a crash report.
799 *
800 * The label is removed from the vnode and the fileglob when the fileglob
801 * is closed.
802 *
803 * The initial action to be taken can be specified by a boot arg (vnguard=0x42)
804 * and change via the "kern.vnguard.flags" sysctl.
805 */
806
struct vng_owner;

struct vng_info {        /* lives on the vnode label */
	guardid_t vgi_guard;                    /* uniquifying guard value */
	unsigned vgi_attrs;                     /* VNG_* attributes being enforced */
	TAILQ_HEAD(, vng_owner) vgi_owners;     /* fileglobs sharing this guard */
};

struct vng_owner {       /* lives on the fileglob label */
	proc_t vgo_p;                           /* process that set up the guard */
	struct vng_info *vgo_vgi;               /* back-pointer to the vnode guard */
	TAILQ_ENTRY(vng_owner) vgo_link;        /* linkage on vgi_owners */
};
820
821static struct vng_info *
822new_vgi(unsigned attrs, guardid_t guard)
823{
824 struct vng_info *vgi = kalloc_type(struct vng_info, Z_WAITOK);
825 vgi->vgi_guard = guard;
826 vgi->vgi_attrs = attrs;
827 TAILQ_INIT(&vgi->vgi_owners);
828 return vgi;
829}
830
831static struct vng_owner *
832new_vgo(proc_t p)
833{
834 struct vng_owner *vgo = kalloc_type(struct vng_owner, Z_WAITOK | Z_ZERO);
835 vgo->vgo_p = p;
836 return vgo;
837}
838
/* Link an owner onto a vnode guard's owner list (llock exclusive held). */
static void
vgi_add_vgo(struct vng_info *vgi, struct vng_owner *vgo)
{
	vgo->vgo_vgi = vgi;
	TAILQ_INSERT_HEAD(&vgi->vgi_owners, vgo, vgo_link);
}
845
/*
 * Unlink an owner from a vnode guard's owner list.
 * Returns TRUE iff that was the last owner (caller should tear down vgi).
 */
static boolean_t
vgi_remove_vgo(struct vng_info *vgi, struct vng_owner *vgo)
{
	TAILQ_REMOVE(&vgi->vgi_owners, vgo, vgo_link);
	vgo->vgo_vgi = NULL;
	return TAILQ_EMPTY(&vgi->vgi_owners);
}
853
/* Free a vng_info; must have no remaining owners. */
static void
free_vgi(struct vng_info *vgi)
{
	assert(TAILQ_EMPTY(&vgi->vgi_owners));
#if DEVELOP || DEBUG
	/*
	 * Poison freed memory on debug kernels.
	 * NOTE(review): memset only uses the low byte of its fill argument,
	 * so 0xbeadfade fills with 0xde -- confirm the intended pattern.
	 */
	memset(vgi, 0xbeadfade, sizeof(*vgi));
#endif
	kfree_type(struct vng_info, vgi);
}
863
/* Free a vng_owner (poisoned first on debug kernels). */
static void
free_vgo(struct vng_owner *vgo)
{
#if DEVELOP || DEBUG
	/* NOTE(review): memset uses only the low byte (0xd0) of this constant. */
	memset(vgo, 0x2bedf1d0, sizeof(*vgo));
#endif
	kfree_type(struct vng_owner, vgo);
}
872
/* MAC label slot assigned to this policy; llock guards all label data. */
static int label_slot;
static LCK_GRP_DECLARE(llock_grp, VNG_POLICY_NAME);
static LCK_RW_DECLARE(llock, &llock_grp);
876
877static __inline void *
878vng_lbl_get(struct label *label)
879{
880 lck_rw_assert(&llock, LCK_RW_ASSERT_HELD);
881 void *data;
882 if (NULL == label) {
883 data = NULL;
884 } else {
885 data = (void *)mac_label_get(label, label_slot);
886 }
887 return data;
888}
889
890static __inline struct vng_info *
891vng_lbl_get_withattr(struct label *label, unsigned attrmask)
892{
893 struct vng_info *vgi = vng_lbl_get(label);
894 assert(NULL == vgi || (vgi->vgi_attrs & ~VNG_ALL) == 0);
895 if (NULL != vgi && 0 == (vgi->vgi_attrs & attrmask)) {
896 vgi = NULL;
897 }
898 return vgi;
899}
900
/* Store this policy's slot data on a label; llock must be held exclusive. */
static __inline void
vng_lbl_set(struct label *label, void *data)
{
	assert(NULL != label);
	lck_rw_assert(&llock, LCK_RW_ASSERT_EXCLUSIVE);
	mac_label_set(label, label_slot, (intptr_t)data);
}
908
/*
 * VNG_SYSC_GET_ATTR: look up the guard attributes on the vnode behind
 * vga->vga_fd and return them in vga->vga_attrs.
 *
 * Returns 0 on success (vga_attrs may be 0 if the vnode is unguarded),
 * EINVAL for a zero guard, EBADF if the fd isn't a regular file vnode,
 * EPERM on guard value mismatch.
 */
static int
vnguard_sysc_getguardattr(proc_t p, struct vnguard_getattr *vga)
{
	const int fd = vga->vga_fd;

	if (0 == vga->vga_guard) {
		return EINVAL;
	}

	int error;
	struct fileproc *fp;
	if (0 != (error = fp_lookup(p, fd, &fp, 0))) {
		return error;
	}
	/* do/while(0): single cleanup path that always drops the fp ref. */
	do {
		struct fileglob *fg = fp->fp_glob;
		if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		struct vnode *vp = fg_get_data(fg);
		/* Only regular files on a mounted filesystem can be guarded. */
		if (!vnode_isreg(vp) || NULL == vp->v_mount) {
			error = EBADF;
			break;
		}
		error = vnode_getwithref(vp);
		if (0 != error) {
			break;
		}

		vga->vga_attrs = 0;

		/* Shared lock suffices: we only read label data. */
		lck_rw_lock_shared(&llock);

		if (NULL != mac_vnode_label(vp)) {
			const struct vng_info *vgi = vng_lbl_get(mac_vnode_label(vp));
			if (NULL != vgi) {
				if (vgi->vgi_guard != vga->vga_guard) {
					error = EPERM;
				} else {
					vga->vga_attrs = vgi->vgi_attrs;
				}
			}
		}

		lck_rw_unlock_shared(&llock);
		vnode_put(vp);
	} while (0);

	fp_drop(p, fd, fp, 0);
	return error;
}
961
/*
 * VNG_SYSC_SET_GUARD: attach a vnode guard <vns_guard, vns_attrs> to the
 * regular-file vnode behind vns->vns_fd.
 *
 * Requirements: attrs non-zero and within VNG_ALL, guard non-zero, fd
 * open read/write, and the fileglob FG_CONFINED (so exactly one fd and
 * one owning process refer to it).  A vnode may be guarded by multiple
 * fileglobs provided guard and attributes agree.
 */
static int
vnguard_sysc_setguard(proc_t p, const struct vnguard_set *vns)
{
	const int fd = vns->vns_fd;

	if ((vns->vns_attrs & ~VNG_ALL) != 0 ||
	    0 == vns->vns_attrs || 0 == vns->vns_guard) {
		return EINVAL;
	}

	int error;
	struct fileproc *fp;
	if (0 != (error = fp_lookup(p, fd, &fp, 0))) {
		return error;
	}
	/* do/while(0): single cleanup path that always drops the fp ref. */
	do {
		/*
		 * To avoid trivial DoS, insist that the caller
		 * has read/write access to the file.
		 */
		if ((FREAD | FWRITE) != (fp->f_flag & (FREAD | FWRITE))) {
			error = EBADF;
			break;
		}
		struct fileglob *fg = fp->fp_glob;
		if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		/*
		 * Confinement means there's only one fd pointing at
		 * this fileglob, and will always be associated with
		 * this pid.
		 */
		if (0 == (FG_CONFINED & fg->fg_lflags)) {
			error = EBADF;
			break;
		}
		struct vnode *vp = fg_get_data(fg);
		if (!vnode_isreg(vp) || NULL == vp->v_mount) {
			error = EBADF;
			break;
		}
		error = vnode_getwithref(vp);
		if (0 != error) {
			break;
		}

		/* Ensure the target vnode -has- a label */
		struct vfs_context *ctx = vfs_context_current();
		mac_vnode_label_update(ctx, vp, NULL);

		/* Allocate before taking llock; free whichever isn't used. */
		struct vng_info *nvgi = new_vgi(vns->vns_attrs, vns->vns_guard);
		struct vng_owner *nvgo = new_vgo(p);

		lck_rw_lock_exclusive(&llock);

		do {
			/*
			 * A vnode guard is associated with one or more
			 * fileglobs in one or more processes.
			 */
			struct vng_info *vgi = vng_lbl_get(mac_vnode_label(vp));
			struct vng_owner *vgo = fg->fg_vgo;

			if (NULL == vgi) {
				/* vnode unguarded, add the first guard */
				if (NULL != vgo) {
					panic("vnguard label on fileglob "
					    "but not vnode");
				}
				/* add a kusecount so we can unlabel later */
				error = vnode_ref_ext(vp, O_EVTONLY, 0);
				if (0 == error) {
					/* add the guard */
					vgi_add_vgo(nvgi, nvgo);
					vng_lbl_set(mac_vnode_label(vp), nvgi);
					fg->fg_vgo = nvgo;
				} else {
					free_vgo(nvgo);
					free_vgi(nvgi);
				}
			} else {
				/* vnode already guarded */
				free_vgi(nvgi);
				if (vgi->vgi_guard != vns->vns_guard) {
					error = EPERM;  /* guard mismatch */
				} else if (vgi->vgi_attrs != vns->vns_attrs) {
					/*
					 * Temporary workaround for older versions of SQLite:
					 * allow newer guard attributes to be silently cleared.
					 */
					const unsigned mask = ~(VNG_WRITE_OTHER | VNG_TRUNC_OTHER);
					if ((vgi->vgi_attrs & mask) == (vns->vns_attrs & mask)) {
						vgi->vgi_attrs &= vns->vns_attrs;
					} else {
						error = EACCES; /* attr mismatch */
					}
				}
				if (0 != error || NULL != vgo) {
					free_vgo(nvgo);
					break;
				}
				/* record shared ownership */
				vgi_add_vgo(vgi, nvgo);
				fg->fg_vgo = nvgo;
			}
		} while (0);

		lck_rw_unlock_exclusive(&llock);
		vnode_put(vp);
	} while (0);

	fp_drop(p, fd, fp, 0);
	return error;
}
1078
1079static int
1080vng_policy_syscall(proc_t p, int cmd, user_addr_t arg)
1081{
1082 int error = EINVAL;
1083
1084 switch (cmd) {
1085 case VNG_SYSC_PING:
1086 if (0 == arg) {
1087 error = 0;
1088 }
1089 break;
1090 case VNG_SYSC_SET_GUARD: {
1091 struct vnguard_set vns;
1092 error = copyin(arg, (void *)&vns, sizeof(vns));
1093 if (error) {
1094 break;
1095 }
1096 error = vnguard_sysc_setguard(p, &vns);
1097 break;
1098 }
1099 case VNG_SYSC_GET_ATTR: {
1100 struct vnguard_getattr vga;
1101 error = copyin(arg, (void *)&vga, sizeof(vga));
1102 if (error) {
1103 break;
1104 }
1105 error = vnguard_sysc_getguardattr(p, &vga);
1106 if (error) {
1107 break;
1108 }
1109 error = copyout((void *)&vga, arg, sizeof(vga));
1110 break;
1111 }
1112 default:
1113 break;
1114 }
1115 return error;
1116}
1117
1118/*
1119 * This is called just before the fileglob disappears in fg_free().
1120 * Take the exclusive lock: no other thread can add or remove
1121 * a vng_info to any vnode in the system.
1122 */
1123void
1124vng_file_label_destroy(struct fileglob *fg)
1125{
1126 struct vng_owner *lvgo = fg->fg_vgo;
1127 struct vng_info *vgi = NULL;
1128
1129 if (lvgo) {
1130 lck_rw_lock_exclusive(&llock);
1131 fg->fg_vgo = NULL;
1132 vgi = lvgo->vgo_vgi;
1133 assert(vgi);
1134 if (vgi_remove_vgo(vgi, lvgo)) {
1135 /* that was the last reference */
1136 vgi->vgi_attrs = 0;
1137 if (DTYPE_VNODE == FILEGLOB_DTYPE(fg)) {
1138 struct vnode *vp = fg_get_data(fg);
1139 int error = vnode_getwithref(vp);
1140 if (0 == error) {
1141 vng_lbl_set(mac_vnode_label(vp), 0);
1142 lck_rw_unlock_exclusive(&llock);
1143 /* may trigger VNOP_INACTIVE */
1144 vnode_rele_ext(vp, O_EVTONLY, 0);
1145 vnode_put(vp);
1146 free_vgi(vgi);
1147 free_vgo(lvgo);
1148 return;
1149 }
1150 }
1151 }
1152 lck_rw_unlock_exclusive(&llock);
1153 free_vgo(lvgo);
1154 }
1155}
1156
1157static os_reason_t
1158vng_reason_from_pathname(const char *path, uint32_t pathlen)
1159{
1160 os_reason_t r = os_reason_create(OS_REASON_GUARD, GUARD_REASON_VNODE);
1161 if (NULL == r) {
1162 return r;
1163 }
1164 /*
1165 * If the pathname is very long, just keep the trailing part
1166 */
1167 const uint32_t pathmax = 3 * EXIT_REASON_USER_DESC_MAX_LEN / 4;
1168 if (pathlen > pathmax) {
1169 path += (pathlen - pathmax);
1170 pathlen = pathmax;
1171 }
1172 uint32_t rsize = kcdata_estimate_required_buffer_size(1, pathlen);
1173 if (0 == os_reason_alloc_buffer(r, rsize)) {
1174 struct kcdata_descriptor *kcd = &r->osr_kcd_descriptor;
1175 mach_vm_address_t addr;
1176 if (kcdata_get_memory_addr(kcd,
1177 EXIT_REASON_USER_DESC, pathlen, &addr) == KERN_SUCCESS) {
1178 kcdata_memcpy(kcd, addr, path, pathlen);
1179 return r;
1180 }
1181 }
1182 os_reason_free(r);
1183 return OS_REASON_NULL;
1184}
1185
/* Bitmask of kVNG_POLICY_* flags selecting enforcement behavior (see vnguard_policy_init). */
static int vng_policy_flags;
1187
1188/*
1189 * Note: if an EXC_GUARD is generated, llock will be dropped and
1190 * subsequently reacquired by this routine. Data derived from
1191 * any label in the caller should be regenerated.
1192 */
1193static int
1194vng_guard_violation(const struct vng_info *vgi,
1195 unsigned opval, vnode_t vp)
1196{
1197 int retval = 0;
1198
1199 if (vng_policy_flags & kVNG_POLICY_EPERM) {
1200 /* deny the operation */
1201 retval = EPERM;
1202 }
1203
1204 if (vng_policy_flags & (kVNG_POLICY_LOGMSG | kVNG_POLICY_UPRINTMSG)) {
1205 /* log a message */
1206 const char *op;
1207 switch (opval) {
1208 case VNG_RENAME_FROM:
1209 op = "rename-from";
1210 break;
1211 case VNG_RENAME_TO:
1212 op = "rename-to";
1213 break;
1214 case VNG_UNLINK:
1215 op = "unlink";
1216 break;
1217 case VNG_LINK:
1218 op = "link";
1219 break;
1220 case VNG_EXCHDATA:
1221 op = "exchdata";
1222 break;
1223 case VNG_WRITE_OTHER:
1224 op = "write";
1225 break;
1226 case VNG_TRUNC_OTHER:
1227 op = "truncate";
1228 break;
1229 default:
1230 op = "(unknown)";
1231 break;
1232 }
1233
1234 const char *nm = vnode_getname(vp);
1235 proc_t p = current_proc();
1236 const struct vng_owner *vgo;
1237 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1238 const char fmt[] =
1239 "%s[%d]: %s%s: '%s' guarded by %s[%d] (0x%llx)\n";
1240
1241 if (vng_policy_flags & kVNG_POLICY_LOGMSG) {
1242 printf(fmt,
1243 proc_name_address(p), proc_pid(p), op,
1244 0 != retval ? " denied" : "",
1245 NULL != nm ? nm : "(unknown)",
1246 proc_name_address(vgo->vgo_p),
1247 proc_pid(vgo->vgo_p), vgi->vgi_guard);
1248 }
1249 if (vng_policy_flags & kVNG_POLICY_UPRINTMSG) {
1250 uprintf(fmt,
1251 proc_name_address(p), proc_pid(p), op,
1252 0 != retval ? " denied" : "",
1253 NULL != nm ? nm : "(unknown)",
1254 proc_name_address(vgo->vgo_p),
1255 proc_pid(vgo->vgo_p), vgi->vgi_guard);
1256 }
1257 }
1258 if (NULL != nm) {
1259 vnode_putname(nm);
1260 }
1261 }
1262
1263 if (vng_policy_flags & (kVNG_POLICY_EXC | kVNG_POLICY_EXC_CORPSE)) {
1264 /* EXC_GUARD exception */
1265 const struct vng_owner *vgo = TAILQ_FIRST(&vgi->vgi_owners);
1266 pid_t pid = vgo ? proc_pid(vgo->vgo_p) : 0;
1267 mach_exception_code_t code;
1268 mach_exception_subcode_t subcode;
1269
1270 code = 0;
1271 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_VN);
1272 EXC_GUARD_ENCODE_FLAVOR(code, opval);
1273 EXC_GUARD_ENCODE_TARGET(code, pid);
1274 subcode = vgi->vgi_guard;
1275
1276 lck_rw_unlock_shared(&llock);
1277
1278 if (vng_policy_flags & kVNG_POLICY_EXC_CORPSE) {
1279 char *path;
1280 int len = MAXPATHLEN;
1281
1282 path = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
1283
1284 os_reason_t r = NULL;
1285 vn_getpath(vp, path, &len);
1286 if (*path && len) {
1287 r = vng_reason_from_pathname(path, len);
1288 }
1289 task_violated_guard(code, subcode, r, TRUE); /* not fatal */
1290 if (NULL != r) {
1291 os_reason_free(r);
1292 }
1293
1294 zfree(ZV_NAMEI, path);
1295 } else {
1296 thread_t t = current_thread();
1297 thread_guard_violation(t, code, subcode, TRUE);
1298 }
1299
1300 lck_rw_lock_shared(&llock);
1301 } else if (vng_policy_flags & kVNG_POLICY_SIGKILL) {
1302 proc_t p = current_proc();
1303 psignal(p, SIGKILL);
1304 }
1305
1306 return retval;
1307}
1308
1309/*
1310 * A fatal vnode guard was tripped on this thread.
1311 *
1312 * (Invoked before returning to userland from the syscall handler.)
1313 */
1314void
1315vn_guard_ast(thread_t __unused t,
1316 mach_exception_data_type_t code, mach_exception_data_type_t subcode)
1317{
1318 const bool fatal = true;
1319 /*
1320 * Check if anyone has registered for Synchronous EXC_GUARD, if yes then,
1321 * deliver it synchronously and then kill the process, else kill the process
1322 * and deliver the exception via EXC_CORPSE_NOTIFY. Always kill the process if we are not in dev mode.
1323 */
1324 if (task_exception_notify(EXC_GUARD, code, subcode, fatal) == KERN_SUCCESS) {
1325 psignal(current_proc(), SIGKILL);
1326 } else {
1327 exit_with_guard_exception(current_proc(), code, subcode);
1328 }
1329}
1330
1331/*
1332 * vnode callbacks
1333 */
1334
1335static int
1336vng_vnode_check_rename(kauth_cred_t __unused cred,
1337 struct vnode *__unused dvp, struct label *__unused dlabel,
1338 struct vnode *vp, struct label *label,
1339 struct componentname *__unused cnp,
1340 struct vnode *__unused tdvp, struct label *__unused tdlabel,
1341 struct vnode *tvp, struct label *tlabel,
1342 struct componentname *__unused tcnp)
1343{
1344 int error = 0;
1345 if (NULL != label || NULL != tlabel) {
1346 lck_rw_lock_shared(&llock);
1347 const struct vng_info *vgi =
1348 vng_lbl_get_withattr(label, VNG_RENAME_FROM);
1349 if (NULL != vgi) {
1350 error = vng_guard_violation(vgi, VNG_RENAME_FROM, vp);
1351 }
1352 if (0 == error) {
1353 vgi = vng_lbl_get_withattr(tlabel, VNG_RENAME_TO);
1354 if (NULL != vgi) {
1355 error = vng_guard_violation(vgi,
1356 VNG_RENAME_TO, tvp);
1357 }
1358 }
1359 lck_rw_unlock_shared(&llock);
1360 }
1361 return error;
1362}
1363
1364static int
1365vng_vnode_check_link(kauth_cred_t __unused cred,
1366 struct vnode *__unused dvp, struct label *__unused dlabel,
1367 struct vnode *vp, struct label *label, struct componentname *__unused cnp)
1368{
1369 int error = 0;
1370 if (NULL != label) {
1371 lck_rw_lock_shared(&llock);
1372 const struct vng_info *vgi =
1373 vng_lbl_get_withattr(label, VNG_LINK);
1374 if (vgi) {
1375 error = vng_guard_violation(vgi, VNG_LINK, vp);
1376 }
1377 lck_rw_unlock_shared(&llock);
1378 }
1379 return error;
1380}
1381
1382static int
1383vng_vnode_check_unlink(kauth_cred_t __unused cred,
1384 struct vnode *__unused dvp, struct label *__unused dlabel,
1385 struct vnode *vp, struct label *label, struct componentname *__unused cnp)
1386{
1387 int error = 0;
1388 if (NULL != label) {
1389 lck_rw_lock_shared(&llock);
1390 const struct vng_info *vgi =
1391 vng_lbl_get_withattr(label, VNG_UNLINK);
1392 if (vgi) {
1393 error = vng_guard_violation(vgi, VNG_UNLINK, vp);
1394 }
1395 lck_rw_unlock_shared(&llock);
1396 }
1397 return error;
1398}
1399
1400/*
1401 * Only check violations for writes performed by "other processes"
1402 */
1403static int
1404vng_vnode_check_write(kauth_cred_t __unused actv_cred,
1405 kauth_cred_t __unused file_cred, struct vnode *vp, struct label *label)
1406{
1407 int error = 0;
1408 if (NULL != label) {
1409 lck_rw_lock_shared(&llock);
1410 const struct vng_info *vgi =
1411 vng_lbl_get_withattr(label, VNG_WRITE_OTHER);
1412 if (vgi) {
1413 proc_t p = current_proc();
1414 const struct vng_owner *vgo;
1415 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1416 if (vgo->vgo_p == p) {
1417 goto done;
1418 }
1419 }
1420 error = vng_guard_violation(vgi, VNG_WRITE_OTHER, vp);
1421 }
1422done:
1423 lck_rw_unlock_shared(&llock);
1424 }
1425 return error;
1426}
1427
1428/*
1429 * Only check violations for truncates performed by "other processes"
1430 */
1431static int
1432vng_vnode_check_truncate(kauth_cred_t __unused actv_cred,
1433 kauth_cred_t __unused file_cred, struct vnode *vp,
1434 struct label *label)
1435{
1436 int error = 0;
1437 if (NULL != label) {
1438 lck_rw_lock_shared(&llock);
1439 const struct vng_info *vgi =
1440 vng_lbl_get_withattr(label, VNG_TRUNC_OTHER);
1441 if (vgi) {
1442 proc_t p = current_proc();
1443 const struct vng_owner *vgo;
1444 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1445 if (vgo->vgo_p == p) {
1446 goto done;
1447 }
1448 }
1449 error = vng_guard_violation(vgi, VNG_TRUNC_OTHER, vp);
1450 }
1451done:
1452 lck_rw_unlock_shared(&llock);
1453 }
1454 return error;
1455}
1456
1457static int
1458vng_vnode_check_exchangedata(kauth_cred_t __unused cred,
1459 struct vnode *fvp, struct label *flabel,
1460 struct vnode *svp, struct label *slabel)
1461{
1462 int error = 0;
1463 if (NULL != flabel || NULL != slabel) {
1464 lck_rw_lock_shared(&llock);
1465 const struct vng_info *vgi =
1466 vng_lbl_get_withattr(flabel, VNG_EXCHDATA);
1467 if (NULL != vgi) {
1468 error = vng_guard_violation(vgi, VNG_EXCHDATA, fvp);
1469 }
1470 if (0 == error) {
1471 vgi = vng_lbl_get_withattr(slabel, VNG_EXCHDATA);
1472 if (NULL != vgi) {
1473 error = vng_guard_violation(vgi,
1474 VNG_EXCHDATA, svp);
1475 }
1476 }
1477 lck_rw_unlock_shared(&llock);
1478 }
1479 return error;
1480}
1481
1482/* Intercept open-time truncations (by "other") of a guarded vnode */
1483
1484static int
1485vng_vnode_check_open(kauth_cred_t cred,
1486 struct vnode *vp, struct label *label, int acc_mode)
1487{
1488 if (0 == (acc_mode & O_TRUNC)) {
1489 return 0;
1490 }
1491 return vng_vnode_check_truncate(cred, NULL, vp, label);
1492}
1493
1494/*
1495 * Configuration gorp
1496 */
1497
/* MAC policy operations: vnode-guard enforcement hooks plus the policy syscall. */
SECURITY_READ_ONLY_EARLY(static struct mac_policy_ops) vng_policy_ops = {
	.mpo_vnode_check_link = vng_vnode_check_link,
	.mpo_vnode_check_unlink = vng_vnode_check_unlink,
	.mpo_vnode_check_rename = vng_vnode_check_rename,
	.mpo_vnode_check_write = vng_vnode_check_write,
	.mpo_vnode_check_truncate = vng_vnode_check_truncate,
	.mpo_vnode_check_exchangedata = vng_vnode_check_exchangedata,
	.mpo_vnode_check_open = vng_vnode_check_open,

	.mpo_policy_syscall = vng_policy_syscall,
};
1509
/* Name of the single label slot claimed by this policy. */
static const char *vng_labelnames[] = {
	"vnguard",
};

/* Element count of a statically-sized array. */
#define ACOUNT(arr) ((unsigned)(sizeof (arr) / sizeof (arr[0])))
1515
/* Registration descriptor for the vnode-guard MAC policy. */
SECURITY_READ_ONLY_LATE(static struct mac_policy_conf) vng_policy_conf = {
	.mpc_name = VNG_POLICY_NAME,
	.mpc_fullname = "Guarded vnode policy",
	.mpc_field_off = &label_slot,
	.mpc_labelnames = vng_labelnames,
	.mpc_labelname_count = ACOUNT(vng_labelnames),
	.mpc_ops = &vng_policy_ops,
	.mpc_loadtime_flags = 0,
	.mpc_runtime_flags = 0
};

/* Handle filled in by mac_policy_register() at init. */
SECURITY_READ_ONLY_LATE(static mac_policy_handle_t) vng_policy_handle;
1528
1529void
1530vnguard_policy_init(void)
1531{
1532 if (0 == PE_i_can_has_debugger(NULL)) {
1533 return;
1534 }
1535 vng_policy_flags = kVNG_POLICY_LOGMSG |
1536 kVNG_POLICY_EXC_CORPSE | kVNG_POLICY_UPRINTMSG;
1537 PE_parse_boot_argn("vnguard", &vng_policy_flags, sizeof(vng_policy_flags));
1538 if (vng_policy_flags) {
1539 mac_policy_register(&vng_policy_conf, &vng_policy_handle, NULL);
1540 }
1541}
1542
#if DEBUG || DEVELOPMENT
#include <sys/sysctl.h>

/* kern.vnguard.flags: runtime-tunable policy flags on development kernels. */
SYSCTL_DECL(_kern_vnguard);
SYSCTL_NODE(_kern, OID_AUTO, vnguard, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "vnguard");
SYSCTL_INT(_kern_vnguard, OID_AUTO, flags, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vng_policy_flags, 0, "vnguard policy flags");
#endif
1551
1552#endif /* CONFIG_MACF && CONFIG_VNGUARD */
1553