1/*
2 * Copyright (c) 2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <sys/param.h>
30#include <sys/systm.h>
31#include <sys/filedesc.h>
32#include <sys/kernel.h>
33#include <sys/file_internal.h>
34#include <kern/exc_guard.h>
35#include <sys/guarded.h>
36#include <kern/kalloc.h>
37#include <sys/sysproto.h>
38#include <sys/vnode.h>
39#include <sys/vnode_internal.h>
40#include <sys/uio_internal.h>
41#include <sys/ubc_internal.h>
42#include <vfs/vfs_support.h>
43#include <security/audit/audit.h>
44#include <sys/syscall.h>
45#include <sys/kauth.h>
46#include <sys/kdebug.h>
47#include <stdbool.h>
48#include <vm/vm_protos.h>
49#include <libkern/section_keywords.h>
50#if CONFIG_MACF && CONFIG_VNGUARD
51#include <security/mac.h>
52#include <security/mac_framework.h>
53#include <security/mac_policy.h>
54#include <pexpert/pexpert.h>
55#include <sys/sysctl.h>
56#include <sys/reason.h>
57#endif
58
59
/*
 * Shorthand accessors: reach fileglob fields through a fileproc.
 */
#define f_flag f_fglob->fg_flag
#define f_type f_fglob->fg_ops->fo_type
/* Shared write-path helpers (implemented in the generic syscall code). */
extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
    user_addr_t bufp, user_size_t nbyte, off_t offset,
    int flags, user_ssize_t *retval );
extern int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval);

/*
 * Experimental guarded file descriptor support.
 */

/* Guard-violation notification/termination entry points (Mach layer). */
kern_return_t task_exception_notify(exception_type_t exception,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode);
kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *);
74
75/*
76 * Most fd's have an underlying fileproc struct; but some may be
77 * guarded_fileproc structs which implement guarded fds. The latter
78 * struct (below) embeds the former.
79 *
80 * The two types should be distinguished by the "type" portion of f_flags.
81 * There's also a magic number to help catch misuse and bugs.
82 *
83 * This is a bit unpleasant, but results from the desire to allow
84 * alternate file behaviours for a few file descriptors without
85 * growing the fileproc data structure.
86 */
87
struct guarded_fileproc {
	struct fileproc gf_fileproc;	/* must be first: FP_TO_GFP/GFP_TO_FP cast between them */
	u_int gf_magic;			/* GUARDED_FILEPROC_MAGIC; cheap corruption check */
	u_int gf_attrs;			/* GUARD_* attribute bits being enforced */
	guardid_t gf_guard;		/* caller-chosen guard value; never zero */
};
94
/* Exported so other subsystems can size allocations for the guarded variant. */
const size_t sizeof_guarded_fileproc = sizeof (struct guarded_fileproc);

/* Convert between the embedded fileproc and its enclosing guarded_fileproc. */
#define FP_TO_GFP(fp) ((struct guarded_fileproc *)(fp))
#define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc)

#define GUARDED_FILEPROC_MAGIC 0x29083

/* Creation arguments threaded through the fileproc allocator callback. */
struct gfp_crarg {
	guardid_t gca_guard;	/* guard value for the new descriptor */
	u_int gca_attrs;	/* GUARD_* attributes for the new descriptor */
};
106
107static struct fileproc *
108guarded_fileproc_alloc_init(void *crarg)
109{
110 struct gfp_crarg *aarg = crarg;
111 struct guarded_fileproc *gfp;
112
113 if ((gfp = kalloc(sizeof (*gfp))) == NULL)
114 return (NULL);
115
116 bzero(gfp, sizeof (*gfp));
117 gfp->gf_fileproc.f_flags = FTYPE_GUARDED;
118 gfp->gf_magic = GUARDED_FILEPROC_MAGIC;
119 gfp->gf_guard = aarg->gca_guard;
120 gfp->gf_attrs = aarg->gca_attrs;
121
122 return (GFP_TO_FP(gfp));
123}
124
125void
126guarded_fileproc_free(struct fileproc *fp)
127{
128 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
129
130 if (FILEPROC_TYPE(fp) != FTYPE_GUARDED ||
131 GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
132 panic("%s: corrupt fp %p flags %x", __func__, fp, fp->f_flags);
133
134 kfree(gfp, sizeof (*gfp));
135}
136
137static int
138fp_lookup_guarded(proc_t p, int fd, guardid_t guard,
139 struct guarded_fileproc **gfpp, int locked)
140{
141 struct fileproc *fp;
142 int error;
143
144 if ((error = fp_lookup(p, fd, &fp, locked)) != 0)
145 return (error);
146 if (FILEPROC_TYPE(fp) != FTYPE_GUARDED) {
147 (void) fp_drop(p, fd, fp, locked);
148 return (EINVAL);
149 }
150 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
151
152 if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
153 panic("%s: corrupt fp %p", __func__, fp);
154
155 if (guard != gfp->gf_guard) {
156 (void) fp_drop(p, fd, fp, locked);
157 return (EPERM); /* *not* a mismatch exception */
158 }
159 if (gfpp)
160 *gfpp = gfp;
161 return (0);
162}
163
164/*
165 * Expected use pattern:
166 *
167 * if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
168 * error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
169 * proc_fdunlock(p);
170 * return (error);
171 * }
172 */
173
174int
175fp_isguarded(struct fileproc *fp, u_int attrs)
176{
177 if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
178 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
179
180 if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
181 panic("%s: corrupt gfp %p flags %x",
182 __func__, gfp, fp->f_flags);
183 return ((attrs & gfp->gf_attrs) == attrs);
184 }
185 return (0);
186}
187
188extern char *proc_name_address(void *p);
189
190int
191fp_guard_exception(proc_t p, int fd, struct fileproc *fp, u_int flavor)
192{
193 if (FILEPROC_TYPE(fp) != FTYPE_GUARDED)
194 panic("%s corrupt fp %p flags %x", __func__, fp, fp->f_flags);
195
196 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
197 /* all gfd fields protected via proc_fdlock() */
198 proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
199
200 mach_exception_code_t code = 0;
201 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_FD);
202 EXC_GUARD_ENCODE_FLAVOR(code, flavor);
203 EXC_GUARD_ENCODE_TARGET(code, fd);
204 mach_exception_subcode_t subcode = gfp->gf_guard;
205
206 thread_t t = current_thread();
207 thread_guard_violation(t, code, subcode);
208 return (EPERM);
209}
210
211/*
212 * (Invoked before returning to userland from the syscall handler.)
213 */
214void
215fd_guard_ast(
216 thread_t __unused t,
217 mach_exception_code_t code,
218 mach_exception_subcode_t subcode)
219{
220 task_exception_notify(EXC_GUARD, code, subcode);
221 proc_t p = current_proc();
222 psignal(p, SIGKILL);
223}
224
225/*
226 * Experimental guarded file descriptor SPIs
227 */
228
229/*
230 * int guarded_open_np(const char *pathname, int flags,
231 * const guardid_t *guard, u_int guardflags, ...);
232 *
233 * In this initial implementation, GUARD_DUP must be specified.
234 * GUARD_CLOSE, GUARD_SOCKET_IPC and GUARD_FILEPORT are optional.
235 *
236 * If GUARD_DUP wasn't specified, then we'd have to do the (extra) work
237 * to allow dup-ing a descriptor to inherit the guard onto the new
238 * descriptor. (Perhaps GUARD_DUP behaviours should just always be true
239 * for a guarded fd? Or, more sanely, all the dup operations should
240 * just always propagate the guard?)
241 *
242 * Guarded descriptors are always close-on-exec, and GUARD_CLOSE
243 * requires close-on-fork; O_CLOEXEC must be set in flags.
244 * This setting is immutable; attempts to clear the flag will
245 * cause a guard exception.
246 *
247 * XXX It's somewhat broken that change_fdguard_np() can completely
248 * remove the guard and thus revoke down the immutability
249 * promises above. Ick.
250 */
int
guarded_open_np(proc_t p, struct guarded_open_np_args *uap, int32_t *retval)
{
	/* Guarded fds are immutably close-on-exec; caller must opt in. */
	if ((uap->flags & O_CLOEXEC) == 0)
		return (EINVAL);

/* GUARD_DUP is mandatory in this implementation; also used by the
 * other guarded_* entry points below. */
#define GUARD_REQUIRED (GUARD_DUP)
#define GUARD_ALL (GUARD_REQUIRED |	\
	(GUARD_CLOSE | GUARD_SOCKET_IPC | GUARD_FILEPORT | GUARD_WRITE))

	if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
	    ((uap->guardflags & ~GUARD_ALL) != 0))
		return (EINVAL);

	int error;
	struct gfp_crarg crarg = {
		.gca_attrs = uap->guardflags
	};

	/* fetch the caller's guard value from user space */
	if ((error = copyin(uap->guard,
	    &(crarg.gca_guard), sizeof (crarg.gca_guard))) != 0)
		return (error);

	/*
	 * Disallow certain guard values -- is zero enough?
	 */
	if (crarg.gca_guard == 0)
		return (EINVAL);

	struct filedesc *fdp = p->p_fd;
	struct vnode_attr va;
	struct nameidata nd;
	vfs_context_t ctx = vfs_context_current();
	int cmode;

	/* apply the process umask; never allow the sticky bit at create */
	VATTR_INIT(&va);
	cmode = ((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
	VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);

	NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
	    uap->path, ctx);

	/* open with the guarded allocator; O_CLOFORK forced on */
	return (open1(ctx, &nd, uap->flags | O_CLOFORK, &va,
	    guarded_fileproc_alloc_init, &crarg, retval));
}
296
297/*
298 * int guarded_open_dprotected_np(const char *pathname, int flags,
299 * const guardid_t *guard, u_int guardflags, int dpclass, int dpflags, ...);
300 *
301 * This SPI is extension of guarded_open_np() to include dataprotection class on creation
302 * in "dpclass" and dataprotection flags 'dpflags'. Otherwise behaviors are same as in
303 * guarded_open_np()
304 */
int
guarded_open_dprotected_np(proc_t p, struct guarded_open_dprotected_np_args *uap, int32_t *retval)
{
	/* Same precondition as guarded_open_np(): must be close-on-exec. */
	if ((uap->flags & O_CLOEXEC) == 0)
		return (EINVAL);

	if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
	    ((uap->guardflags & ~GUARD_ALL) != 0))
		return (EINVAL);

	int error;
	struct gfp_crarg crarg = {
		.gca_attrs = uap->guardflags
	};

	/* fetch the caller's guard value from user space */
	if ((error = copyin(uap->guard,
	    &(crarg.gca_guard), sizeof (crarg.gca_guard))) != 0)
		return (error);

	/*
	 * Disallow certain guard values -- is zero enough?
	 */
	if (crarg.gca_guard == 0)
		return (EINVAL);

	struct filedesc *fdp = p->p_fd;
	struct vnode_attr va;
	struct nameidata nd;
	vfs_context_t ctx = vfs_context_current();
	int cmode;

	/* apply the process umask; never allow the sticky bit at create */
	VATTR_INIT(&va);
	cmode = ((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
	VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);

	NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
	    uap->path, ctx);

	/*
	 * Initialize the extra fields in vnode_attr to pass down dataprotection
	 * extra fields.
	 * 1. target cprotect class.
	 * 2. set a flag to mark it as requiring open-raw-encrypted semantics.
	 */
	if (uap->flags & O_CREAT) {
		VATTR_SET(&va, va_dataprotect_class, uap->dpclass);
	}

	if (uap->dpflags & (O_DP_GETRAWENCRYPTED|O_DP_GETRAWUNENCRYPTED)) {
		if ( uap->flags & (O_RDWR | O_WRONLY)) {
			/* Not allowed to write raw encrypted bytes */
			return EINVAL;
		}
		if (uap->dpflags & O_DP_GETRAWENCRYPTED) {
			VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
		}
		if (uap->dpflags & O_DP_GETRAWUNENCRYPTED) {
			VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED);
		}
	}

	/* open with the guarded allocator; O_CLOFORK forced on */
	return (open1(ctx, &nd, uap->flags | O_CLOFORK, &va,
	    guarded_fileproc_alloc_init, &crarg, retval));
}
369
370/*
371 * int guarded_kqueue_np(const guardid_t *guard, u_int guardflags);
372 *
373 * Create a guarded kqueue descriptor with guardid and guardflags.
374 *
375 * Same restrictions on guardflags as for guarded_open_np().
376 * All kqueues are -always- close-on-exec and close-on-fork by themselves
377 * and are not sendable.
378 */
379int
380guarded_kqueue_np(proc_t p, struct guarded_kqueue_np_args *uap, int32_t *retval)
381{
382 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
383 ((uap->guardflags & ~GUARD_ALL) != 0))
384 return (EINVAL);
385
386 int error;
387 struct gfp_crarg crarg = {
388 .gca_attrs = uap->guardflags
389 };
390
391 if ((error = copyin(uap->guard,
392 &(crarg.gca_guard), sizeof (crarg.gca_guard))) != 0)
393 return (error);
394
395 if (crarg.gca_guard == 0)
396 return (EINVAL);
397
398 return (kqueue_body(p, guarded_fileproc_alloc_init, &crarg, retval));
399}
400
401/*
402 * int guarded_close_np(int fd, const guardid_t *guard);
403 */
404int
405guarded_close_np(proc_t p, struct guarded_close_np_args *uap,
406 __unused int32_t *retval)
407{
408 struct guarded_fileproc *gfp;
409 int fd = uap->fd;
410 int error;
411 guardid_t uguard;
412
413 AUDIT_SYSCLOSE(p, fd);
414
415 if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0)
416 return (error);
417
418 proc_fdlock(p);
419 if ((error = fp_lookup_guarded(p, fd, uguard, &gfp, 1)) != 0) {
420 proc_fdunlock(p);
421 return (error);
422 }
423 error = close_internal_locked(p, fd, GFP_TO_FP(gfp), 0);
424 proc_fdunlock(p);
425 return (error);
426}
427
428/*
429 * int
430 * change_fdguard_np(int fd, const guardid_t *guard, u_int guardflags,
431 * const guardid_t *nguard, u_int nguardflags, int *fdflagsp);
432 *
433 * Given a file descriptor, atomically exchange <guard, guardflags> for
434 * a new guard <nguard, nguardflags>, returning the previous fd
435 * flags (see fcntl:F_SETFD) in *fdflagsp.
436 *
437 * This syscall can be used to either (a) add a new guard to an existing
438 * unguarded file descriptor (b) remove the old guard from an existing
439 * guarded file descriptor or (c) change the guard (guardid and/or
440 * guardflags) on a guarded file descriptor.
441 *
442 * If 'guard' is NULL, fd must be unguarded at entry. If the call completes
443 * successfully the fd will be guarded with <nguard, nguardflags>.
444 *
445 * Guarding a file descriptor has some side-effects on the "fdflags"
446 * associated with the descriptor - in particular FD_CLOEXEC is
447 * forced ON unconditionally, and FD_CLOFORK is forced ON by GUARD_CLOSE.
448 * Callers who wish to subsequently restore the state of the fd should save
449 * the value of *fdflagsp after a successful invocation.
450 *
451 * If 'nguard' is NULL, fd must be guarded at entry, <guard, guardflags>
452 * must match with what's already guarding the descriptor, and the
453 * result will be to completely remove the guard. Note also that the
454 * fdflags are copied to the descriptor from the incoming *fdflagsp argument.
455 *
456 * If the descriptor is guarded, and neither 'guard' nor 'nguard' is NULL
457 * and <guard, guardflags> matches what's already guarding the descriptor,
458 * then <nguard, nguardflags> becomes the new guard. In this case, even if
459 * the GUARD_CLOSE flag is being cleared, it is still possible to continue
460 * to keep FD_CLOFORK on the descriptor by passing FD_CLOFORK via fdflagsp.
461 *
462 * (File descriptors whose underlying fileglobs are marked FG_CONFINED are
463 * still close-on-fork, regardless of the setting of FD_CLOFORK.)
464 *
465 * Example 1: Guard an unguarded descriptor during a set of operations,
466 * then restore the original state of the descriptor.
467 *
468 * int sav_flags = 0;
469 * change_fdguard_np(fd, NULL, 0, &myguard, GUARD_CLOSE, &sav_flags);
470 * // do things with now guarded 'fd'
471 * change_fdguard_np(fd, &myguard, GUARD_CLOSE, NULL, 0, &sav_flags);
472 * // fd now unguarded.
473 *
474 * Example 2: Change the guard of a guarded descriptor during a set of
475 * operations, then restore the original state of the descriptor.
476 *
477 * int sav_flags = (gdflags & GUARD_CLOSE) ? FD_CLOFORK : 0;
478 * change_fdguard_np(fd, &gd, gdflags, &myguard, GUARD_CLOSE, &sav_flags);
479 * // do things with 'fd' with a different guard
480 * change_fdguard_np(fd, &myg, GUARD_CLOSE, &gd, gdflags, &sav_flags);
481 * // back to original guarded state
482 *
483 * XXX This SPI is too much of a chainsaw and should be revised.
484 */
485
int
change_fdguard_np(proc_t p, struct change_fdguard_np_args *uap,
    __unused int32_t *retval)
{
	struct fileproc *fp;
	int fd = uap->fd;
	int error;
	guardid_t oldg = 0, newg = 0;
	int nfdflags = 0;

	/* Copy in all user-supplied values up front, before taking locks. */
	if (0 != uap->guard &&
	    0 != (error = copyin(uap->guard, &oldg, sizeof (oldg))))
		return (error);		/* can't copyin current guard */

	if (0 != uap->nguard &&
	    0 != (error = copyin(uap->nguard, &newg, sizeof (newg))))
		return (error);		/* can't copyin new guard */

	if (0 != uap->fdflagsp &&
	    0 != (error = copyin(uap->fdflagsp, &nfdflags, sizeof (nfdflags))))
		return (error);		/* can't copyin new fdflags */

	proc_fdlock(p);
restart:
	if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
		proc_fdunlock(p);
		return (error);
	}

	if (0 != uap->fdflagsp) {
		/* Report the current fd flags back to the caller. */
		int ofdflags = FDFLAGS_GET(p, fd);
		int ofl = ((ofdflags & UF_EXCLOSE) ? FD_CLOEXEC : 0) |
		    ((ofdflags & UF_FORKCLOSE) ? FD_CLOFORK : 0);
		/* must drop the fdlock around copyout (may fault/sleep) */
		proc_fdunlock(p);
		if (0 != (error = copyout(&ofl, uap->fdflagsp, sizeof (ofl)))) {
			proc_fdlock(p);
			goto dropout;	/* can't copyout old fdflags */
		}
		proc_fdlock(p);
	}

	/* Sanity-check the old-guard arguments against the fd's actual state. */
	if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
		if (0 == uap->guard || 0 == uap->guardflags)
			error = EINVAL; /* missing guard! */
		else if (0 == oldg)
			error = EPERM; /* guardids cannot be zero */
	} else {
		if (0 != uap->guard || 0 != uap->guardflags)
			error = EINVAL; /* guard provided, but none needed! */
	}

	if (0 != error)
		goto dropout;

	if (0 != uap->nguard) {
		/*
		 * There's a new guard in town.
		 */
		if (0 == newg)
			error = EINVAL; /* guards cannot contain zero */
		else if (((uap->nguardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
		    ((uap->nguardflags & ~GUARD_ALL) != 0))
			error = EINVAL; /* must have valid attributes too */
		if (0 != error)
			goto dropout;

		if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
			/*
			 * Replace old guard with new guard
			 */
			struct guarded_fileproc *gfp = FP_TO_GFP(fp);

			if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
				panic("%s: corrupt gfp %p flags %x",
				    __func__, gfp, fp->f_flags);

			if (oldg == gfp->gf_guard &&
			    uap->guardflags == gfp->gf_attrs) {
				/*
				 * Must match existing guard + attributes
				 * before we'll swap them to new ones, managing
				 * fdflags "side-effects" as we go. Note that
				 * userland can request FD_CLOFORK semantics.
				 */
				if (gfp->gf_attrs & GUARD_CLOSE)
					FDFLAGS_CLR(p, fd, UF_FORKCLOSE);
				gfp->gf_guard = newg;
				gfp->gf_attrs = uap->nguardflags;
				if (gfp->gf_attrs & GUARD_CLOSE)
					FDFLAGS_SET(p, fd, UF_FORKCLOSE);
				FDFLAGS_SET(p, fd,
				    (nfdflags & FD_CLOFORK) ? UF_FORKCLOSE : 0);
				/* FG_CONFINED enforced regardless */
			} else {
				error = EPERM;
			}
			goto dropout;
		} else {
			/*
			 * Add a guard to a previously unguarded descriptor
			 */
			switch (FILEGLOB_DTYPE(fp->f_fglob)) {
			case DTYPE_VNODE:
			case DTYPE_PIPE:
			case DTYPE_SOCKET:
			case DTYPE_KQUEUE:
			case DTYPE_NETPOLICY:
				break;
			default:
				error = ENOTSUP;
				goto dropout;
			}

			/* allocate the replacement outside the fdlock */
			proc_fdunlock(p);

			struct gfp_crarg crarg = {
				.gca_guard = newg,
				.gca_attrs = uap->nguardflags
			};
			struct fileproc *nfp =
			    guarded_fileproc_alloc_init(&crarg);
			struct guarded_fileproc *gfp;

			proc_fdlock(p);

			/* swap the guarded fileproc in place of the plain one */
			switch (error = fp_tryswap(p, fd, nfp)) {
			case 0: /* guarded-ness comes with side-effects */
				gfp = FP_TO_GFP(nfp);
				if (gfp->gf_attrs & GUARD_CLOSE)
					FDFLAGS_SET(p, fd, UF_FORKCLOSE);
				FDFLAGS_SET(p, fd, UF_EXCLOSE);
				(void) fp_drop(p, fd, nfp, 1);
				fileproc_free(fp);
				break;
			case EKEEPLOOKING: /* f_iocount indicates a collision */
				(void) fp_drop(p, fd, fp, 1);
				fileproc_free(nfp);
				goto restart;
			default:
				(void) fp_drop(p, fd, fp, 1);
				fileproc_free(nfp);
				break;
			}
			proc_fdunlock(p);
			return (error);
		}
	} else {
		/*
		 * No new guard.
		 */
		if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
			/*
			 * Remove the guard altogether.
			 */
			struct guarded_fileproc *gfp = FP_TO_GFP(fp);

			if (0 != uap->nguardflags) {
				error = EINVAL;
				goto dropout;
			}

			if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
				panic("%s: corrupt gfp %p flags %x",
				    __func__, gfp, fp->f_flags);

			/* old guard + attrs must match before removal */
			if (oldg != gfp->gf_guard ||
			    uap->guardflags != gfp->gf_attrs) {
				error = EPERM;
				goto dropout;
			}

			/* allocate the plain replacement outside the fdlock */
			proc_fdunlock(p);
			struct fileproc *nfp = fileproc_alloc_init(NULL);
			proc_fdlock(p);

			switch (error = fp_tryswap(p, fd, nfp)) {
			case 0: /* undo side-effects of guarded-ness */
				FDFLAGS_CLR(p, fd, UF_FORKCLOSE | UF_EXCLOSE);
				FDFLAGS_SET(p, fd,
				    (nfdflags & FD_CLOFORK) ? UF_FORKCLOSE : 0);
				/* FG_CONFINED enforced regardless */
				FDFLAGS_SET(p, fd,
				    (nfdflags & FD_CLOEXEC) ? UF_EXCLOSE : 0);
				(void) fp_drop(p, fd, nfp, 1);
				fileproc_free(fp);
				break;
			case EKEEPLOOKING: /* f_iocount indicates collision */
				(void) fp_drop(p, fd, fp, 1);
				fileproc_free(nfp);
				goto restart;
			default:
				(void) fp_drop(p, fd, fp, 1);
				fileproc_free(nfp);
				break;
			}
			proc_fdunlock(p);
			return (error);
		} else {
			/*
			 * Not already guarded, and no new guard?
			 */
			error = EINVAL;
		}
	}

dropout:
	/* common exit: release the fp_lookup() iocount and the fdlock */
	(void) fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return (error);
}
696
697/*
698 * user_ssize_t guarded_write_np(int fd, const guardid_t *guard,
699 * user_addr_t cbuf, user_ssize_t nbyte);
700 *
701 * Initial implementation of guarded writes.
702 */
int
guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t *retval)
{
	int error;
	int fd = uap->fd;
	guardid_t uguard;
	struct fileproc *fp;
	struct guarded_fileproc *gfp;
	bool wrote_some = false;

	AUDIT_ARG(fd, fd);

	/* copy in the caller's guard value */
	if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0)
		return (error);

	/* verify fd is guarded with this exact guard; takes an iocount */
	error = fp_lookup_guarded(p, fd, uguard, &gfp, 0);
	if (error)
		return(error);

	fp = GFP_TO_FP(gfp);
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
	} else {

		/* write with the credentials the file was opened with */
		struct vfs_context context = *(vfs_context_current());
		context.vc_ucred = fp->f_fglob->fg_cred;

		error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte,
			(off_t)-1, 0, retval);
		wrote_some = *retval > 0;
	}
	/* mark the fd written only if bytes actually went out */
	if (wrote_some)
	        fp_drop_written(p, fd, fp);
	else
	        fp_drop(p, fd, fp, 0);
	return(error);
}
740
741/*
742 * user_ssize_t guarded_pwrite_np(int fd, const guardid_t *guard,
743 * user_addr_t buf, user_size_t nbyte, off_t offset);
744 *
745 * Initial implementation of guarded pwrites.
746 */
int
guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize_t *retval)
{
	struct fileproc *fp;
	int error;
	int fd = uap->fd;
	vnode_t vp = (vnode_t)0;
	guardid_t uguard;
	struct guarded_fileproc *gfp;
	bool wrote_some = false;

	AUDIT_ARG(fd, fd);

	/* copy in the caller's guard value */
	if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0)
		return (error);

	/* verify fd is guarded with this exact guard; takes an iocount */
	error = fp_lookup_guarded(p, fd, uguard, &gfp, 0);
	if (error)
		return(error);

	fp = GFP_TO_FP(gfp);
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
	} else {
		/* write with the credentials the file was opened with */
		struct vfs_context context = *vfs_context_current();
		context.vc_ucred = fp->f_fglob->fg_cred;

		/* pwrite only makes sense on seekable regular vnodes */
		if (fp->f_type != DTYPE_VNODE) {
			error = ESPIPE;
			goto errout;
		}
		vp = (vnode_t)fp->f_fglob->fg_data;
		if (vnode_isfifo(vp)) {
			error = ESPIPE;
			goto errout;
		}
		if ((vp->v_flag & VISTTY)) {
			error = ENXIO;
			goto errout;
		}
		if (uap->offset == (off_t)-1) {
			error = EINVAL;
			goto errout;
		}

		error = dofilewrite(&context, fp, uap->buf, uap->nbyte,
			uap->offset, FOF_OFFSET, retval);
		wrote_some = *retval > 0;
	}
errout:
	/* mark the fd written only if bytes actually went out */
	if (wrote_some)
	        fp_drop_written(p, fd, fp);
	else
	        fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_guarded_pwrite_np) | DBG_FUNC_NONE),
	      uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	return(error);
}
807
808/*
809 * user_ssize_t guarded_writev_np(int fd, const guardid_t *guard,
810 * struct iovec *iovp, u_int iovcnt);
811 *
812 * Initial implementation of guarded writev.
813 *
814 */
int
guarded_writev_np(struct proc *p, struct guarded_writev_np_args *uap, user_ssize_t *retval)
{
	uio_t auio = NULL;
	int error;
	struct fileproc *fp;
	struct user_iovec *iovp;
	guardid_t uguard;
	struct guarded_fileproc *gfp;
	bool wrote_some = false;

	AUDIT_ARG(fd, uap->fd);

	/* Verify range before calling uio_create() */
	if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
		return (EINVAL);

	/* allocate a uio large enough to hold the number of iovecs passed */
	auio = uio_create(uap->iovcnt, 0,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_WRITE);

	/* get location of iovecs within the uio. then copyin the iovecs from
	 * user space.
	 */
	iovp = uio_iovsaddr(auio);
	if (iovp == NULL) {
		error = ENOMEM;
		goto ExitThisRoutine;
	}
	error = copyin_user_iovec_array(uap->iovp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    uap->iovcnt, iovp);
	if (error) {
		goto ExitThisRoutine;
	}

	/* finalize uio_t for use and do the IO
	 */
	error = uio_calculateresid(auio);
	if (error) {
		goto ExitThisRoutine;
	}

	/* copy in the caller's guard value */
	if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0)
		goto ExitThisRoutine;

	/* verify fd is guarded with this exact guard; takes an iocount */
	error = fp_lookup_guarded(p, uap->fd, uguard, &gfp, 0);
	if (error)
		goto ExitThisRoutine;

	fp = GFP_TO_FP(gfp);
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
	} else {
		error = wr_uio(p, fp, auio, retval);
		wrote_some = *retval > 0;
	}

	/* mark the fd written only if bytes actually went out */
	if (wrote_some)
		fp_drop_written(p, uap->fd, fp);
	else
		fp_drop(p, uap->fd, fp, 0);
ExitThisRoutine:
	if (auio != NULL) {
		uio_free(auio);
	}
	return (error);
}
884
885/*
886 * int falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
887 * vfs_context_t ctx, const guardid_t *guard, u_int attrs);
888 *
889 * This SPI is the guarded variant of falloc(). It borrows the same
890 * restrictions as those used by the rest of the guarded_* routines.
891 */
892int
893falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
894 vfs_context_t ctx, const guardid_t *guard, u_int attrs)
895{
896 struct gfp_crarg crarg;
897
898 if (((attrs & GUARD_REQUIRED) != GUARD_REQUIRED) ||
899 ((attrs & ~GUARD_ALL) != 0) || (*guard == 0))
900 return (EINVAL);
901
902 bzero(&crarg, sizeof (crarg));
903 crarg.gca_guard = *guard;
904 crarg.gca_attrs = attrs;
905
906 return (falloc_withalloc(p, fp, fd, ctx, guarded_fileproc_alloc_init,
907 &crarg));
908}
909
910#if CONFIG_MACF && CONFIG_VNGUARD
911
912/*
913 * Guarded vnodes
914 *
915 * Uses MAC hooks to guard operations on vnodes in the system. Given an fd,
916 * add data to the label on the fileglob and the vnode it points at.
917 * The data contains a pointer to the fileglob, the set of attributes to
918 * guard, a guard value for uniquification, and the pid of the process
919 * who set the guard up in the first place.
920 *
921 * The fd must have been opened read/write, and the underlying
922 * fileglob is FG_CONFINED so that there's no ambiguity about the
923 * owning process.
924 *
925 * When there's a callback for a vnode operation of interest (rename, unlink,
926 * etc.) check to see if the guard permits that operation, and if not
927 * take an action e.g. log a message or generate a crash report.
928 *
929 * The label is removed from the vnode and the fileglob when the fileglob
930 * is closed.
931 *
932 * The initial action to be taken can be specified by a boot arg (vnguard=0x42)
933 * and change via the "kern.vnguard.flags" sysctl.
934 */
935
struct vng_owner;

struct vng_info { /* lives on the vnode label */
	guardid_t vgi_guard;		/* guard value protecting this vnode */
	unsigned vgi_attrs;		/* VNG_* operations being guarded */
	TAILQ_HEAD(, vng_owner) vgi_owners;	/* fileglobs sharing this guard */
};

struct vng_owner { /* lives on the fileglob label */
	proc_t vgo_p;			/* process that installed the guard */
	struct fileglob *vgo_fg;	/* the (FG_CONFINED) fileglob itself */
	struct vng_info *vgo_vgi;	/* back-pointer to the vnode's guard */
	TAILQ_ENTRY(vng_owner) vgo_link;	/* linkage on vgi_owners */
};
950
951static struct vng_info *
952new_vgi(unsigned attrs, guardid_t guard)
953{
954 struct vng_info *vgi = kalloc(sizeof (*vgi));
955 vgi->vgi_guard = guard;
956 vgi->vgi_attrs = attrs;
957 TAILQ_INIT(&vgi->vgi_owners);
958 return vgi;
959}
960
961static struct vng_owner *
962new_vgo(proc_t p, struct fileglob *fg)
963{
964 struct vng_owner *vgo = kalloc(sizeof (*vgo));
965 memset(vgo, 0, sizeof (*vgo));
966 vgo->vgo_p = p;
967 vgo->vgo_fg = fg;
968 return vgo;
969}
970
971static void
972vgi_add_vgo(struct vng_info *vgi, struct vng_owner *vgo)
973{
974 vgo->vgo_vgi = vgi;
975 TAILQ_INSERT_HEAD(&vgi->vgi_owners, vgo, vgo_link);
976}
977
978static boolean_t
979vgi_remove_vgo(struct vng_info *vgi, struct vng_owner *vgo)
980{
981 TAILQ_REMOVE(&vgi->vgi_owners, vgo, vgo_link);
982 vgo->vgo_vgi = NULL;
983 return TAILQ_EMPTY(&vgi->vgi_owners);
984}
985
/*
 * Free a guard info record; must have no remaining owners.
 * On development/debug kernels, scribble the memory first to make
 * use-after-free visible (memset only uses the low byte, 0xde).
 */
static void
free_vgi(struct vng_info *vgi)
{
	assert(TAILQ_EMPTY(&vgi->vgi_owners));
#if DEVELOP || DEBUG
	memset(vgi, 0xbeadfade, sizeof (*vgi));
#endif
	kfree(vgi, sizeof (*vgi));
}
995
/*
 * Free an owner record.  On development/debug kernels, scribble the
 * memory first (memset only uses the low byte, 0xd0).
 */
static void
free_vgo(struct vng_owner *vgo)
{
#if DEVELOP || DEBUG
	memset(vgo, 0x2bedf1d0, sizeof (*vgo));
#endif
	kfree(vgo, sizeof (*vgo));
}
1004
/* MAC label slot assigned to this policy, and the lock protecting all
 * vnguard label state (llock). */
static int label_slot;
static lck_rw_t llock;
static lck_grp_t *llock_grp;
1008
1009static __inline void *
1010vng_lbl_get(struct label *label)
1011{
1012 lck_rw_assert(&llock, LCK_RW_ASSERT_HELD);
1013 void *data;
1014 if (NULL == label)
1015 data = NULL;
1016 else
1017 data = (void *)mac_label_get(label, label_slot);
1018 return data;
1019}
1020
1021static __inline struct vng_info *
1022vng_lbl_get_withattr(struct label *label, unsigned attrmask)
1023{
1024 struct vng_info *vgi = vng_lbl_get(label);
1025 assert(NULL == vgi || (vgi->vgi_attrs & ~VNG_ALL) == 0);
1026 if (NULL != vgi && 0 == (vgi->vgi_attrs & attrmask))
1027 vgi = NULL;
1028 return vgi;
1029}
1030
/*
 * Store this policy's datum into a MAC label.
 * Caller must hold llock exclusive; label must not be NULL.
 */
static __inline void
vng_lbl_set(struct label *label, void *data)
{
	assert(NULL != label);
	lck_rw_assert(&llock, LCK_RW_ASSERT_EXCLUSIVE);
	mac_label_set(label, label_slot, (intptr_t)data);
}
1038
1039static int
1040vnguard_sysc_setguard(proc_t p, const struct vnguard_set *vns)
1041{
1042 const int fd = vns->vns_fd;
1043
1044 if ((vns->vns_attrs & ~VNG_ALL) != 0 ||
1045 0 == vns->vns_attrs || 0 == vns->vns_guard)
1046 return EINVAL;
1047
1048 int error;
1049 struct fileproc *fp;
1050 if (0 != (error = fp_lookup(p, fd, &fp, 0)))
1051 return error;
1052 do {
1053 /*
1054 * To avoid trivial DoS, insist that the caller
1055 * has read/write access to the file.
1056 */
1057 if ((FREAD|FWRITE) != (fp->f_flag & (FREAD|FWRITE))) {
1058 error = EBADF;
1059 break;
1060 }
1061 struct fileglob *fg = fp->f_fglob;
1062 if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) {
1063 error = EBADF;
1064 break;
1065 }
1066 /*
1067 * Confinement means there's only one fd pointing at
1068 * this fileglob, and will always be associated with
1069 * this pid.
1070 */
1071 if (0 == (FG_CONFINED & fg->fg_lflags)) {
1072 error = EBADF;
1073 break;
1074 }
1075 struct vnode *vp = fg->fg_data;
1076 if (!vnode_isreg(vp) || NULL == vp->v_mount) {
1077 error = EBADF;
1078 break;
1079 }
1080 error = vnode_getwithref(vp);
1081 if (0 != error) {
1082 fp_drop(p, fd, fp, 0);
1083 break;
1084 }
1085 /* Ensure the target vnode -has- a label */
1086 struct vfs_context *ctx = vfs_context_current();
1087 mac_vnode_label_update(ctx, vp, NULL);
1088
1089 struct vng_info *nvgi = new_vgi(vns->vns_attrs, vns->vns_guard);
1090 struct vng_owner *nvgo = new_vgo(p, fg);
1091
1092 lck_rw_lock_exclusive(&llock);
1093
1094 do {
1095 /*
1096 * A vnode guard is associated with one or more
1097 * fileglobs in one or more processes.
1098 */
1099 struct vng_info *vgi = vng_lbl_get(vp->v_label);
1100 struct vng_owner *vgo = vng_lbl_get(fg->fg_label);
1101
1102 if (NULL == vgi) {
1103 /* vnode unguarded, add the first guard */
1104 if (NULL != vgo)
1105 panic("vnguard label on fileglob "
1106 "but not vnode");
1107 /* add a kusecount so we can unlabel later */
1108 error = vnode_ref_ext(vp, O_EVTONLY, 0);
1109 if (0 == error) {
1110 /* add the guard */
1111 vgi_add_vgo(nvgi, nvgo);
1112 vng_lbl_set(vp->v_label, nvgi);
1113 vng_lbl_set(fg->fg_label, nvgo);
1114 } else {
1115 free_vgo(nvgo);
1116 free_vgi(nvgi);
1117 }
1118 } else {
1119 /* vnode already guarded */
1120 free_vgi(nvgi);
1121 if (vgi->vgi_guard != vns->vns_guard)
1122 error = EPERM; /* guard mismatch */
1123 else if (vgi->vgi_attrs != vns->vns_attrs)
1124 error = EACCES; /* attr mismatch */
1125 if (0 != error || NULL != vgo) {
1126 free_vgo(nvgo);
1127 break;
1128 }
1129 /* record shared ownership */
1130 vgi_add_vgo(vgi, nvgo);
1131 vng_lbl_set(fg->fg_label, nvgo);
1132 }
1133 } while (0);
1134
1135 lck_rw_unlock_exclusive(&llock);
1136 vnode_put(vp);
1137 } while (0);
1138
1139 fp_drop(p, fd, fp, 0);
1140 return error;
1141}
1142
1143static int
1144vng_policy_syscall(proc_t p, int cmd, user_addr_t arg)
1145{
1146 int error = EINVAL;
1147
1148 switch (cmd) {
1149 case VNG_SYSC_PING:
1150 if (0 == arg)
1151 error = 0;
1152 break;
1153 case VNG_SYSC_SET_GUARD: {
1154 struct vnguard_set vns;
1155 error = copyin(arg, (void *)&vns, sizeof (vns));
1156 if (error)
1157 break;
1158 error = vnguard_sysc_setguard(p, &vns);
1159 break;
1160 }
1161 default:
1162 break;
1163 }
1164 return (error);
1165}
1166
1167/*
1168 * This is called just before the fileglob disappears in fg_free().
1169 * Take the exclusive lock: no other thread can add or remove
1170 * a vng_info to any vnode in the system.
1171 */
/*
 * This is called just before the fileglob disappears in fg_free().
 * Take the exclusive lock: no other thread can add or remove
 * a vng_info to any vnode in the system.
 */
static void
vng_file_label_destroy(struct label *label)
{
	lck_rw_lock_exclusive(&llock);
	struct vng_owner *lvgo = vng_lbl_get(label);
	if (lvgo) {
		/* detach this owner from the fileglob's label */
		vng_lbl_set(label, 0);
		struct vng_info *vgi = lvgo->vgo_vgi;
		assert(vgi);
		if (vgi_remove_vgo(vgi, lvgo)) {
			/* that was the last reference */
			vgi->vgi_attrs = 0; /* disarm further checks now */
			struct fileglob *fg = lvgo->vgo_fg;
			assert(fg);
			if (DTYPE_VNODE == FILEGLOB_DTYPE(fg)) {
				struct vnode *vp = fg->fg_data;
				int error = vnode_getwithref(vp);
				if (0 == error) {
					/* clear the vnode's guard label */
					vng_lbl_set(vp->v_label, 0);
					/*
					 * Drop the lock before releasing the
					 * O_EVTONLY usecount taken when the
					 * guard was installed.
					 */
					lck_rw_unlock_exclusive(&llock);
					/* may trigger VNOP_INACTIVE */
					vnode_rele_ext(vp, O_EVTONLY, 0);
					vnode_put(vp);
					free_vgi(vgi);
					free_vgo(lvgo);
					return;
				}
				/*
				 * NOTE(review): if vnode_getwithref() fails
				 * here, vgi is left attached to the vnode's
				 * label (with attrs == 0) rather than freed —
				 * presumably deliberate to avoid a dangling
				 * label pointer; confirm.
				 */
			}
		}
		free_vgo(lvgo);
	}
	lck_rw_unlock_exclusive(&llock);
}
1205
1206static os_reason_t
1207vng_reason_from_pathname(const char *path, uint32_t pathlen)
1208{
1209 os_reason_t r = os_reason_create(OS_REASON_GUARD, GUARD_REASON_VNODE);
1210 if (NULL == r)
1211 return (r);
1212 /*
1213 * If the pathname is very long, just keep the trailing part
1214 */
1215 const uint32_t pathmax = 3 * EXIT_REASON_USER_DESC_MAX_LEN / 4;
1216 if (pathlen > pathmax) {
1217 path += (pathlen - pathmax);
1218 pathlen = pathmax;
1219 }
1220 uint32_t rsize = kcdata_estimate_required_buffer_size(1, pathlen);
1221 if (0 == os_reason_alloc_buffer(r, rsize)) {
1222 struct kcdata_descriptor *kcd = &r->osr_kcd_descriptor;
1223 mach_vm_address_t addr;
1224 if (kcdata_get_memory_addr(kcd,
1225 EXIT_REASON_USER_DESC, pathlen, &addr) == KERN_SUCCESS) {
1226 kcdata_memcpy(kcd, addr, path, pathlen);
1227 return (r);
1228 }
1229 }
1230 os_reason_free(r);
1231 return (OS_REASON_NULL);
1232}
1233
/*
 * Enforcement behavior (kVNG_POLICY_* bits): set at init, overridable
 * via the "vnguard" boot-arg, and via sysctl on DEBUG/DEVELOPMENT.
 */
static int vng_policy_flags;
1235
1236static int
1237vng_guard_violation(const struct vng_info *vgi,
1238 unsigned opval, vnode_t vp)
1239{
1240 int retval = 0;
1241
1242 if (vng_policy_flags & kVNG_POLICY_EPERM) {
1243 /* deny the operation */
1244 retval = EPERM;
1245 }
1246
1247 if (vng_policy_flags & (kVNG_POLICY_LOGMSG|kVNG_POLICY_UPRINTMSG)) {
1248 /* log a message */
1249 const char *op;
1250 switch (opval) {
1251 case VNG_RENAME_FROM:
1252 op = "rename-from";
1253 break;
1254 case VNG_RENAME_TO:
1255 op = "rename-to";
1256 break;
1257 case VNG_UNLINK:
1258 op = "unlink";
1259 break;
1260 case VNG_LINK:
1261 op = "link";
1262 break;
1263 case VNG_EXCHDATA:
1264 op = "exchdata";
1265 break;
1266 case VNG_WRITE_OTHER:
1267 op = "write";
1268 break;
1269 case VNG_TRUNC_OTHER:
1270 op = "truncate";
1271 break;
1272 default:
1273 op = "(unknown)";
1274 break;
1275 }
1276
1277 const char *nm = vnode_getname(vp);
1278 proc_t p = current_proc();
1279 const struct vng_owner *vgo;
1280 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1281 const char fmt[] =
1282 "%s[%d]: %s%s: '%s' guarded by %s[%d] (0x%llx)\n";
1283
1284 if (vng_policy_flags & kVNG_POLICY_LOGMSG) {
1285 printf(fmt,
1286 proc_name_address(p), proc_pid(p), op,
1287 0 != retval ? " denied" : "",
1288 NULL != nm ? nm : "(unknown)",
1289 proc_name_address(vgo->vgo_p),
1290 proc_pid(vgo->vgo_p), vgi->vgi_guard);
1291 }
1292 if (vng_policy_flags & kVNG_POLICY_UPRINTMSG) {
1293 uprintf(fmt,
1294 proc_name_address(p), proc_pid(p), op,
1295 0 != retval ? " denied" : "",
1296 NULL != nm ? nm : "(unknown)",
1297 proc_name_address(vgo->vgo_p),
1298 proc_pid(vgo->vgo_p), vgi->vgi_guard);
1299 }
1300 }
1301 if (NULL != nm)
1302 vnode_putname(nm);
1303 }
1304
1305 if (vng_policy_flags & (kVNG_POLICY_EXC|kVNG_POLICY_EXC_CORPSE)) {
1306 /* EXC_GUARD exception */
1307 const struct vng_owner *vgo = TAILQ_FIRST(&vgi->vgi_owners);
1308 pid_t pid = vgo ? proc_pid(vgo->vgo_p) : 0;
1309 mach_exception_code_t code;
1310 mach_exception_subcode_t subcode;
1311
1312 code = 0;
1313 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_VN);
1314 EXC_GUARD_ENCODE_FLAVOR(code, opval);
1315 EXC_GUARD_ENCODE_TARGET(code, pid);
1316 subcode = vgi->vgi_guard;
1317
1318 if (vng_policy_flags & kVNG_POLICY_EXC_CORPSE) {
1319 char *path;
1320 int len = MAXPATHLEN;
1321 MALLOC(path, char *, len, M_TEMP, M_WAITOK);
1322 os_reason_t r = NULL;
1323 if (NULL != path) {
1324 vn_getpath(vp, path, &len);
1325 if (*path && len)
1326 r = vng_reason_from_pathname(path, len);
1327 }
1328 task_violated_guard(code, subcode, r); /* not fatal */
1329 if (NULL != r)
1330 os_reason_free(r);
1331 if (NULL != path)
1332 FREE(path, M_TEMP);
1333 } else {
1334 thread_t t = current_thread();
1335 thread_guard_violation(t, code, subcode);
1336 }
1337 } else if (vng_policy_flags & kVNG_POLICY_SIGKILL) {
1338 proc_t p = current_proc();
1339 psignal(p, SIGKILL);
1340 }
1341
1342 return (retval);
1343}
1344
1345/*
1346 * A fatal vnode guard was tripped on this thread.
1347 *
1348 * (Invoked before returning to userland from the syscall handler.)
1349 */
1350void
1351vn_guard_ast(thread_t __unused t,
1352 mach_exception_data_type_t code, mach_exception_data_type_t subcode)
1353{
1354 task_exception_notify(EXC_GUARD, code, subcode);
1355 proc_t p = current_proc();
1356 psignal(p, SIGKILL);
1357}
1358
1359/*
1360 * vnode callbacks
1361 */
1362
1363static int
1364vng_vnode_check_rename(kauth_cred_t __unused cred,
1365 struct vnode *__unused dvp, struct label *__unused dlabel,
1366 struct vnode *vp, struct label *label,
1367 struct componentname *__unused cnp,
1368 struct vnode *__unused tdvp, struct label *__unused tdlabel,
1369 struct vnode *tvp, struct label *tlabel,
1370 struct componentname *__unused tcnp)
1371{
1372 int error = 0;
1373 if (NULL != label || NULL != tlabel) {
1374 lck_rw_lock_shared(&llock);
1375 const struct vng_info *vgi =
1376 vng_lbl_get_withattr(label, VNG_RENAME_FROM);
1377 if (NULL != vgi)
1378 error = vng_guard_violation(vgi, VNG_RENAME_FROM, vp);
1379 if (0 == error) {
1380 vgi = vng_lbl_get_withattr(tlabel, VNG_RENAME_TO);
1381 if (NULL != vgi)
1382 error = vng_guard_violation(vgi,
1383 VNG_RENAME_TO, tvp);
1384 }
1385 lck_rw_unlock_shared(&llock);
1386 }
1387 return (error);
1388}
1389
1390static int
1391vng_vnode_check_link(kauth_cred_t __unused cred,
1392 struct vnode *__unused dvp, struct label *__unused dlabel,
1393 struct vnode *vp, struct label *label, struct componentname *__unused cnp)
1394{
1395 int error = 0;
1396 if (NULL != label) {
1397 lck_rw_lock_shared(&llock);
1398 const struct vng_info *vgi =
1399 vng_lbl_get_withattr(label, VNG_LINK);
1400 if (vgi)
1401 error = vng_guard_violation(vgi, VNG_LINK, vp);
1402 lck_rw_unlock_shared(&llock);
1403 }
1404 return (error);
1405}
1406
1407static int
1408vng_vnode_check_unlink(kauth_cred_t __unused cred,
1409 struct vnode *__unused dvp, struct label *__unused dlabel,
1410 struct vnode *vp, struct label *label, struct componentname *__unused cnp)
1411{
1412 int error = 0;
1413 if (NULL != label) {
1414 lck_rw_lock_shared(&llock);
1415 const struct vng_info *vgi =
1416 vng_lbl_get_withattr(label, VNG_UNLINK);
1417 if (vgi)
1418 error = vng_guard_violation(vgi, VNG_UNLINK, vp);
1419 lck_rw_unlock_shared(&llock);
1420 }
1421 return (error);
1422}
1423
1424/*
1425 * Only check violations for writes performed by "other processes"
1426 */
1427static int
1428vng_vnode_check_write(kauth_cred_t __unused actv_cred,
1429 kauth_cred_t __unused file_cred, struct vnode *vp, struct label *label)
1430{
1431 int error = 0;
1432 if (NULL != label) {
1433 lck_rw_lock_shared(&llock);
1434 const struct vng_info *vgi =
1435 vng_lbl_get_withattr(label, VNG_WRITE_OTHER);
1436 if (vgi) {
1437 proc_t p = current_proc();
1438 const struct vng_owner *vgo;
1439 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1440 if (vgo->vgo_p == p)
1441 goto done;
1442 }
1443 error = vng_guard_violation(vgi, VNG_WRITE_OTHER, vp);
1444 }
1445 done:
1446 lck_rw_unlock_shared(&llock);
1447 }
1448 return (error);
1449}
1450
1451/*
1452 * Only check violations for truncates performed by "other processes"
1453 */
1454static int
1455vng_vnode_check_truncate(kauth_cred_t __unused actv_cred,
1456 kauth_cred_t __unused file_cred, struct vnode *vp,
1457 struct label *label)
1458{
1459 int error = 0;
1460 if (NULL != label) {
1461 lck_rw_lock_shared(&llock);
1462 const struct vng_info *vgi =
1463 vng_lbl_get_withattr(label, VNG_TRUNC_OTHER);
1464 if (vgi) {
1465 proc_t p = current_proc();
1466 const struct vng_owner *vgo;
1467 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1468 if (vgo->vgo_p == p)
1469 goto done;
1470 }
1471 error = vng_guard_violation(vgi, VNG_TRUNC_OTHER, vp);
1472 }
1473 done:
1474 lck_rw_unlock_shared(&llock);
1475 }
1476 return error;
1477}
1478
1479static int
1480vng_vnode_check_exchangedata(kauth_cred_t __unused cred,
1481 struct vnode *fvp, struct label *flabel,
1482 struct vnode *svp, struct label *slabel)
1483{
1484 int error = 0;
1485 if (NULL != flabel || NULL != slabel) {
1486 lck_rw_lock_shared(&llock);
1487 const struct vng_info *vgi =
1488 vng_lbl_get_withattr(flabel, VNG_EXCHDATA);
1489 if (NULL != vgi)
1490 error = vng_guard_violation(vgi, VNG_EXCHDATA, fvp);
1491 if (0 == error) {
1492 vgi = vng_lbl_get_withattr(slabel, VNG_EXCHDATA);
1493 if (NULL != vgi)
1494 error = vng_guard_violation(vgi,
1495 VNG_EXCHDATA, svp);
1496 }
1497 lck_rw_unlock_shared(&llock);
1498 }
1499 return (error);
1500}
1501
1502/* Intercept open-time truncations (by "other") of a guarded vnode */
1503
1504static int
1505vng_vnode_check_open(kauth_cred_t cred,
1506 struct vnode *vp, struct label *label, int acc_mode)
1507{
1508 if (0 == (acc_mode & O_TRUNC))
1509 return (0);
1510 return (vng_vnode_check_truncate(cred, NULL, vp, label));
1511}
1512
1513/*
1514 * Configuration gorp
1515 */
1516
/*
 * Policy init hook: create the lock group and the global rwlock
 * that serializes all guard-label mutation for this policy.
 */
static void
vng_init(struct mac_policy_conf *mpc)
{
	llock_grp = lck_grp_alloc_init(mpc->mpc_name, LCK_GRP_ATTR_NULL);
	lck_rw_init(&llock, llock_grp, LCK_ATTR_NULL);
}
1523
/* MAC entry points implemented by the vnode-guard policy */
SECURITY_READ_ONLY_EARLY(static struct mac_policy_ops) vng_policy_ops = {
	/* tear down per-fileglob guard state in fg_free() */
	.mpo_file_label_destroy = vng_file_label_destroy,

	/* namespace and data operations checked against guard attrs */
	.mpo_vnode_check_link = vng_vnode_check_link,
	.mpo_vnode_check_unlink = vng_vnode_check_unlink,
	.mpo_vnode_check_rename = vng_vnode_check_rename,
	.mpo_vnode_check_write = vng_vnode_check_write,
	.mpo_vnode_check_truncate = vng_vnode_check_truncate,
	.mpo_vnode_check_exchangedata = vng_vnode_check_exchangedata,
	.mpo_vnode_check_open = vng_vnode_check_open,

	/* private syscall (set guard / ping) and one-time init */
	.mpo_policy_syscall = vng_policy_syscall,
	.mpo_policy_init = vng_init,
};
1538
/* Label slot name(s) registered with the MAC framework */
static const char *vng_labelnames[] = {
	"vnguard",
};

/* element count of a statically-sized array */
#define ACOUNT(arr) ((unsigned)(sizeof (arr) / sizeof (arr[0])))
1544
/* Policy registration record handed to mac_policy_register() */
SECURITY_READ_ONLY_LATE(static struct mac_policy_conf) vng_policy_conf = {
	.mpc_name = VNG_POLICY_NAME,
	.mpc_fullname = "Guarded vnode policy",
	.mpc_field_off = &label_slot,	/* framework stores our slot here */
	.mpc_labelnames = vng_labelnames,
	.mpc_labelname_count = ACOUNT(vng_labelnames),
	.mpc_ops = &vng_policy_ops,
	.mpc_loadtime_flags = 0,
	.mpc_runtime_flags = 0
};
1555
/* Handle returned by mac_policy_register() */
static mac_policy_handle_t vng_policy_handle;
1557
1558void
1559vnguard_policy_init(void)
1560{
1561 if (0 == PE_i_can_has_debugger(NULL))
1562 return;
1563 vng_policy_flags = kVNG_POLICY_LOGMSG |
1564 kVNG_POLICY_EXC_CORPSE | kVNG_POLICY_UPRINTMSG;
1565 PE_parse_boot_argn("vnguard", &vng_policy_flags, sizeof (vng_policy_flags));
1566 if (vng_policy_flags)
1567 mac_policy_register(&vng_policy_conf, &vng_policy_handle, NULL);
1568}
1569
#if DEBUG || DEVELOPMENT
#include <sys/sysctl.h>

/* kern.vnguard.flags: runtime read/write access to vng_policy_flags */
SYSCTL_DECL(_kern_vnguard);
SYSCTL_NODE(_kern, OID_AUTO, vnguard, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "vnguard");
SYSCTL_INT(_kern_vnguard, OID_AUTO, flags, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vng_policy_flags, 0, "vnguard policy flags");
#endif
1578
1579#endif /* CONFIG_MACF && CONFIG_VNGUARD */
1580