1/*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24/*-
25 * Portions Copyright (c) 1992, 1993
26 * The Regents of the University of California. All rights reserved.
27 *
28 * This code is derived from software contributed to Berkeley by
29 * John Heidemann of the UCLA Ficus project.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)null_vnops.c 8.6 (Berkeley) 5/27/95
56 *
57 * Ancestors:
58 * @(#)lofs_vnops.c 1.2 (Berkeley) 6/18/92
59 * ...and...
60 * @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
61 *
62 * $FreeBSD$
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/conf.h>
68#include <sys/kernel.h>
69#include <sys/lock.h>
70#include <sys/malloc.h>
71#include <sys/mount.h>
72#include <sys/mount_internal.h>
73#include <sys/namei.h>
74#include <sys/sysctl.h>
75#include <sys/vnode.h>
76#include <sys/xattr.h>
77#include <sys/ubc.h>
78#include <sys/types.h>
79#include <sys/dirent.h>
80#include <sys/kauth.h>
81
82#include "nullfs.h"
83
84#define NULL_ROOT_INO 2
85#define NULL_SECOND_INO 3
86#define NULL_THIRD_INO 4
87
/* nullfs vnode operations vector; NULL until set up elsewhere (not in this file chunk). */
vop_t * nullfs_vnodeop_p = NULL;
89
90/* the mountpoint lock should be held going into this function */
91static int
92nullfs_isspecialvp(struct vnode * vp)
93{
94 struct null_mount * null_mp;
95
96 null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));
97
98 /* only check for root and second here, third is special in a different way,
99 * related only to lookup and readdir */
100 if (vp && (vp == null_mp->nullm_rootvp || vp == null_mp->nullm_secondvp)) {
101 return 1;
102 }
103 return 0;
104}
105
106/* helper function to handle locking where possible */
107static int
108nullfs_checkspecialvp(struct vnode* vp)
109{
110 int result = 0;
111 struct null_mount * null_mp;
112
113 null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));
114
115 lck_mtx_lock(lck: &null_mp->nullm_lock);
116 result = (nullfs_isspecialvp(vp));
117 lck_mtx_unlock(lck: &null_mp->nullm_lock);
118
119 return result;
120}
121
/*
 * Return the vfs_context to use when forwarding an operation to the lower
 * file system.  For NULLM_UNVEIL mounts, a private copy of the caller's
 * context is created and its credential is rewritten to the mount's
 * uid/gid; otherwise the caller's context is returned unchanged.  The
 * result must always be passed to nullfs_cleanup_patched_context(), which
 * releases the copy only in the unveil case.
 */
vfs_context_t
nullfs_get_patched_context(struct null_mount * null_mp, vfs_context_t ctx)
{
	struct vfs_context *ectx = ctx;
	kauth_cred_t ucred;

	if ((null_mp->nullm_flags & NULLM_UNVEIL) == NULLM_UNVEIL) {
		ectx = vfs_context_create(ctx);
		/* derive a credential impersonating the mount's configured uid/gid */
		ucred = kauth_cred_derive(cred: ectx->vc_ucred,
		    fn: ^bool (kauth_cred_t parent __unused, kauth_cred_t model) {
			return kauth_cred_model_setuidgid(model,
			    uid: null_mp->uid, gid: null_mp->gid);
		});
		/* swap the derived cred into the copied context, dropping the
		 * reference the copy held on the original cred */
		kauth_cred_unref(&ectx->vc_ucred);
		ectx->vc_ucred = ucred;
	}
	return ectx;
}
140
141void
142nullfs_cleanup_patched_context(struct null_mount * null_mp, vfs_context_t ctx)
143{
144 if ((null_mp->nullm_flags & NULLM_UNVEIL) == NULLM_UNVEIL) {
145 vfs_context_rele(ctx);
146 }
147}
148
149static int
150nullfs_default(__unused struct vnop_generic_args * args)
151{
152 NULLFSDEBUG("%s (default)\n", ((struct vnodeop_desc_fake *)args->a_desc)->vdesc_name);
153 return ENOTSUP;
154}
155
156static int
157nullfs_special_getattr(struct vnop_getattr_args * args)
158{
159 mount_t mp = vnode_mount(vp: args->a_vp);
160 struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
161
162 ino_t ino = NULL_ROOT_INO;
163 struct vnode_attr covered_rootattr;
164 vnode_t checkvp = null_mp->nullm_lowerrootvp;
165 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: args->a_context);
166
167 VATTR_INIT(&covered_rootattr);
168 VATTR_WANTED(&covered_rootattr, va_uid);
169 VATTR_WANTED(&covered_rootattr, va_gid);
170 VATTR_WANTED(&covered_rootattr, va_create_time);
171 VATTR_WANTED(&covered_rootattr, va_modify_time);
172 VATTR_WANTED(&covered_rootattr, va_access_time);
173
174 /* prefer to get this from the lower root vp, but if not (i.e. forced unmount
175 * of lower fs) try the mount point covered vnode */
176 if (vnode_getwithvid(checkvp, null_mp->nullm_lowerrootvid)) {
177 checkvp = vfs_vnodecovered(mp);
178 if (checkvp == NULL) {
179 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
180 return EIO;
181 }
182 }
183
184 int error = vnode_getattr(vp: checkvp, vap: &covered_rootattr, ctx: ectx);
185
186 vnode_put(vp: checkvp);
187 if (error) {
188 /* we should have been able to get attributes fore one of the two choices so
189 * fail if we didn't */
190 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
191 return error;
192 }
193
194 /* we got the attributes of the vnode we cover so plow ahead */
195 if (args->a_vp == null_mp->nullm_secondvp) {
196 ino = NULL_SECOND_INO;
197 }
198
199 VATTR_RETURN(args->a_vap, va_type, vnode_vtype(args->a_vp));
200 VATTR_RETURN(args->a_vap, va_rdev, 0);
201 VATTR_RETURN(args->a_vap, va_nlink, 3); /* always just ., .., and the child */
202 VATTR_RETURN(args->a_vap, va_total_size, 0); // hoping this is ok
203
204 VATTR_RETURN(args->a_vap, va_data_size, 0); // hoping this is ok
205 VATTR_RETURN(args->a_vap, va_data_alloc, 0);
206 VATTR_RETURN(args->a_vap, va_iosize, vfs_statfs(mp)->f_iosize);
207 VATTR_RETURN(args->a_vap, va_fileid, ino);
208 VATTR_RETURN(args->a_vap, va_linkid, ino);
209 if (VATTR_IS_ACTIVE(args->a_vap, va_fsid)) {
210 VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(mp)->f_fsid.val[0]); // return the fsid of the mount point
211 }
212 if (VATTR_IS_ACTIVE(args->a_vap, va_fsid64)) {
213 VATTR_RETURN(args->a_vap, va_fsid64, vfs_statfs(mp)->f_fsid);
214 }
215 VATTR_RETURN(args->a_vap, va_filerev, 0);
216 VATTR_RETURN(args->a_vap, va_gen, 0);
217 VATTR_RETURN(args->a_vap, va_flags, UF_HIDDEN); /* mark our fake directories as hidden. People
218 * shouldn't be enocouraged to poke around in them */
219
220 if (ino == NULL_SECOND_INO) {
221 VATTR_RETURN(args->a_vap, va_parentid, NULL_ROOT_INO); /* no parent at the root, so
222 * the only other vnode that
223 * goes through this path is
224 * second and its parent is
225 * 1.*/
226 }
227
228 if (VATTR_IS_ACTIVE(args->a_vap, va_mode)) {
229 /* force dr_xr_xr_x */
230 VATTR_RETURN(args->a_vap, va_mode, S_IFDIR | S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
231 }
232 if (VATTR_IS_ACTIVE(args->a_vap, va_uid)) {
233 VATTR_RETURN(args->a_vap, va_uid, covered_rootattr.va_uid);
234 }
235 if (VATTR_IS_ACTIVE(args->a_vap, va_gid)) {
236 VATTR_RETURN(args->a_vap, va_gid, covered_rootattr.va_gid);
237 }
238
239 if (VATTR_IS_ACTIVE(args->a_vap, va_create_time)) {
240 VATTR_SET_SUPPORTED(args->a_vap, va_create_time);
241 args->a_vap->va_create_time.tv_sec = covered_rootattr.va_create_time.tv_sec;
242 args->a_vap->va_create_time.tv_nsec = covered_rootattr.va_create_time.tv_nsec;
243 }
244 if (VATTR_IS_ACTIVE(args->a_vap, va_modify_time)) {
245 VATTR_SET_SUPPORTED(args->a_vap, va_modify_time);
246 args->a_vap->va_modify_time.tv_sec = covered_rootattr.va_modify_time.tv_sec;
247 args->a_vap->va_modify_time.tv_nsec = covered_rootattr.va_modify_time.tv_nsec;
248 }
249 if (VATTR_IS_ACTIVE(args->a_vap, va_access_time)) {
250 VATTR_SET_SUPPORTED(args->a_vap, va_access_time);
251 args->a_vap->va_modify_time.tv_sec = covered_rootattr.va_access_time.tv_sec;
252 args->a_vap->va_modify_time.tv_nsec = covered_rootattr.va_access_time.tv_nsec;
253 }
254
255 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
256 return 0;
257}
258
/*
 * getattr: forward to the lower vnode, then rewrite the fsid to this
 * mount's own, and for NULLM_UNVEIL mounts conjure up mode/uid/gid based on
 * what the (patched) credential is actually authorized to do on the lower
 * vnode.
 */
static int
nullfs_getattr(struct vnop_getattr_args * args)
{
	int error;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
	/* NOTE(review): cred is taken from the caller's ORIGINAL context, not
	 * the patched one, so the uid/gid reported below are the caller's own. */
	kauth_cred_t cred = vfs_context_ucred(ctx: args->a_context);
	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);

	/* the synthetic root/"d" directories fabricate their own attributes */
	if (nullfs_checkspecialvp(vp: args->a_vp)) {
		error = nullfs_special_getattr(args);
		return error;
	}

	/* this will return a different inode for third than read dir will */
	struct vnode * lowervp = NULLVPTOLOWERVP(args->a_vp);
	vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: args->a_context);
	error = vnode_getwithref(vp: lowervp);

	if (error == 0) {
		error = VNOP_GETATTR(lowervp, args->a_vap, ectx);
		/* NOTE(review): the iocount on lowervp is dropped here, but
		 * vnode_authorize(lowervp, ...) is still called below in the
		 * unveil branch — confirm this is safe without an iocount. */
		vnode_put(vp: lowervp);

		if (error == 0) {
			/* fix up fsid so it doesn't say the underlying fs*/
			if (VATTR_IS_ACTIVE(args->a_vap, va_fsid)) {
				VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]);
			}
			if (VATTR_IS_ACTIVE(args->a_vap, va_fsid64)) {
				VATTR_RETURN(args->a_vap, va_fsid64, vfs_statfs(vnode_mount(args->a_vp))->f_fsid);
			}

			/* Conjure up permissions */
			if ((null_mp->nullm_flags & NULLM_UNVEIL) == NULLM_UNVEIL) {
				if (VATTR_IS_ACTIVE(args->a_vap, va_mode)) {
					mode_t mode = args->a_vap->va_mode; // We will take away permisions if we don't have them

					// Check for authorizations
					// If we can read:
					if (vnode_authorize(vp: lowervp, NULL, KAUTH_VNODE_GENERIC_READ_BITS, ctx: ectx) == 0) {
						mode |= S_IRUSR;
					} else {
						mode &= ~S_IRUSR;
					}

					// Or execute
					// Directories need an execute bit...
					if (vnode_authorize(vp: lowervp, NULL, KAUTH_VNODE_GENERIC_EXECUTE_BITS, ctx: ectx) == 0) {
						mode |= S_IXUSR;
					} else {
						mode &= ~S_IXUSR;
					}

					NULLFSDEBUG("Settings bits to %d\n", mode);
					VATTR_RETURN(args->a_vap, va_mode, mode);
				}
				/* present the caller as the owner of everything under an unveil mount */
				if (VATTR_IS_ACTIVE(args->a_vap, va_uid)) {
					VATTR_RETURN(args->a_vap, va_uid, kauth_cred_getuid(cred));
				}
				if (VATTR_IS_ACTIVE(args->a_vap, va_gid)) {
					VATTR_RETURN(args->a_vap, va_gid, kauth_cred_getgid(cred));
				}
			}
		}
	}

	nullfs_cleanup_patched_context(null_mp, ctx: ectx);
	return error;
}
327
328static int
329nullfs_open(struct vnop_open_args * args)
330{
331 int error;
332 struct vnode *vp, *lvp;
333 mount_t mp = vnode_mount(vp: args->a_vp);
334 struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
335 NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
336
337 if (nullfs_checkspecialvp(vp: args->a_vp)) {
338 return 0; /* nothing extra needed */
339 }
340
341 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: args->a_context);
342 vp = args->a_vp;
343 lvp = NULLVPTOLOWERVP(vp);
344 error = vnode_getwithref(vp: lvp);
345 if (error == 0) {
346 error = VNOP_OPEN(lvp, args->a_mode, ectx);
347 vnode_put(vp: lvp);
348 }
349
350 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
351 return error;
352}
353
354static int
355nullfs_close(struct vnop_close_args * args)
356{
357 int error;
358 struct vnode *vp, *lvp;
359 mount_t mp = vnode_mount(vp: args->a_vp);
360 struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
361
362 NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
363
364 if (nullfs_checkspecialvp(vp: args->a_vp)) {
365 return 0; /* nothing extra needed */
366 }
367
368 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: args->a_context);
369 vp = args->a_vp;
370 lvp = NULLVPTOLOWERVP(vp);
371
372 error = vnode_getwithref(vp: lvp);
373 if (error == 0) {
374 error = VNOP_CLOSE(lvp, args->a_fflag, ectx);
375 vnode_put(vp: lvp);
376 }
377
378 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
379 return error;
380}
381
382/* get lvp's parent, if possible, even if it isn't set.
383 *
384 * lvp is expected to have an iocount before and after this call.
385 *
386 * if a dvpp is populated the returned vnode has an iocount. */
/* get lvp's parent, if possible, even if it isn't set.
 *
 * lvp is expected to have an iocount before and after this call.
 *
 * if a dvpp is populated the returned vnode has an iocount. */
static int
null_get_lowerparent(vnode_t lvp, vnode_t * dvpp, vfs_context_t ctx)
{
	int error = 0;
	struct vnode_attr va;
	mount_t mp = vnode_mount(vp: lvp);
	vnode_t dvp = vnode_parent(vp: lvp);

	/* fast path: the parent pointer is already cached on the vnode */
	if (dvp) {
		error = vnode_get(dvp);
		goto end;
	}

	/* slow path: look the parent up by id; only possible if the lower fs
	 * supports mapping ids back to vnodes */
	error = ENOENT;
	if (!(mp->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
		goto end;
	}

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_parentid);

	error = vnode_getattr(vp: lvp, vap: &va, ctx);

	if (error || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
		if (!error) {
			/* getattr succeeded but the fs couldn't report a parent id */
			error = ENOTSUP;
		}
		goto end;
	}

	/* ask the lower fs for a vnode carrying the parent's inode number;
	 * on success VFS_VGET returns it with an iocount, as promised above */
	error = VFS_VGET(mp, (ino64_t)va.va_parentid, &dvp, ctx);

end:
	if (error == 0) {
		*dvpp = dvp;
	}
	return error;
}
425
426/* the mountpoint lock should be held going into this function */
/* Lookup inside the synthetic directories (the fake root and "d").
 *
 * The mountpoint lock (nullm_lock) must be held going into this function;
 * every path drops it before returning. */
static int
null_special_lookup(struct vnop_lookup_args * ap)
{
	struct componentname * cnp = ap->a_cnp;
	struct vnode * dvp = ap->a_dvp;
	struct vnode * ldvp = NULL;
	struct vnode * lvp = NULL;
	struct vnode * vp = NULL;
	struct vnode * tempvp = NULL;
	struct mount * mp = vnode_mount(vp: dvp);
	struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
	int error = ENOENT;
	vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: ap->a_context);

	// null_mp->nullm_lock is locked
	if (dvp == null_mp->nullm_rootvp) {
		/* handle . and .. */
		if (cnp->cn_nameptr[0] == '.') {
			if (cnp->cn_namelen == 1 || (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.')) {
				/* this is the root so both . and .. give back the root */
				vp = dvp;
				lck_mtx_unlock(lck: &null_mp->nullm_lock);
				error = vnode_get(vp);
				goto end;
			}
		}

		/* our virtual wrapper directory should be d but D is acceptable if the
		 * lower file system is case insensitive */
		if (cnp->cn_namelen == 1 &&
		    (cnp->cn_nameptr[0] == 'd' || (null_mp->nullm_flags & NULLM_CASEINSENSITIVE ? cnp->cn_nameptr[0] == 'D' : 0))) {
			error = 0;
			if (null_mp->nullm_secondvp == NULL) {
				// drop the lock before making a new vnode
				lck_mtx_unlock(lck: &null_mp->nullm_lock);
				error = null_getnewvnode(mp, NULL, dvp, vpp: &vp, cnp, root: 0);
				if (error) {
					goto end;
				}
				// Get the lock before modifying nullm_secondvp
				lck_mtx_lock(lck: &null_mp->nullm_lock);
				if (null_mp->nullm_secondvp == NULL) {
					/* we won the race: publish the vnode we created */
					null_mp->nullm_secondvp = vp;
					lck_mtx_unlock(lck: &null_mp->nullm_lock);
				} else {
					/* Another thread already set null_mp->nullm_secondvp while the
					 * lock was dropped so recycle the vnode we just made */
					tempvp = vp;
					vp = null_mp->nullm_secondvp;
					lck_mtx_unlock(lck: &null_mp->nullm_lock);
					/* recycle will call reclaim which will get rid of the internals */
					vnode_recycle(vp: tempvp);
					vnode_put(vp: tempvp);

					error = vnode_get(vp);
				}
			} else {
				/* "d" already exists: just take an iocount on it */
				vp = null_mp->nullm_secondvp;
				lck_mtx_unlock(lck: &null_mp->nullm_lock);
				error = vnode_get(vp);
			}
		} else {
			/* no such name in the root; still must drop the lock (error stays ENOENT) */
			lck_mtx_unlock(lck: &null_mp->nullm_lock);
		}
	} else if (dvp == null_mp->nullm_secondvp) {
		/* handle . and .. */
		if (cnp->cn_nameptr[0] == '.') {
			if (cnp->cn_namelen == 1) {
				vp = dvp;
				lck_mtx_unlock(lck: &null_mp->nullm_lock);
				error = vnode_get(vp);
				goto end;
			} else if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
				/* parent here is the root vp */
				vp = null_mp->nullm_rootvp;
				lck_mtx_unlock(lck: &null_mp->nullm_lock);
				error = vnode_get(vp);
				goto end;
			}
		}
		/* nullmp->nullm_lowerrootvp was set at mount time so don't need to lock to
		 * access it */
		/* Drop the global lock since we aren't accessing rootvp or secondvp any more */
		lck_mtx_unlock(lck: &null_mp->nullm_lock);
		error = vnode_getwithvid(null_mp->nullm_lowerrootvp, null_mp->nullm_lowerrootvid);
		if (error) {
			goto end;
		}

		/* We don't want to mess with case insensitivity and unicode, so the plan to
		 * check here is
		 * 1. try to get the lower root's parent
		 * 2. If we get a parent, then perform a lookup on the lower file system
		 * using the parent and the passed in cnp
		 * 3. If that worked and we got a vp, then see if the vp is lowerrootvp. If
		 * so we got a match
		 * 4. Anything else results in ENOENT.
		 */
		error = null_get_lowerparent(lvp: null_mp->nullm_lowerrootvp, dvpp: &ldvp, ctx: ectx);

		if (error == 0) {
			error = VNOP_LOOKUP(ldvp, &lvp, cnp, ectx);
			vnode_put(vp: ldvp);

			if (error == 0) {
				// nullm_lowerrootvp is only touched during mount and unmount so we don't need the lock to check it.
				if (lvp == null_mp->nullm_lowerrootvp) {
					/* always check the hashmap for a vnode for this, the root of the
					 * mirrored system */
					error = null_nodeget(mp, lowervp: lvp, dvp, vpp: &vp, cnp, root: 0);
				} else {
					error = ENOENT;
				}
				vnode_put(vp: lvp);
			}
		}
		vnode_put(vp: null_mp->nullm_lowerrootvp);
	}

end:
	nullfs_cleanup_patched_context(null_mp, ctx: ectx);
	if (error == 0) {
		*ap->a_vpp = vp;
	}
	return error;
}
553
554/*
555 * We have to carry on the locking protocol on the null layer vnodes
556 * as we progress through the tree. We also have to enforce read-only
557 * if this layer is mounted read-only.
558 */
/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vnop_lookup_args * ap)
{
	struct componentname * cnp = ap->a_cnp;
	struct vnode * dvp = ap->a_dvp;
	struct vnode *vp, *ldvp, *lvp;
	struct mount * mp;
	struct null_mount * null_mp;
	int error;
	vfs_context_t ectx;

	NULLFSDEBUG("%s parent: %p component: %.*s\n", __FUNCTION__, ap->a_dvp, cnp->cn_namelen, cnp->cn_nameptr);

	mp = vnode_mount(vp: dvp);
	/* rename and delete are not allowed. this is a read only file system */
	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME || cnp->cn_nameiop == CREATE) {
		return EROFS;
	}
	null_mp = MOUNTTONULLMOUNT(mp);


	lck_mtx_lock(lck: &null_mp->nullm_lock);
	if (nullfs_isspecialvp(vp: dvp)) {
		error = null_special_lookup(ap);
		// null_special_lookup drops the lock
		return error;
	}
	lck_mtx_unlock(lck: &null_mp->nullm_lock);

	// . and .. handling
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			vp = dvp;
		} else if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			/* mount point crossing is handled in null_special_lookup */
			vp = vnode_parent(vp: dvp);
		} else {
			/* name merely starts with '.', fall through to the normal path */
			goto notdot;
		}

		/* vnode_parent may return NULL if no parent is cached */
		error = vp ? vnode_get(vp) : ENOENT;

		if (error == 0) {
			*ap->a_vpp = vp;
		}

		return error;
	}

notdot:
	ectx = nullfs_get_patched_context(null_mp, ctx: ap->a_context);
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Hold ldvp. The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation.
	 */
	error = vnode_getwithref(vp: ldvp);
	if (error) {
		nullfs_cleanup_patched_context(null_mp, ctx: ectx);
		return error;
	}

	error = VNOP_LOOKUP(ldvp, &lvp, cnp, ectx);

	vnode_put(vp: ldvp);

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			/* lower lookup returned the directory itself (e.g. ".") */
			vp = dvp;
			error = vnode_get(vp);
		} else {
			/* wrap (or find the existing shadow of) the lower vnode */
			error = null_nodeget(mp, lowervp: lvp, dvp, vpp: &vp, cnp, root: 0);
		}
		if (error == 0) {
			*ap->a_vpp = vp;
		}
		/* if we got lvp, drop the iocount from VNOP_LOOKUP */
		if (lvp != NULL) {
			vnode_put(vp: lvp);
		}
	}

	nullfs_cleanup_patched_context(null_mp, ctx: ectx);
	return error;
}
646
647/*
648 * Don't think this needs to do anything
649 */
650static int
651null_inactive(__unused struct vnop_inactive_args * ap)
652{
653 NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
654
655 return 0;
656}
657
/*
 * reclaim: final teardown of a nullfs vnode.  Unhashes the node (if it was
 * hashed), drops the usecount it held on its lower vnode, clears the
 * cached rootvp/secondvp pointers if this is one of the special vnodes,
 * and frees the per-vnode null_node.
 */
static int
null_reclaim(struct vnop_reclaim_args * ap)
{
	struct vnode * vp;
	struct null_node * xp;
	struct vnode * lowervp;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;

	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	/* hold the mount lock across the fs-ref removal, unhash, and the
	 * special-vnode pointer updates below */
	lck_mtx_lock(lck: &null_mp->nullm_lock);

	vnode_removefsref(vp);

	if (lowervp != NULL) {
		/* root and second don't have a lowervp, so nothing to release and nothing
		 * got hashed */
		if (xp->null_flags & NULL_FLAG_HASHED) {
			/* only call this if we actually made it into the hash list. reclaim gets
			 * called also to
			 * clean up a vnode that got created when it didn't need to under race
			 * conditions */
			null_hashrem(xp);
		}
		vnode_rele(vp: lowervp);
	}

	/* forget the cached special vnodes so lookup will re-create them */
	if (vp == null_mp->nullm_rootvp) {
		null_mp->nullm_rootvp = NULL;
	} else if (vp == null_mp->nullm_secondvp) {
		null_mp->nullm_secondvp = NULL;
	}

	lck_mtx_unlock(lck: &null_mp->nullm_lock);

	cache_purge(vp);
	vnode_clearfsnode(vp);

	kfree_type(struct null_node, xp);

	return 0;
}
705
/* On-disk record length for a dirent holding d_namlen name bytes: the fixed
 * header plus the used portion of d_name (incl. NUL), rounded up to 4 bytes. */
#define DIRENT_SZ(dp) ((sizeof(struct dirent) - NAME_MAX) + (((dp)->d_namlen + 1 + 3) & ~3))

/* Emit one synthetic directory entry (always typed DT_DIR) into uio.
 * Returns 0 on success, EMSGSIZE if the remaining buffer cannot hold the
 * record, or EINVAL if the name would not fit in d_name. */
static int
store_entry_special(ino_t ino, const char * name, struct uio * uio)
{
	struct dirent e;
	size_t namelen = strlen(s: name);
	int error = EINVAL;

	if (namelen + 1 <= NAME_MAX) {
		/* zero the whole record so the alignment padding is clean */
		memset(s: &e, c: 0, n: sizeof(e));

		e.d_ino = ino;
		e.d_type = DT_DIR;

		e.d_namlen = namelen; /* don't include NUL */
		e.d_reclen = DIRENT_SZ(&e);
		if (uio_resid(a_uio: uio) >= e.d_reclen) {
			strlcpy(dst: e.d_name, src: name, NAME_MAX);
			error = uiomove(cp: (caddr_t)&e, n: e.d_reclen, uio);
		} else {
			error = EMSGSIZE;
		}
	}
	return error;
}
732
/*
 * readdir for the synthetic root and "d" directories.
 *
 * Called (from nullfs_readdir) with null_mp->nullm_lock held; this function
 * is responsible for dropping it on every path.  Offsets 0/1/2 correspond
 * to ".", "..", and the single child entry; the fake directories contain
 * nothing else.
 */
static int
nullfs_special_readdir(struct vnop_readdir_args * ap)
{
	struct vnode * vp = ap->a_vp;
	struct uio * uio = ap->a_uio;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));
	off_t offset = uio_offset(a_uio: uio);
	int error = ERANGE;
	int items = 0;
	ino_t ino = 0;
	const char * name = NULL;
	boolean_t locked = TRUE;

	/* extended/seek-offset readdir formats are not supported here */
	if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) {
		lck_mtx_unlock(lck: &null_mp->nullm_lock);
		return EINVAL;
	}

	if (offset == 0) {
		/* . case */
		if (vp == null_mp->nullm_rootvp) {
			ino = NULL_ROOT_INO;
		} else { /* only get here if vp matches nullm_rootvp or nullm_secondvp */
			ino = NULL_SECOND_INO;
		}
		error = store_entry_special(ino, name: ".", uio);
		if (error) {
			goto out;
		}
		offset++;
		items++;
	}
	if (offset == 1) {
		/* .. case */
		/* only get here if vp matches nullm_rootvp or nullm_secondvp */
		ino = NULL_ROOT_INO;

		error = store_entry_special(ino, name: "..", uio);
		if (error) {
			goto out;
		}
		offset++;
		items++;
	}
	if (offset == 2) {
		/* the directory case */
		if (vp == null_mp->nullm_rootvp) {
			ino = NULL_SECOND_INO;
			name = "d";
		} else { /* only get here if vp matches nullm_rootvp or nullm_secondvp */
			// drop the lock before performing operations on nullm_lowerrootvp
			lck_mtx_unlock(lck: &null_mp->nullm_lock);
			locked = FALSE;
			ino = NULL_THIRD_INO;
			if (vnode_getwithvid(null_mp->nullm_lowerrootvp, null_mp->nullm_lowerrootvid)) {
				/* In this case the lower file system has been ripped out from under us,
				 * but we don't want to error out
				 * Instead we just want d to look empty. */
				error = 0;
				goto out;
			}
			/* the third entry is named after the lower fs root */
			name = vnode_getname_printable(vp: null_mp->nullm_lowerrootvp);
		}
		error = store_entry_special(ino, name, uio);

		/* release the name and iocount taken in the NULL_THIRD_INO branch */
		if (ino == NULL_THIRD_INO) {
			vnode_putname_printable(name);
			vnode_put(vp: null_mp->nullm_lowerrootvp);
		}

		if (error) {
			goto out;
		}
		offset++;
		items++;
	}

out:
	if (locked) {
		lck_mtx_unlock(lck: &null_mp->nullm_lock);
	}
	if (error == EMSGSIZE) {
		error = 0; /* return success if we ran out of space, but we wanted to make
		            * sure that we didn't update offset and items incorrectly */
	}
	uio_setoffset(a_uio: uio, a_offset: offset);
	if (ap->a_numdirent) {
		*ap->a_numdirent = items;
	}
	return error;
}
824
825static int
826nullfs_readdir(struct vnop_readdir_args * ap)
827{
828 struct vnode *vp, *lvp;
829 int error;
830 struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));
831
832 NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
833 /* assumption is that any vp that comes through here had to go through lookup
834 */
835
836 lck_mtx_lock(lck: &null_mp->nullm_lock);
837 if (nullfs_isspecialvp(vp: ap->a_vp)) {
838 error = nullfs_special_readdir(ap);
839 // nullfs_special_readdir drops the lock
840 return error;
841 }
842 lck_mtx_unlock(lck: &null_mp->nullm_lock);
843
844 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: ap->a_context);
845 vp = ap->a_vp;
846 lvp = NULLVPTOLOWERVP(vp);
847 error = vnode_getwithref(vp: lvp);
848 if (error == 0) {
849 error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ectx);
850 vnode_put(vp: lvp);
851 }
852
853 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
854 return error;
855}
856
857static int
858nullfs_readlink(struct vnop_readlink_args * ap)
859{
860 NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
861 int error;
862 struct vnode *vp, *lvp;
863 struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));
864
865 if (nullfs_checkspecialvp(vp: ap->a_vp)) {
866 return ENOTSUP; /* the special vnodes aren't links */
867 }
868
869 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: ap->a_context);
870 vp = ap->a_vp;
871 lvp = NULLVPTOLOWERVP(vp);
872
873 error = vnode_getwithref(vp: lvp);
874 if (error == 0) {
875 error = VNOP_READLINK(lvp, ap->a_uio, ectx);
876 vnode_put(vp: lvp);
877
878 if (error) {
879 NULLFSDEBUG("readlink failed: %d\n", error);
880 }
881 }
882
883 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
884 return error;
885}
886
887static int
888nullfs_pathconf(__unused struct vnop_pathconf_args * args)
889{
890 NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
891 return EINVAL;
892}
893
894static int
895nullfs_fsync(__unused struct vnop_fsync_args * args)
896{
897 NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
898 return 0;
899}
900
901static int
902nullfs_mmap(struct vnop_mmap_args * args)
903{
904 int error;
905 struct vnode *vp, *lvp;
906 struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
907
908 NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
909
910 if (nullfs_checkspecialvp(vp: args->a_vp)) {
911 return 0; /* nothing extra needed */
912 }
913
914 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: args->a_context);
915 vp = args->a_vp;
916 lvp = NULLVPTOLOWERVP(vp);
917 error = vnode_getwithref(vp: lvp);
918 if (error == 0) {
919 error = VNOP_MMAP(lvp, args->a_fflags, ectx);
920 vnode_put(vp: lvp);
921 }
922
923 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
924 return error;
925}
926
927static int
928nullfs_mnomap(struct vnop_mnomap_args * args)
929{
930 int error;
931 struct vnode *vp, *lvp;
932 struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
933
934 NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
935
936 if (nullfs_checkspecialvp(vp: args->a_vp)) {
937 return 0; /* nothing extra needed */
938 }
939
940 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: args->a_context);
941 vp = args->a_vp;
942 lvp = NULLVPTOLOWERVP(vp);
943 error = vnode_getwithref(vp: lvp);
944 if (error == 0) {
945 error = VNOP_MNOMAP(lvp, ectx);
946 vnode_put(vp: lvp);
947 }
948
949 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
950 return error;
951}
952
953static int
954nullfs_getxattr(struct vnop_getxattr_args * args)
955{
956 int error;
957 struct vnode *vp, *lvp;
958 struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
959
960 NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
961
962 if (nullfs_checkspecialvp(vp: args->a_vp)) {
963 return ENOATTR; /* no xattrs on the special vnodes */
964 }
965
966 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: args->a_context);
967 vp = args->a_vp;
968 lvp = NULLVPTOLOWERVP(vp);
969 error = vnode_getwithref(vp: lvp);
970 if (error == 0) {
971 error = VNOP_GETXATTR(vp: lvp, name: args->a_name, uio: args->a_uio, size: args->a_size, options: args->a_options, ctx: ectx);
972 vnode_put(vp: lvp);
973 }
974
975 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
976 return error;
977}
978
979static int
980nullfs_listxattr(struct vnop_listxattr_args * args)
981{
982 int error;
983 struct vnode *vp, *lvp;
984 struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
985
986 NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
987
988 if (nullfs_checkspecialvp(vp: args->a_vp)) {
989 return 0; /* no xattrs on the special vnodes */
990 }
991
992 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: args->a_context);
993 vp = args->a_vp;
994 lvp = NULLVPTOLOWERVP(vp);
995 error = vnode_getwithref(vp: lvp);
996 if (error == 0) {
997 error = VNOP_LISTXATTR(lvp, args->a_uio, args->a_size, args->a_options, ectx);
998 vnode_put(vp: lvp);
999 }
1000
1001 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
1002 return error;
1003}
1004
/* relies on v1 paging */

/*
 * nullfs_pagein: service a page-in against a nullfs vnode by reading the
 * requested range from the lower (backing) vnode directly into the
 * caller-supplied UPL, then committing the pages that were filled and
 * aborting the remainder (unless UPL_NOCOMMIT was requested).
 */
static int
nullfs_pagein(struct vnop_pagein_args * ap)
{
	int error = EIO;
	struct vnode *vp, *lvp;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));
	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);

	/* Paging is only supported for regular files. */
	if (vnode_vtype(vp) != VREG) {
		return ENOTSUP;
	}

	vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: ap->a_context);
	/*
	 * Ask VM/UBC/VFS to do our bidding
	 */
	if (vnode_getwithvid(lvp, NULLVPTOLOWERVID(vp)) == 0) {
		vm_offset_t ioaddr;
		uio_t auio;
		kern_return_t kret;
		off_t bytes_to_commit;
		off_t lowersize;
		upl_t upl = ap->a_pl;
		user_ssize_t bytes_remaining = 0;

		/* Build a uio describing the requested file range. */
		auio = uio_create(a_iovcount: 1, a_offset: ap->a_f_offset, a_spacetype: UIO_SYSSPACE, a_iodirection: UIO_READ);
		if (auio == NULL) {
			error = EIO;
			goto exit_no_unmap;
		}

		/* Map the UPL so the lower read can land in its pages directly. */
		kret = ubc_upl_map(upl, &ioaddr);
		if (KERN_SUCCESS != kret) {
			panic("nullfs_pagein: ubc_upl_map() failed with (%d)", kret);
		}

		/* Skip to where this request's pages begin within the UPL. */
		ioaddr += ap->a_pl_offset;

		error = uio_addiov(a_uio: auio, a_baseaddr: (user_addr_t)ioaddr, a_length: ap->a_size);
		if (error) {
			goto exit;
		}

		/* Keep the upper vnode's ubc size in sync with the lower file. */
		lowersize = ubc_getsize(lvp);
		if (lowersize != ubc_getsize(vp)) {
			(void)ubc_setsize(vp, lowersize); /* ignore failures, nothing can be done */
		}

		/* Read from the lower vnode straight into the mapped UPL. */
		error = VNOP_READ(vp: lvp, uio: auio, ioflag: ((ap->a_flags & UPL_IOSYNC) ? IO_SYNC : 0), ctx: ectx);

		/* Anything the read didn't fill (e.g. past EOF) must be zeroed. */
		bytes_remaining = uio_resid(a_uio: auio);
		if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size) {
			/* zero bytes that weren't read in to the upl */
			bzero(s: (void*)((uintptr_t)(ioaddr + ap->a_size - bytes_remaining)), n: (size_t) bytes_remaining);
		}

exit:
		kret = ubc_upl_unmap(upl);
		if (KERN_SUCCESS != kret) {
			panic("nullfs_pagein: ubc_upl_unmap() failed with (%d)", kret);
		}

		if (auio != NULL) {
			uio_free(a_uio: auio);
		}

exit_no_unmap:
		/* Unless the caller asked us not to, commit what was read and
		 * abort the rest so every page in the UPL is accounted for. */
		if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
			if (!error && (bytes_remaining >= 0) && (bytes_remaining <= (user_ssize_t)ap->a_size)) {
				/* only commit what was read in (page aligned)*/
				bytes_to_commit = ap->a_size - bytes_remaining;
				if (bytes_to_commit) {
					/* need to make sure bytes_to_commit and byte_remaining are page aligned before calling ubc_upl_commit_range*/
					if (bytes_to_commit & PAGE_MASK) {
						/* Round the commit size up to the next page boundary
						 * and shrink bytes_remaining to match. */
						bytes_to_commit = (bytes_to_commit & (~PAGE_MASK)) + (PAGE_MASK + 1);
						assert(bytes_to_commit <= (off_t)ap->a_size);

						bytes_remaining = ap->a_size - bytes_to_commit;
					}
					ubc_upl_commit_range(upl, ap->a_pl_offset, (upl_size_t)bytes_to_commit, UPL_COMMIT_FREE_ON_EMPTY);
				}

				/* abort anything thats left */
				if (bytes_remaining) {
					ubc_upl_abort_range(upl, ap->a_pl_offset + bytes_to_commit, (upl_size_t)bytes_remaining, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
				}
			} else {
				/* The read failed or bytes_remaining is out of range:
				 * abort the entire UPL range. */
				ubc_upl_abort_range(upl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
			}
		}
		vnode_put(vp: lvp);
	} else if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
		/* Lower vnode was unavailable: abort the entire UPL range. */
		ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
	}

	nullfs_cleanup_patched_context(null_mp, ctx: ectx);
	return error;
}
1107
1108static int
1109nullfs_read(struct vnop_read_args * ap)
1110{
1111 int error = EIO;
1112
1113 struct vnode *vp, *lvp;
1114 struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));
1115 NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
1116
1117 if (nullfs_checkspecialvp(vp: ap->a_vp)) {
1118 return ENOTSUP; /* the special vnodes can't be read */
1119 }
1120
1121 vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx: ap->a_context);
1122 vp = ap->a_vp;
1123 lvp = NULLVPTOLOWERVP(vp);
1124
1125 /*
1126 * First some house keeping
1127 */
1128 if (vnode_getwithvid(lvp, NULLVPTOLOWERVID(vp)) == 0) {
1129 if (!vnode_isreg(vp: lvp) && !vnode_islnk(vp: lvp)) {
1130 error = EPERM;
1131 goto end;
1132 }
1133
1134 if (uio_resid(a_uio: ap->a_uio) == 0) {
1135 error = 0;
1136 goto end;
1137 }
1138
1139 /*
1140 * Now ask VM/UBC/VFS to do our bidding
1141 */
1142
1143 error = VNOP_READ(vp: lvp, uio: ap->a_uio, ioflag: ap->a_ioflag, ctx: ectx);
1144 if (error) {
1145 NULLFSDEBUG("VNOP_READ failed: %d\n", error);
1146 }
1147end:
1148 vnode_put(vp: lvp);
1149 }
1150
1151 nullfs_cleanup_patched_context(null_mp, ctx: ectx);
1152 return error;
1153}
1154
1155/*
1156 * Global vfs data structures
1157 */
1158
1159static const struct vnodeopv_entry_desc nullfs_vnodeop_entries[] = {
1160 {.opve_op = &vnop_default_desc, .opve_impl = (vop_t)nullfs_default}, {.opve_op = &vnop_getattr_desc, .opve_impl = (vop_t)nullfs_getattr},
1161 {.opve_op = &vnop_open_desc, .opve_impl = (vop_t)nullfs_open}, {.opve_op = &vnop_close_desc, .opve_impl = (vop_t)nullfs_close},
1162 {.opve_op = &vnop_inactive_desc, .opve_impl = (vop_t)null_inactive}, {.opve_op = &vnop_reclaim_desc, .opve_impl = (vop_t)null_reclaim},
1163 {.opve_op = &vnop_lookup_desc, .opve_impl = (vop_t)null_lookup}, {.opve_op = &vnop_readdir_desc, .opve_impl = (vop_t)nullfs_readdir},
1164 {.opve_op = &vnop_readlink_desc, .opve_impl = (vop_t)nullfs_readlink}, {.opve_op = &vnop_pathconf_desc, .opve_impl = (vop_t)nullfs_pathconf},
1165 {.opve_op = &vnop_fsync_desc, .opve_impl = (vop_t)nullfs_fsync}, {.opve_op = &vnop_mmap_desc, .opve_impl = (vop_t)nullfs_mmap},
1166 {.opve_op = &vnop_mnomap_desc, .opve_impl = (vop_t)nullfs_mnomap}, {.opve_op = &vnop_getxattr_desc, .opve_impl = (vop_t)nullfs_getxattr},
1167 {.opve_op = &vnop_pagein_desc, .opve_impl = (vop_t)nullfs_pagein}, {.opve_op = &vnop_read_desc, .opve_impl = (vop_t)nullfs_read},
1168 {.opve_op = &vnop_listxattr_desc, .opve_impl = (vop_t)nullfs_listxattr}, {.opve_op = NULL, .opve_impl = NULL},
1169};
1170
1171const struct vnodeopv_desc nullfs_vnodeop_opv_desc = {.opv_desc_vector_p = &nullfs_vnodeop_p, .opv_desc_ops = nullfs_vnodeop_entries};
1172
// NULLFS-specific helper function
1174
1175int
1176nullfs_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
1177{
1178 int result = EINVAL;
1179
1180 if (out_vpp == NULL || in_vp == NULL) {
1181 goto end;
1182 }
1183
1184 struct vfsstatfs * sp = NULL;
1185 mount_t mp = vnode_mount(vp: in_vp);
1186
1187 sp = vfs_statfs(mp);
1188 //If this isn't a nullfs vnode or it is but it's a special vnode
1189 if (strcmp(s1: sp->f_fstypename, s2: "nullfs") != 0 || nullfs_checkspecialvp(vp: in_vp)) {
1190 *out_vpp = NULLVP;
1191 result = ENOENT;
1192 goto end;
1193 }
1194
1195 vnode_t lvp = NULLVPTOLOWERVP(in_vp);
1196 if ((result = vnode_getwithvid(lvp, NULLVPTOLOWERVID(in_vp)))) {
1197 goto end;
1198 }
1199
1200 *out_vpp = lvp;
1201
1202end:
1203 return result;
1204}
1205