1 | /* |
2 | * Copyright (c) 2000-2014 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ |
29 | /* |
30 | * Copyright (c) 1982, 1986, 1989, 1993 |
31 | * The Regents of the University of California. All rights reserved. |
32 | * (c) UNIX System Laboratories, Inc. |
33 | * All or some portions of this file are derived from material licensed |
34 | * to the University of California by American Telephone and Telegraph |
35 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
36 | * the permission of UNIX System Laboratories, Inc. |
37 | * |
38 | * Redistribution and use in source and binary forms, with or without |
39 | * modification, are permitted provided that the following conditions |
40 | * are met: |
41 | * 1. Redistributions of source code must retain the above copyright |
42 | * notice, this list of conditions and the following disclaimer. |
43 | * 2. Redistributions in binary form must reproduce the above copyright |
44 | * notice, this list of conditions and the following disclaimer in the |
45 | * documentation and/or other materials provided with the distribution. |
46 | * 3. All advertising materials mentioning features or use of this software |
47 | * must display the following acknowledgement: |
48 | * This product includes software developed by the University of |
49 | * California, Berkeley and its contributors. |
50 | * 4. Neither the name of the University nor the names of its contributors |
51 | * may be used to endorse or promote products derived from this software |
52 | * without specific prior written permission. |
53 | * |
54 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
56 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
57 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
58 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
59 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
60 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
61 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
62 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
63 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
64 | * SUCH DAMAGE. |
65 | * |
66 | * @(#)vfs_vnops.c 8.14 (Berkeley) 6/15/95 |
67 | * |
68 | */ |
69 | /* |
70 | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce |
71 | * support for mandatory and extensible security protections. This notice |
72 | * is included in support of clause 2.2 (b) of the Apple Public License, |
73 | * Version 2.0. |
74 | */ |
75 | |
76 | #include <sys/param.h> |
77 | #include <sys/types.h> |
78 | #include <sys/systm.h> |
79 | #include <sys/kernel.h> |
80 | #include <sys/file_internal.h> |
81 | #include <sys/stat.h> |
82 | #include <sys/proc_internal.h> |
83 | #include <sys/kauth.h> |
84 | #include <sys/mount_internal.h> |
85 | #include <sys/namei.h> |
86 | #include <sys/vnode_internal.h> |
87 | #include <sys/ioctl.h> |
88 | #include <sys/tty.h> |
/* Temporary workaround for ubc.h until <rdar://4714366> is resolved */
90 | #define ubc_setcred ubc_setcred_deprecated |
91 | #include <sys/ubc.h> |
92 | #undef ubc_setcred |
93 | int ubc_setcred(struct vnode *, struct proc *); |
94 | #include <sys/conf.h> |
95 | #include <sys/disk.h> |
96 | #include <sys/fsevents.h> |
97 | #include <sys/kdebug.h> |
98 | #include <sys/xattr.h> |
99 | #include <sys/ubc_internal.h> |
100 | #include <sys/uio_internal.h> |
101 | #include <sys/resourcevar.h> |
102 | #include <sys/signalvar.h> |
103 | |
104 | #include <vm/vm_kern.h> |
105 | #include <vm/vm_map.h> |
106 | |
107 | #include <miscfs/specfs/specdev.h> |
108 | #include <miscfs/fifofs/fifo.h> |
109 | |
110 | #if CONFIG_MACF |
111 | #include <security/mac_framework.h> |
112 | #endif |
113 | |
114 | #include <IOKit/IOBSD.h> |
115 | #include <libkern/section_keywords.h> |
116 | |
117 | static int vn_closefile(struct fileglob *fp, vfs_context_t ctx); |
118 | static int vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, |
119 | vfs_context_t ctx); |
120 | static int vn_read(struct fileproc *fp, struct uio *uio, int flags, |
121 | vfs_context_t ctx); |
122 | static int vn_write(struct fileproc *fp, struct uio *uio, int flags, |
123 | vfs_context_t ctx); |
124 | static int vn_select( struct fileproc *fp, int which, void * wql, |
125 | vfs_context_t ctx); |
126 | static int vn_kqfilt_add(struct fileproc *fp, struct knote *kn, |
127 | struct kevent_internal_s *kev, vfs_context_t ctx); |
128 | static void filt_vndetach(struct knote *kn); |
129 | static int filt_vnode(struct knote *kn, long hint); |
130 | static int filt_vnode_common(struct knote *kn, vnode_t vp, long hint); |
131 | static int vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx); |
132 | #if 0 |
133 | static int vn_kqfilt_remove(struct vnode *vp, uintptr_t ident, |
134 | vfs_context_t ctx); |
135 | #endif |
136 | |
/*
 * File-descriptor dispatch table for DTYPE_VNODE files; routes generic
 * file operations to the vn_* handlers defined in this file.
 */
const struct fileops vnops = {
	.fo_type = DTYPE_VNODE,
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_select = vn_select,
	.fo_close = vn_closefile,
	.fo_kqfilter = vn_kqfilt_add,
	.fo_drain = NULL,	/* no drain hook for vnodes */
};
147 | |
148 | static int filt_vntouch(struct knote *kn, struct kevent_internal_s *kev); |
149 | static int filt_vnprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); |
150 | |
/*
 * Knote filter operations for vnode-backed kevents.  Attach is handled
 * via the fileops path (vn_kqfilt_add, wired as .fo_kqfilter above), so
 * f_attach is NULL here.
 */
SECURITY_READ_ONLY_EARLY(struct filterops) vnode_filtops = {
	.f_isfd = 1,	/* knote ident is a file descriptor */
	.f_attach = NULL,	/* attach performed by fo_kqfilter instead */
	.f_detach = filt_vndetach,
	.f_event = filt_vnode,
	.f_touch = filt_vntouch,
	.f_process = filt_vnprocess,
};
159 | |
160 | /* |
161 | * Common code for vnode open operations. |
162 | * Check permissions, and call the VNOP_OPEN or VNOP_CREATE routine. |
163 | * |
164 | * XXX the profusion of interfaces here is probably a bad thing. |
165 | */ |
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	/*
	 * Thin convenience wrapper: vn_open_modflags() wants a pointer so it
	 * can report back the effective mode flags; callers of vn_open() do
	 * not care, so hand it the (by-value) parameter's address.
	 */
	return vn_open_modflags(ndp, &fmode, cmode);
}
171 | |
172 | int |
173 | vn_open_modflags(struct nameidata *ndp, int *fmodep, int cmode) |
174 | { |
175 | struct vnode_attr va; |
176 | |
177 | VATTR_INIT(&va); |
178 | VATTR_SET(&va, va_mode, cmode); |
179 | |
180 | return(vn_open_auth(ndp, fmodep, &va)); |
181 | } |
182 | |
183 | static int |
184 | vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx) |
185 | { |
186 | int error; |
187 | |
188 | if ((error = vnode_ref_ext(vp, fmode, 0)) != 0) { |
189 | goto bad; |
190 | } |
191 | |
192 | /* Call out to allow 3rd party notification of open. |
193 | * Ignore result of kauth_authorize_fileop call. |
194 | */ |
195 | #if CONFIG_MACF |
196 | mac_vnode_notify_open(ctx, vp, fmode); |
197 | #endif |
198 | kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN, |
199 | (uintptr_t)vp, 0); |
200 | |
201 | return 0; |
202 | |
203 | bad: |
204 | return error; |
205 | |
206 | } |
207 | |
208 | /* |
209 | * May do nameidone() to allow safely adding an FSEvent. Cue off of ni_dvp to |
210 | * determine whether that has happened. |
211 | */ |
static int
vn_open_auth_do_create(struct nameidata *ndp, struct vnode_attr *vap, int fmode, boolean_t *did_create, boolean_t *did_open, vfs_context_t ctx)
{
	uint32_t status = 0;	/* filled in by compound vn_create() */
	vnode_t dvp = ndp->ni_dvp;
	int batched;		/* nonzero if the FS supports compound open */
	int error;
	vnode_t vp;

	batched = vnode_compound_open_available(ndp->ni_dvp);
	*did_open = FALSE;

	/* We only ever create regular files here. */
	VATTR_SET(vap, va_type, VREG);
	if (fmode & O_EXCL)
		vap->va_vaflags |= VA_EXCLUSIVE;

#if NAMEDRSRCFORK
	if (ndp->ni_cnd.cn_flags & CN_WANTSRSRCFORK) {
		/* Resource forks are materialized as named streams, not via vn_create(). */
		if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0)
			goto out;
		if ((error = vnode_makenamedstream(dvp, &ndp->ni_vp, XATTR_RESOURCEFORK_NAME, 0, ctx)) != 0)
			goto out;
		*did_create = TRUE;
	} else {
#endif
		/* For a batched (compound) create, the FS does its own authorization. */
		if (!batched) {
			if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0)
				goto out;
		}

		error = vn_create(dvp, &ndp->ni_vp, ndp, vap, VN_CREATE_DOOPEN, fmode, &status, ctx);
		if (error != 0) {
			if (batched) {
				/* The FS may have created the node before failing the open. */
				*did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? TRUE : FALSE;
			} else {
				*did_create = FALSE;
			}

			if (error == EKEEPLOOKING) {
				/* Sanity-check the continue-lookup contract before returning. */
				if (*did_create) {
					panic("EKEEPLOOKING, but we did a create?");
				}
				if (!batched) {
					panic("EKEEPLOOKING from filesystem that doesn't support compound vnops?");
				}
				if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
					panic("EKEEPLOOKING, but continue flag not set?");
				}

				/*
				 * Do NOT drop the dvp: we need everything to continue the lookup.
				 */
				return error;
			}
		} else {
			if (batched) {
				/* Compound path: file is already open; may or may not be newly created. */
				*did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? 1 : 0;
				*did_open = TRUE;
			} else {
				*did_create = TRUE;
			}
		}
#if NAMEDRSRCFORK
	}
#endif

	vp = ndp->ni_vp;

	if (*did_create) {
		int update_flags = 0;

		// Make sure the name & parent pointers are hooked up
		if (vp->v_name == NULL)
			update_flags |= VNODE_UPDATE_NAME;
		if (vp->v_parent == NULLVP)
			update_flags |= VNODE_UPDATE_PARENT;

		if (update_flags)
			vnode_update_identity(vp, dvp, ndp->ni_cnd.cn_nameptr, ndp->ni_cnd.cn_namelen, ndp->ni_cnd.cn_hash, update_flags);

		/* Done with the parent; drop its iocount and signal that to the caller. */
		vnode_put(dvp);
		ndp->ni_dvp = NULLVP;

#if CONFIG_FSE
		if (need_fsevent(FSE_CREATE_FILE, vp)) {
			add_fsevent(FSE_CREATE_FILE, ctx,
				FSE_ARG_VNODE, vp,
				FSE_ARG_DONE);
		}
#endif
	}
out:
	/* On any exit path (except EKEEPLOOKING above), release the parent iocount. */
	if (ndp->ni_dvp != NULLVP) {
		vnode_put(dvp);
		ndp->ni_dvp = NULLVP;
	}

	return error;
}
311 | |
312 | /* |
313 | * This is the number of times we'll loop in vn_open_auth without explicitly |
314 | * yielding the CPU when we determine we have to retry. |
315 | */ |
316 | #define RETRY_NO_YIELD_COUNT 5 |
317 | |
318 | /* |
319 | * Open a file with authorization, updating the contents of the structures |
320 | * pointed to by ndp, fmodep, and vap as necessary to perform the requested |
321 | * operation. This function is used for both opens of existing files, and |
322 | * creation of new files. |
323 | * |
 * Parameters:	ndp			The nameidata pointer describing the
 *					file
 *		fmodep			A pointer to an int containing the mode
 *					information to be used for the open
328 | * vap A pointer to the vnode attribute |
329 | * descriptor to be used for the open |
330 | * |
331 | * Indirect: * Contents of the data structures pointed |
332 | * to by the parameters are modified as |
333 | * necessary to the requested operation. |
334 | * |
335 | * Returns: 0 Success |
336 | * !0 errno value |
337 | * |
338 | * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order. |
339 | * |
340 | * The contents of '*ndp' will be modified, based on the other |
341 | * arguments to this function, and to return file and directory |
342 | * data necessary to satisfy the requested operation. |
343 | * |
344 | * If the file does not exist and we are creating it, then the |
345 | * O_TRUNC flag will be cleared in '*fmodep' to indicate to the |
346 | * caller that the file was not truncated. |
347 | * |
348 | * If the file exists and the O_EXCL flag was not specified, then |
349 | * the O_CREAT flag will be cleared in '*fmodep' to indicate to |
350 | * the caller that the existing file was merely opened rather |
351 | * than created. |
352 | * |
353 | * The contents of '*vap' will be modified as necessary to |
354 | * complete the operation, including setting of supported |
355 | * attribute, clearing of fields containing unsupported attributes |
356 | * in the request, if the request proceeds without them, etc.. |
357 | * |
 * XXX:		This function is too complicated in how it acts on its arguments
 *
 * XXX:		We should enumerate the possible errno values here, and where
361 | * in the code they originated. |
362 | */ |
int
vn_open_auth(struct nameidata *ndp, int *fmodep, struct vnode_attr *vap)
{
	struct vnode *vp;
	struct vnode *dvp;
	vfs_context_t ctx = ndp->ni_cnd.cn_context;
	int error;
	int fmode;
	uint32_t origcnflags;
	boolean_t did_create;
	boolean_t did_open;
	boolean_t need_vnop_open;
	boolean_t batched;
	boolean_t ref_failed;
	int nretries = 0;

again:
	/* All per-attempt state is re-initialized on each retry pass. */
	vp = NULL;
	dvp = NULL;
	batched = FALSE;
	did_create = FALSE;
	need_vnop_open = TRUE;
	ref_failed = FALSE;
	fmode = *fmodep;
	origcnflags = ndp->ni_cnd.cn_flags;

	// If raw encrypted mode is requested, handle that here
	if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)
		&& ISSET(vap->va_dataprotect_flags, VA_DP_RAWENCRYPTED)) {
		fmode |= FENCRYPTED;
	}

	/*
	 * O_CREAT
	 */
	if (fmode & O_CREAT) {
		/* O_CREAT and O_DIRECTORY are mutually exclusive. */
		if ( (fmode & O_DIRECTORY) ) {
			error = EINVAL;
			goto out;
		}
		ndp->ni_cnd.cn_nameiop = CREATE;
#if CONFIG_TRIGGERS
		ndp->ni_op = OP_LINK;
#endif
		/* Inherit USEDVP, vnode_open() supported flags only */
		ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
		ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF | AUDITVNPATH1;
		ndp->ni_flag = NAMEI_COMPOUNDOPEN;
#if NAMEDRSRCFORK
		/* open calls are allowed for resource forks. */
		ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
		/* Follow a trailing symlink only when not O_EXCL/O_NOFOLLOW and the caller asked for it. */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0 && (origcnflags & FOLLOW) != 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;

continue_create_lookup:
		if ( (error = namei(ndp)) )
			goto out;

		dvp = ndp->ni_dvp;
		vp = ndp->ni_vp;

		batched = vnode_compound_open_available(dvp);

		/* not found, create */
		if (vp == NULL) {
			/* must have attributes for a new file */
			if (vap == NULL) {
				vnode_put(dvp);
				error = EINVAL;
				goto out;
			}
			/*
			 * Attempt a create.   For a system supporting compound VNOPs, we may
			 * find an existing file or create one; in either case, we will already
			 * have the file open and no VNOP_OPEN() will be needed.
			 */
			error = vn_open_auth_do_create(ndp, vap, fmode, &did_create, &did_open, ctx);

			/* do_create may have consumed/cleared these; re-read both. */
			dvp = ndp->ni_dvp;
			vp = ndp->ni_vp;

			/*
			 * Detected a node that the filesystem couldn't handle.  Don't call
			 * nameidone() yet, because we need that path buffer.
			 */
			if (error == EKEEPLOOKING) {
				if (!batched) {
					panic("EKEEPLOOKING from a filesystem that doesn't support compound VNOPs?");
				}
				goto continue_create_lookup;
			}

			nameidone(ndp);
			if (dvp) {
				panic("Shouldn't have a dvp here.");
			}

			if (error) {
				/*
				 * Check for a create race.
				 */
				if ((error == EEXIST) && !(fmode & O_EXCL)){
					if (vp)
						vnode_put(vp);
					goto again;
				}
				goto bad;
			}

			need_vnop_open = !did_open;
		}
		else {
			/* Found an existing node while creating. */
			if (fmode & O_EXCL)
				error = EEXIST;

			/*
			 * We have a vnode.  Use compound open if available
			 * or else fall through to "traditional" path.  Note: can't
			 * do a compound open for root, because the parent belongs
			 * to a different FS.
			 */
			if (error == 0 && batched && (vnode_mount(dvp) == vnode_mount(vp))) {
				error = VNOP_COMPOUND_OPEN(dvp, &ndp->ni_vp, ndp, 0, fmode, NULL, NULL, ctx);

				if (error == 0) {
					vp = ndp->ni_vp;
					need_vnop_open = FALSE;
				} else if (error == EKEEPLOOKING) {
					if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
						panic("EKEEPLOOKING, but continue flag not set?");
					}
					goto continue_create_lookup;
				}
			}
			nameidone(ndp);
			vnode_put(dvp);
			ndp->ni_dvp = NULLVP;

			if (error) {
				goto bad;
			}

			/* The file already existed, so report "opened", not "created". */
			fmode &= ~O_CREAT;

			/* Fall through */
		}
	}
	else {
		/*
		 * Not O_CREAT
		 */
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		/* Inherit USEDVP, vnode_open() supported flags only */
		ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
		ndp->ni_cnd.cn_flags |= FOLLOW | LOCKLEAF | AUDITVNPATH1 | WANTPARENT;
#if NAMEDRSRCFORK
		/* open calls are allowed for resource forks. */
		ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
		if (fmode & FENCRYPTED)
			ndp->ni_cnd.cn_flags |= CN_RAW_ENCRYPTED | CN_SKIPNAMECACHE;
		ndp->ni_flag = NAMEI_COMPOUNDOPEN;

		/* preserve NOFOLLOW from vnode_open() */
		if (fmode & O_NOFOLLOW || fmode & O_SYMLINK || (origcnflags & FOLLOW) == 0) {
			ndp->ni_cnd.cn_flags &= ~FOLLOW;
		}

		/* Do a lookup, possibly going directly to filesystem for compound operation */
		do {
			if ( (error = namei(ndp)) )
				goto out;
			vp = ndp->ni_vp;
			dvp = ndp->ni_dvp;

			/* Check for batched lookup-open */
			batched = vnode_compound_open_available(dvp);
			if (batched && ((vp == NULLVP) || (vnode_mount(dvp) == vnode_mount(vp)))) {
				error = VNOP_COMPOUND_OPEN(dvp, &ndp->ni_vp, ndp, 0, fmode, NULL, NULL, ctx);
				vp = ndp->ni_vp;
				if (error == 0) {
					need_vnop_open = FALSE;
				} else if (error == EKEEPLOOKING) {
					if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
						panic("EKEEPLOOKING, but continue flag not set?");
					}
				}
			}
		} while (error == EKEEPLOOKING);

		nameidone(ndp);
		vnode_put(dvp);
		ndp->ni_dvp = NULLVP;

		if (error) {
			goto bad;
		}
	}

	/*
	 * By this point, nameidone() is called, dvp iocount is dropped,
	 * and dvp pointer is cleared.
	 */
	if (ndp->ni_dvp != NULLVP) {
		panic("Haven't cleaned up adequately in vn_open_auth()");
	}

#if DEVELOPMENT || DEBUG
	/*
	 * XXX VSWAP: Check for entitlements or special flag here
	 * so we can restrict access appropriately.
	 */
#else /* DEVELOPMENT || DEBUG */

	/* Release builds: only the kernel context may open a swap file for write/truncate. */
	if (vnode_isswap(vp) && (fmode & (FWRITE | O_TRUNC)) && (ctx != vfs_context_kernel())) {
		error = EPERM;
		goto bad;
	}
#endif /* DEVELOPMENT || DEBUG */

	/*
	 * Expect to use this code for filesystems without compound VNOPs, for the root
	 * of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(),
	 * and for shadow files, which do not live on the same filesystems as their "parents."
	 */
	if (need_vnop_open) {
		if (batched && !vnode_isvroot(vp) && !vnode_isnamedstream(vp)) {
			panic("Why am I trying to use VNOP_OPEN() on anything other than the root or a named stream?");
		}

		if (!did_create) {
			error = vn_authorize_open_existing(vp, &ndp->ni_cnd, fmode, ctx, NULL);
			if (error) {
				goto bad;
			}
		}

		if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)
			&& ISSET(vap->va_dataprotect_flags, VA_DP_RAWUNENCRYPTED)) {
			/* Don't allow unencrypted io request from user space unless entitled */
			boolean_t entitled = FALSE;
#if !SECURE_KERNEL
			entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.security.file-unencrypt-access");
#endif
			if (!entitled) {
				error = EPERM;
				goto bad;
			}
			fmode |= FUNENCRYPTED;
		}

		error = VNOP_OPEN(vp, fmode, ctx);
		if (error) {
			goto bad;
		}
		need_vnop_open = FALSE;
	}

	// if the vnode is tagged VOPENEVT and the current process
	// has the P_CHECKOPENEVT flag set, then we or in the O_EVTONLY
	// flag to the open mode so that this open won't count against
	// the vnode when carbon delete() does a vnode_isinuse() to see
	// if a file is currently in use.  this allows spotlight
	// importers to not interfere with carbon apps that depend on
	// the no-delete-if-busy semantics of carbon delete().
	//
	if (!did_create && (vp->v_flag & VOPENEVT) && (current_proc()->p_flag & P_CHECKOPENEVT)) {
		fmode |= O_EVTONLY;
	}

	/*
	 * Grab reference, etc.
	 */
	error = vn_open_auth_finish(vp, fmode, ctx);
	if (error) {
		ref_failed = TRUE;
		goto bad;
	}

	/* Compound VNOP open is responsible for doing the truncate */
	if (batched || did_create)
		fmode &= ~O_TRUNC;

	*fmodep = fmode;
	return (0);

bad:
	/* Opened either explicitly or by a batched create */
	if (!need_vnop_open) {
		VNOP_CLOSE(vp, fmode, ctx);
	}

	ndp->ni_vp = NULL;
	if (vp) {
#if NAMEDRSRCFORK
		/* Aggressively recycle shadow files if we error'd out during open() */
		if ((vnode_isnamedstream(vp)) &&
			(vp->v_parent != NULLVP) &&
			(vnode_isshadow(vp))) {
				vnode_recycle(vp);
		}
#endif
		vnode_put(vp);
		/*
		 * Check for a race against unlink.  We had a vnode
		 * but according to vnode_authorize or VNOP_OPEN it
		 * no longer exists.
		 *
		 * EREDRIVEOPEN: means that we were hit by the tty allocation race.
		 */
		if (((error == ENOENT) && (*fmodep & O_CREAT)) || (error == EREDRIVEOPEN) || ref_failed) {
			/*
			 * We'll retry here but it may be possible that we get
			 * into a retry "spin" inside the kernel and not allow
			 * threads, which need to run in order for the retry
			 * loop to end, to run. An example is an open of a
			 * terminal which is getting revoked and we spin here
			 * without yielding because namei and VNOP_OPEN are
			 * successful but vnode_ref fails. The revoke needs
			 * threads with an iocount to run but if spin here we
			 * may possibly be blocking other threads from running.
			 *
			 * We start yielding the CPU after some number of
			 * retries for increasing durations. Note that this is
			 * still a loop without an exit condition.
			 */
			nretries += 1;
			if (nretries > RETRY_NO_YIELD_COUNT) {
				/* Every hz/100 secs is 10 msecs ... */
				tsleep(&nretries, PVFS, "vn_open_auth_retry",
					MIN((nretries * (hz/100)), hz));
			}
			goto again;
		}
	}

out:
	return (error);
}
703 | |
#if vn_access_DEPRECATED
/*
 * Authorize an action against a vnode. This has been the canonical way to
 * ensure that the credential/process/etc. referenced by a vfs_context
 * is granted the rights called out in 'mode' against the vnode 'vp'.
 *
 * Unfortunately, the use of VREAD/VWRITE/VEXEC makes it very difficult
 * to add support for more rights.  As such, this interface will be deprecated
 * and callers will use vnode_authorize instead.
 */
int
vn_access(vnode_t vp, int mode, vfs_context_t context)
{
	kauth_action_t action = 0;

	/* Translate the legacy V* access bits into kauth vnode rights. */
	if (mode & VREAD) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (mode & VWRITE) {
		action |= KAUTH_VNODE_WRITE_DATA;
	}
	if (mode & VEXEC) {
		action |= KAUTH_VNODE_EXECUTE;
	}

	return vnode_authorize(vp, NULL, action, context);
}
#endif	/* vn_access_DEPRECATED */
730 | |
731 | /* |
732 | * Vnode close call |
733 | */ |
int
vn_close(struct vnode *vp, int flags, vfs_context_t ctx)
{
	int error;
	int flusherror = 0;

#if NAMEDRSRCFORK
	/* Sync data from resource fork shadow file if needed. */
	if ((vp->v_flag & VISNAMEDSTREAM) &&
	    (vp->v_parent != NULLVP) &&
	    vnode_isshadow(vp)) {
		if (flags & FWASWRITTEN) {
			flusherror = vnode_flushnamedstream(vp->v_parent, vp, ctx);
		}
	}
#endif

	/* work around for foxhound */
	/* NOTE(review): spec vnodes drop their usecount BEFORE VNOP_CLOSE, all
	 * others after (below); the ordering appears deliberate — confirm
	 * before changing. */
	if (vnode_isspec(vp))
		(void)vnode_rele_ext(vp, flags, 0);

	/*
	 * On HFS, we flush when the last writer closes.  We do this
	 * because resource fork vnodes hold a reference on data fork
	 * vnodes and that will prevent them from getting VNOP_INACTIVE
	 * which will delay when we flush cached data.  In future, we
	 * might find it beneficial to do this for all file systems.
	 * Note that it's OK to access v_writecount without the lock
	 * in this context.
	 */
	if (vp->v_tag == VT_HFS && (flags & FWRITE) && vp->v_writecount == 1)
		VNOP_FSYNC(vp, MNT_NOWAIT, ctx);

	error = VNOP_CLOSE(vp, flags, ctx);

#if CONFIG_FSE
	/* Emit a content-modified fsevent if the file was written via this open. */
	if (flags & FWASWRITTEN) {
		if (need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
			add_fsevent(FSE_CONTENT_MODIFIED, ctx,
				FSE_ARG_VNODE, vp,
				FSE_ARG_DONE);
		}
	}
#endif

	/* Non-spec vnodes drop their usecount here (spec already did, above). */
	if (!vnode_isspec(vp))
		(void)vnode_rele_ext(vp, flags, 0);

	/* A shadow-file flush failure overrides the VNOP_CLOSE result. */
	if (flusherror) {
		error = flusherror;
	}
	return (error);
}
787 | |
788 | static int |
789 | vn_read_swapfile( |
790 | struct vnode *vp, |
791 | uio_t uio) |
792 | { |
793 | int error; |
794 | off_t swap_count, this_count; |
795 | off_t file_end, read_end; |
796 | off_t prev_resid; |
797 | char *my_swap_page; |
798 | |
799 | /* |
800 | * Reading from a swap file will get you zeroes. |
801 | */ |
802 | |
803 | my_swap_page = NULL; |
804 | error = 0; |
805 | swap_count = uio_resid(uio); |
806 | |
807 | file_end = ubc_getsize(vp); |
808 | read_end = uio->uio_offset + uio_resid(uio); |
809 | if (uio->uio_offset >= file_end) { |
810 | /* uio starts after end of file: nothing to read */ |
811 | swap_count = 0; |
812 | } else if (read_end > file_end) { |
813 | /* uio extends beyond end of file: stop before that */ |
814 | swap_count -= (read_end - file_end); |
815 | } |
816 | |
817 | while (swap_count > 0) { |
818 | if (my_swap_page == NULL) { |
819 | MALLOC(my_swap_page, char *, PAGE_SIZE, |
820 | M_TEMP, M_WAITOK); |
821 | memset(my_swap_page, '\0', PAGE_SIZE); |
822 | /* add an end-of-line to keep line counters happy */ |
823 | my_swap_page[PAGE_SIZE-1] = '\n'; |
824 | } |
825 | this_count = swap_count; |
826 | if (this_count > PAGE_SIZE) { |
827 | this_count = PAGE_SIZE; |
828 | } |
829 | |
830 | prev_resid = uio_resid(uio); |
831 | error = uiomove((caddr_t) my_swap_page, |
832 | this_count, |
833 | uio); |
834 | if (error) { |
835 | break; |
836 | } |
837 | swap_count -= (prev_resid - uio_resid(uio)); |
838 | } |
839 | if (my_swap_page != NULL) { |
840 | FREE(my_swap_page, M_TEMP); |
841 | my_swap_page = NULL; |
842 | } |
843 | |
844 | return error; |
845 | } |
846 | /* |
847 | * Package up an I/O request on a vnode into a uio and do it. |
848 | */ |
849 | int |
850 | vn_rdwr( |
851 | enum uio_rw rw, |
852 | struct vnode *vp, |
853 | caddr_t base, |
854 | int len, |
855 | off_t offset, |
856 | enum uio_seg segflg, |
857 | int ioflg, |
858 | kauth_cred_t cred, |
859 | int *aresid, |
860 | proc_t p) |
861 | { |
862 | int64_t resid; |
863 | int result; |
864 | |
865 | result = vn_rdwr_64(rw, |
866 | vp, |
867 | (uint64_t)(uintptr_t)base, |
868 | (int64_t)len, |
869 | offset, |
870 | segflg, |
871 | ioflg, |
872 | cred, |
873 | &resid, |
874 | p); |
875 | |
876 | /* "resid" should be bounded above by "len," which is an int */ |
877 | if (aresid != NULL) { |
878 | *aresid = resid; |
879 | } |
880 | |
881 | return result; |
882 | } |
883 | |
884 | |
int
vn_rdwr_64(
	enum uio_rw rw,
	struct vnode *vp,
	uint64_t base,
	int64_t len,
	off_t offset,
	enum uio_seg segflg,
	int ioflg,
	kauth_cred_t cred,
	int64_t *aresid,
	proc_t p)
{
	uio_t auio;
	int spacetype;
	struct vfs_context context;	/* built locally from current thread + supplied cred */
	int error=0;
	char uio_buf[ UIO_SIZEOF(1) ];	/* stack storage for a single-iovec uio */

	context.vc_thread = current_thread();
	context.vc_ucred = cred;

	/* Pick the address-space type matching where "base" points. */
	if (UIO_SEG_IS_USER_SPACE(segflg)) {
		spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
	}
	else {
		spacetype = UIO_SYSSPACE;
	}
	auio = uio_createwithbuffer(1, offset, spacetype, rw,
				    &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, base, len);

#if CONFIG_MACF
	/* XXXMAC
	 * IO_NOAUTH should be re-examined.
 	 * Likely that mediation should be performed in caller.
	 */
	if ((ioflg & IO_NOAUTH) == 0) {
		/* passed cred is fp->f_cred */
		if (rw == UIO_READ)
			error = mac_vnode_check_read(&context, cred, vp);
		else
			error = mac_vnode_check_write(&context, cred, vp);
	}
#endif

	if (error == 0) {
		if (rw == UIO_READ) {
			/* Swap-file reads return zeroes unless the pager itself is reading. */
			if (vnode_isswap(vp) && ((ioflg & IO_SWAP_DISPATCH) == 0)) {
				error = vn_read_swapfile(vp, auio);
			} else {
				error = VNOP_READ(vp, auio, ioflg, &context);
			}
		} else {

#if DEVELOPMENT || DEBUG
			/*
			 * XXX VSWAP: Check for entitlements or special flag here
			 * so we can restrict access appropriately.
			 */
			error = VNOP_WRITE(vp, auio, ioflg, &context);
#else /* DEVELOPMENT || DEBUG */

			/* Release builds: refuse plain writes to a swap file. */
			if (vnode_isswap(vp) && ((ioflg & (IO_SWAP_DISPATCH | IO_SKIP_ENCRYPTION)) == 0)) {
				error = EPERM;
			} else {
				error = VNOP_WRITE(vp, auio, ioflg, &context);
			}
#endif /* DEVELOPMENT || DEBUG */
		}
	}

	/* Report the untransferred byte count, or treat a short transfer as EIO. */
	if (aresid)
		*aresid = uio_resid(auio);
	else
		if (uio_resid(auio) && error == 0)
			error = EIO;
	return (error);
}
964 | |
965 | static inline void |
966 | vn_offset_lock(struct fileglob *fg) |
967 | { |
968 | lck_mtx_lock_spin(&fg->fg_lock); |
969 | while (fg->fg_lflags & FG_OFF_LOCKED) { |
970 | fg->fg_lflags |= FG_OFF_LOCKWANT; |
971 | msleep(&fg->fg_lflags, &fg->fg_lock, PVFS | PSPIN, |
972 | "fg_offset_lock_wait" , 0); |
973 | } |
974 | fg->fg_lflags |= FG_OFF_LOCKED; |
975 | lck_mtx_unlock(&fg->fg_lock); |
976 | } |
977 | |
978 | static inline void |
979 | vn_offset_unlock(struct fileglob *fg) |
980 | { |
981 | int lock_wanted = 0; |
982 | |
983 | lck_mtx_lock_spin(&fg->fg_lock); |
984 | if (fg->fg_lflags & FG_OFF_LOCKWANT) { |
985 | lock_wanted = 1; |
986 | } |
987 | fg->fg_lflags &= ~(FG_OFF_LOCKED | FG_OFF_LOCKWANT); |
988 | lck_mtx_unlock(&fg->fg_lock); |
989 | if (lock_wanted) { |
990 | wakeup(&fg->fg_lflags); |
991 | } |
992 | } |
993 | |
994 | /* |
995 | * File table vnode read routine. |
996 | */ |
997 | static int |
998 | vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) |
999 | { |
1000 | struct vnode *vp; |
1001 | int error; |
1002 | int ioflag; |
1003 | off_t count; |
1004 | int offset_locked = 0; |
1005 | |
1006 | vp = (struct vnode *)fp->f_fglob->fg_data; |
1007 | if ( (error = vnode_getwithref(vp)) ) { |
1008 | return(error); |
1009 | } |
1010 | |
1011 | #if CONFIG_MACF |
1012 | error = mac_vnode_check_read(ctx, vfs_context_ucred(ctx), vp); |
1013 | if (error) { |
1014 | (void)vnode_put(vp); |
1015 | return (error); |
1016 | } |
1017 | #endif |
1018 | |
1019 | /* This signals to VNOP handlers that this read came from a file table read */ |
1020 | ioflag = IO_SYSCALL_DISPATCH; |
1021 | |
1022 | if (fp->f_fglob->fg_flag & FNONBLOCK) |
1023 | ioflag |= IO_NDELAY; |
1024 | if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) |
1025 | ioflag |= IO_NOCACHE; |
1026 | if (fp->f_fglob->fg_flag & FENCRYPTED) { |
1027 | ioflag |= IO_ENCRYPTED; |
1028 | } |
1029 | if (fp->f_fglob->fg_flag & FUNENCRYPTED) { |
1030 | ioflag |= IO_SKIP_ENCRYPTION; |
1031 | } |
1032 | if (fp->f_fglob->fg_flag & O_EVTONLY) { |
1033 | ioflag |= IO_EVTONLY; |
1034 | } |
1035 | if (fp->f_fglob->fg_flag & FNORDAHEAD) |
1036 | ioflag |= IO_RAOFF; |
1037 | |
1038 | if ((flags & FOF_OFFSET) == 0) { |
1039 | if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) { |
1040 | vn_offset_lock(fp->f_fglob); |
1041 | offset_locked = 1; |
1042 | } |
1043 | uio->uio_offset = fp->f_fglob->fg_offset; |
1044 | } |
1045 | count = uio_resid(uio); |
1046 | |
1047 | if (vnode_isswap(vp) && !(IO_SKIP_ENCRYPTION & ioflag)) { |
1048 | |
1049 | /* special case for swap files */ |
1050 | error = vn_read_swapfile(vp, uio); |
1051 | } else { |
1052 | error = VNOP_READ(vp, uio, ioflag, ctx); |
1053 | } |
1054 | |
1055 | if ((flags & FOF_OFFSET) == 0) { |
1056 | fp->f_fglob->fg_offset += count - uio_resid(uio); |
1057 | if (offset_locked) { |
1058 | vn_offset_unlock(fp->f_fglob); |
1059 | offset_locked = 0; |
1060 | } |
1061 | } |
1062 | |
1063 | (void)vnode_put(vp); |
1064 | return (error); |
1065 | } |
1066 | |
1067 | |
1068 | /* |
1069 | * File table vnode write routine. |
1070 | */ |
1071 | static int |
1072 | vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) |
1073 | { |
1074 | struct vnode *vp; |
1075 | int error, ioflag; |
1076 | off_t count; |
1077 | int clippedsize = 0; |
1078 | int partialwrite=0; |
1079 | int residcount, oldcount; |
1080 | int offset_locked = 0; |
1081 | proc_t p = vfs_context_proc(ctx); |
1082 | |
1083 | count = 0; |
1084 | vp = (struct vnode *)fp->f_fglob->fg_data; |
1085 | if ( (error = vnode_getwithref(vp)) ) { |
1086 | return(error); |
1087 | } |
1088 | |
1089 | #if DEVELOPMENT || DEBUG |
1090 | /* |
1091 | * XXX VSWAP: Check for entitlements or special flag here |
1092 | * so we can restrict access appropriately. |
1093 | */ |
1094 | #else /* DEVELOPMENT || DEBUG */ |
1095 | |
1096 | if (vnode_isswap(vp)) { |
1097 | (void)vnode_put(vp); |
1098 | error = EPERM; |
1099 | return (error); |
1100 | } |
1101 | #endif /* DEVELOPMENT || DEBUG */ |
1102 | |
1103 | |
1104 | #if CONFIG_MACF |
1105 | error = mac_vnode_check_write(ctx, vfs_context_ucred(ctx), vp); |
1106 | if (error) { |
1107 | (void)vnode_put(vp); |
1108 | return (error); |
1109 | } |
1110 | #endif |
1111 | |
1112 | /* |
1113 | * IO_SYSCALL_DISPATCH signals to VNOP handlers that this write came from |
1114 | * a file table write |
1115 | */ |
1116 | ioflag = (IO_UNIT | IO_SYSCALL_DISPATCH); |
1117 | |
1118 | if (vp->v_type == VREG && (fp->f_fglob->fg_flag & O_APPEND)) |
1119 | ioflag |= IO_APPEND; |
1120 | if (fp->f_fglob->fg_flag & FNONBLOCK) |
1121 | ioflag |= IO_NDELAY; |
1122 | if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) |
1123 | ioflag |= IO_NOCACHE; |
1124 | if (fp->f_fglob->fg_flag & FNODIRECT) |
1125 | ioflag |= IO_NODIRECT; |
1126 | if (fp->f_fglob->fg_flag & FSINGLE_WRITER) |
1127 | ioflag |= IO_SINGLE_WRITER; |
1128 | if (fp->f_fglob->fg_flag & O_EVTONLY) |
1129 | ioflag |= IO_EVTONLY; |
1130 | |
1131 | /* |
1132 | * Treat synchronous mounts and O_FSYNC on the fd as equivalent. |
1133 | * |
1134 | * XXX We treat O_DSYNC as O_FSYNC for now, since we can not delay |
1135 | * XXX the non-essential metadata without some additional VFS work; |
1136 | * XXX the intent at this point is to plumb the interface for it. |
1137 | */ |
1138 | if ((fp->f_fglob->fg_flag & (O_FSYNC|O_DSYNC)) || |
1139 | (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))) { |
1140 | ioflag |= IO_SYNC; |
1141 | } |
1142 | |
1143 | if ((flags & FOF_OFFSET) == 0) { |
1144 | if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) { |
1145 | vn_offset_lock(fp->f_fglob); |
1146 | offset_locked = 1; |
1147 | } |
1148 | uio->uio_offset = fp->f_fglob->fg_offset; |
1149 | count = uio_resid(uio); |
1150 | } |
1151 | if (((flags & FOF_OFFSET) == 0) && |
1152 | vfs_context_proc(ctx) && (vp->v_type == VREG) && |
1153 | (((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) || |
1154 | ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)))) { |
1155 | /* |
1156 | * If the requested residual would cause us to go past the |
1157 | * administrative limit, then we need to adjust the residual |
1158 | * down to cause fewer bytes than requested to be written. If |
1159 | * we can't do that (e.g. the residual is already 1 byte), |
1160 | * then we fail the write with EFBIG. |
1161 | */ |
1162 | residcount = uio_resid(uio); |
1163 | if ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) { |
1164 | clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur; |
1165 | } else if ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)) { |
1166 | clippedsize = (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset); |
1167 | } |
1168 | if (clippedsize >= residcount) { |
1169 | psignal(p, SIGXFSZ); |
1170 | error = EFBIG; |
1171 | goto error_out; |
1172 | } |
1173 | partialwrite = 1; |
1174 | uio_setresid(uio, residcount-clippedsize); |
1175 | } |
1176 | if ((flags & FOF_OFFSET) != 0) { |
1177 | /* for pwrite, append should be ignored */ |
1178 | ioflag &= ~IO_APPEND; |
1179 | if (p && (vp->v_type == VREG) && |
1180 | ((rlim_t)uio->uio_offset >= p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) { |
1181 | psignal(p, SIGXFSZ); |
1182 | error = EFBIG; |
1183 | goto error_out; |
1184 | } |
1185 | if (p && (vp->v_type == VREG) && |
1186 | ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) { |
1187 | //Debugger("vn_bwrite:overstepping the bounds"); |
1188 | residcount = uio_resid(uio); |
1189 | clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur; |
1190 | partialwrite = 1; |
1191 | uio_setresid(uio, residcount-clippedsize); |
1192 | } |
1193 | } |
1194 | |
1195 | error = VNOP_WRITE(vp, uio, ioflag, ctx); |
1196 | |
1197 | if (partialwrite) { |
1198 | oldcount = uio_resid(uio); |
1199 | uio_setresid(uio, oldcount + clippedsize); |
1200 | } |
1201 | |
1202 | if ((flags & FOF_OFFSET) == 0) { |
1203 | if (ioflag & IO_APPEND) |
1204 | fp->f_fglob->fg_offset = uio->uio_offset; |
1205 | else |
1206 | fp->f_fglob->fg_offset += count - uio_resid(uio); |
1207 | if (offset_locked) { |
1208 | vn_offset_unlock(fp->f_fglob); |
1209 | offset_locked = 0; |
1210 | } |
1211 | } |
1212 | |
1213 | /* |
1214 | * Set the credentials on successful writes |
1215 | */ |
1216 | if ((error == 0) && (vp->v_tag == VT_NFS) && (UBCINFOEXISTS(vp))) { |
1217 | /* |
1218 | * When called from aio subsystem, we only have the proc from |
1219 | * which to get the credential, at this point, so use that |
1220 | * instead. This means aio functions are incompatible with |
1221 | * per-thread credentials (aio operations are proxied). We |
1222 | * can't easily correct the aio vs. settid race in this case |
1223 | * anyway, so we disallow it. |
1224 | */ |
1225 | if ((flags & FOF_PCRED) == 0) { |
1226 | ubc_setthreadcred(vp, p, current_thread()); |
1227 | } else { |
1228 | ubc_setcred(vp, p); |
1229 | } |
1230 | } |
1231 | (void)vnode_put(vp); |
1232 | return (error); |
1233 | |
1234 | error_out: |
1235 | if (offset_locked) { |
1236 | vn_offset_unlock(fp->f_fglob); |
1237 | } |
1238 | (void)vnode_put(vp); |
1239 | return (error); |
1240 | } |
1241 | |
1242 | /* |
1243 | * File table vnode stat routine. |
1244 | * |
1245 | * Returns: 0 Success |
1246 | * EBADF |
1247 | * ENOMEM |
1248 | * vnode_getattr:??? |
1249 | */ |
1250 | int |
1251 | vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat64, |
1252 | vfs_context_t ctx, struct ucred *file_cred) |
1253 | { |
1254 | struct vnode_attr va; |
1255 | int error; |
1256 | u_short mode; |
1257 | kauth_filesec_t fsec; |
1258 | struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */ |
1259 | struct stat64 * sb64 = (struct stat64 *)0; /* warning avoidance ; protected by isstat64 */ |
1260 | |
1261 | if (isstat64 != 0) |
1262 | sb64 = (struct stat64 *)sbptr; |
1263 | else |
1264 | sb = (struct stat *)sbptr; |
1265 | memset(&va, 0, sizeof(va)); |
1266 | VATTR_INIT(&va); |
1267 | VATTR_WANTED(&va, va_fsid); |
1268 | VATTR_WANTED(&va, va_fileid); |
1269 | VATTR_WANTED(&va, va_mode); |
1270 | VATTR_WANTED(&va, va_type); |
1271 | VATTR_WANTED(&va, va_nlink); |
1272 | VATTR_WANTED(&va, va_uid); |
1273 | VATTR_WANTED(&va, va_gid); |
1274 | VATTR_WANTED(&va, va_rdev); |
1275 | VATTR_WANTED(&va, va_data_size); |
1276 | VATTR_WANTED(&va, va_access_time); |
1277 | VATTR_WANTED(&va, va_modify_time); |
1278 | VATTR_WANTED(&va, va_change_time); |
1279 | VATTR_WANTED(&va, va_create_time); |
1280 | VATTR_WANTED(&va, va_flags); |
1281 | VATTR_WANTED(&va, va_gen); |
1282 | VATTR_WANTED(&va, va_iosize); |
1283 | /* lower layers will synthesise va_total_alloc from va_data_size if required */ |
1284 | VATTR_WANTED(&va, va_total_alloc); |
1285 | if (xsec != NULL) { |
1286 | VATTR_WANTED(&va, va_uuuid); |
1287 | VATTR_WANTED(&va, va_guuid); |
1288 | VATTR_WANTED(&va, va_acl); |
1289 | } |
1290 | error = vnode_getattr(vp, &va, ctx); |
1291 | if (error) |
1292 | goto out; |
1293 | #if CONFIG_MACF |
1294 | /* |
1295 | * Give MAC polices a chance to reject or filter the attributes |
1296 | * returned by the filesystem. Note that MAC policies are consulted |
1297 | * *after* calling the filesystem because filesystems can return more |
1298 | * attributes than were requested so policies wouldn't be authoritative |
1299 | * is consulted beforehand. This also gives policies an opportunity |
1300 | * to change the values of attributes retrieved. |
1301 | */ |
1302 | error = mac_vnode_check_getattr(ctx, file_cred, vp, &va); |
1303 | if (error) |
1304 | goto out; |
1305 | #endif |
1306 | /* |
1307 | * Copy from vattr table |
1308 | */ |
1309 | if (isstat64 != 0) { |
1310 | sb64->st_dev = va.va_fsid; |
1311 | sb64->st_ino = (ino64_t)va.va_fileid; |
1312 | |
1313 | } else { |
1314 | sb->st_dev = va.va_fsid; |
1315 | sb->st_ino = (ino_t)va.va_fileid; |
1316 | } |
1317 | mode = va.va_mode; |
1318 | switch (vp->v_type) { |
1319 | case VREG: |
1320 | mode |= S_IFREG; |
1321 | break; |
1322 | case VDIR: |
1323 | mode |= S_IFDIR; |
1324 | break; |
1325 | case VBLK: |
1326 | mode |= S_IFBLK; |
1327 | break; |
1328 | case VCHR: |
1329 | mode |= S_IFCHR; |
1330 | break; |
1331 | case VLNK: |
1332 | mode |= S_IFLNK; |
1333 | break; |
1334 | case VSOCK: |
1335 | mode |= S_IFSOCK; |
1336 | break; |
1337 | case VFIFO: |
1338 | mode |= S_IFIFO; |
1339 | break; |
1340 | default: |
1341 | error = EBADF; |
1342 | goto out; |
1343 | }; |
1344 | if (isstat64 != 0) { |
1345 | sb64->st_mode = mode; |
1346 | sb64->st_nlink = VATTR_IS_SUPPORTED(&va, va_nlink) ? va.va_nlink > UINT16_MAX ? UINT16_MAX : (u_int16_t)va.va_nlink : 1; |
1347 | sb64->st_uid = va.va_uid; |
1348 | sb64->st_gid = va.va_gid; |
1349 | sb64->st_rdev = va.va_rdev; |
1350 | sb64->st_size = va.va_data_size; |
1351 | sb64->st_atimespec = va.va_access_time; |
1352 | sb64->st_mtimespec = va.va_modify_time; |
1353 | sb64->st_ctimespec = va.va_change_time; |
1354 | if (VATTR_IS_SUPPORTED(&va, va_create_time)) { |
1355 | sb64->st_birthtimespec = va.va_create_time; |
1356 | } else { |
1357 | sb64->st_birthtimespec.tv_sec = sb64->st_birthtimespec.tv_nsec = 0; |
1358 | } |
1359 | sb64->st_blksize = va.va_iosize; |
1360 | sb64->st_flags = va.va_flags; |
1361 | sb64->st_blocks = roundup(va.va_total_alloc, 512) / 512; |
1362 | } else { |
1363 | sb->st_mode = mode; |
1364 | sb->st_nlink = VATTR_IS_SUPPORTED(&va, va_nlink) ? va.va_nlink > UINT16_MAX ? UINT16_MAX : (u_int16_t)va.va_nlink : 1; |
1365 | sb->st_uid = va.va_uid; |
1366 | sb->st_gid = va.va_gid; |
1367 | sb->st_rdev = va.va_rdev; |
1368 | sb->st_size = va.va_data_size; |
1369 | sb->st_atimespec = va.va_access_time; |
1370 | sb->st_mtimespec = va.va_modify_time; |
1371 | sb->st_ctimespec = va.va_change_time; |
1372 | sb->st_blksize = va.va_iosize; |
1373 | sb->st_flags = va.va_flags; |
1374 | sb->st_blocks = roundup(va.va_total_alloc, 512) / 512; |
1375 | } |
1376 | |
1377 | /* if we're interested in extended security data and we got an ACL */ |
1378 | if (xsec != NULL) { |
1379 | if (!VATTR_IS_SUPPORTED(&va, va_acl) && |
1380 | !VATTR_IS_SUPPORTED(&va, va_uuuid) && |
1381 | !VATTR_IS_SUPPORTED(&va, va_guuid)) { |
1382 | *xsec = KAUTH_FILESEC_NONE; |
1383 | } else { |
1384 | |
1385 | if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) { |
1386 | fsec = kauth_filesec_alloc(va.va_acl->acl_entrycount); |
1387 | } else { |
1388 | fsec = kauth_filesec_alloc(0); |
1389 | } |
1390 | if (fsec == NULL) { |
1391 | error = ENOMEM; |
1392 | goto out; |
1393 | } |
1394 | fsec->fsec_magic = KAUTH_FILESEC_MAGIC; |
1395 | if (VATTR_IS_SUPPORTED(&va, va_uuuid)) { |
1396 | fsec->fsec_owner = va.va_uuuid; |
1397 | } else { |
1398 | fsec->fsec_owner = kauth_null_guid; |
1399 | } |
1400 | if (VATTR_IS_SUPPORTED(&va, va_guuid)) { |
1401 | fsec->fsec_group = va.va_guuid; |
1402 | } else { |
1403 | fsec->fsec_group = kauth_null_guid; |
1404 | } |
1405 | if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) { |
1406 | bcopy(va.va_acl, &(fsec->fsec_acl), KAUTH_ACL_COPYSIZE(va.va_acl)); |
1407 | } else { |
1408 | fsec->fsec_acl.acl_entrycount = KAUTH_FILESEC_NOACL; |
1409 | } |
1410 | *xsec = fsec; |
1411 | } |
1412 | } |
1413 | |
1414 | /* Do not give the generation number out to unpriviledged users */ |
1415 | if (va.va_gen && !vfs_context_issuser(ctx)) { |
1416 | if (isstat64 != 0) |
1417 | sb64->st_gen = 0; |
1418 | else |
1419 | sb->st_gen = 0; |
1420 | } else { |
1421 | if (isstat64 != 0) |
1422 | sb64->st_gen = va.va_gen; |
1423 | else |
1424 | sb->st_gen = va.va_gen; |
1425 | } |
1426 | |
1427 | error = 0; |
1428 | out: |
1429 | if (VATTR_IS_SUPPORTED(&va, va_acl) && va.va_acl != NULL) |
1430 | kauth_acl_free(va.va_acl); |
1431 | return (error); |
1432 | } |
1433 | |
1434 | int |
1435 | vn_stat(struct vnode *vp, void *sb, kauth_filesec_t *xsec, int isstat64, vfs_context_t ctx) |
1436 | { |
1437 | int error; |
1438 | |
1439 | #if CONFIG_MACF |
1440 | error = mac_vnode_check_stat(ctx, NOCRED, vp); |
1441 | if (error) |
1442 | return (error); |
1443 | #endif |
1444 | |
1445 | /* authorize */ |
1446 | if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_ATTRIBUTES | KAUTH_VNODE_READ_SECURITY, ctx)) != 0) |
1447 | return(error); |
1448 | |
1449 | /* actual stat */ |
1450 | return(vn_stat_noauth(vp, sb, xsec, isstat64, ctx, NOCRED)); |
1451 | } |
1452 | |
1453 | |
1454 | /* |
1455 | * File table vnode ioctl routine. |
1456 | */ |
1457 | static int |
1458 | vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) |
1459 | { |
1460 | struct vnode *vp = ((struct vnode *)fp->f_fglob->fg_data); |
1461 | off_t file_size; |
1462 | int error; |
1463 | struct vnode *ttyvp; |
1464 | struct session * sessp; |
1465 | |
1466 | if ( (error = vnode_getwithref(vp)) ) { |
1467 | return(error); |
1468 | } |
1469 | |
1470 | #if CONFIG_MACF |
1471 | error = mac_vnode_check_ioctl(ctx, vp, com); |
1472 | if (error) |
1473 | goto out; |
1474 | #endif |
1475 | |
1476 | switch (vp->v_type) { |
1477 | case VREG: |
1478 | case VDIR: |
1479 | if (com == FIONREAD) { |
1480 | if ((error = vnode_size(vp, &file_size, ctx)) != 0) |
1481 | goto out; |
1482 | *(int *)data = file_size - fp->f_fglob->fg_offset; |
1483 | goto out; |
1484 | } |
1485 | if (com == FIONBIO || com == FIOASYNC) { /* XXX */ |
1486 | goto out; |
1487 | } |
1488 | /* fall into ... */ |
1489 | |
1490 | default: |
1491 | error = ENOTTY; |
1492 | goto out; |
1493 | |
1494 | case VFIFO: |
1495 | case VCHR: |
1496 | case VBLK: |
1497 | |
1498 | /* Should not be able to set block size from user space */ |
1499 | if (com == DKIOCSETBLOCKSIZE) { |
1500 | error = EPERM; |
1501 | goto out; |
1502 | } |
1503 | |
1504 | if (com == FIODTYPE) { |
1505 | if (vp->v_type == VBLK) { |
1506 | if (major(vp->v_rdev) >= nblkdev) { |
1507 | error = ENXIO; |
1508 | goto out; |
1509 | } |
1510 | *(int *)data = bdevsw[major(vp->v_rdev)].d_type; |
1511 | |
1512 | } else if (vp->v_type == VCHR) { |
1513 | if (major(vp->v_rdev) >= nchrdev) { |
1514 | error = ENXIO; |
1515 | goto out; |
1516 | } |
1517 | *(int *)data = cdevsw[major(vp->v_rdev)].d_type; |
1518 | } else { |
1519 | error = ENOTTY; |
1520 | goto out; |
1521 | } |
1522 | goto out; |
1523 | } |
1524 | error = VNOP_IOCTL(vp, com, data, fp->f_fglob->fg_flag, ctx); |
1525 | |
1526 | if (error == 0 && com == TIOCSCTTY) { |
1527 | sessp = proc_session(vfs_context_proc(ctx)); |
1528 | |
1529 | session_lock(sessp); |
1530 | ttyvp = sessp->s_ttyvp; |
1531 | sessp->s_ttyvp = vp; |
1532 | sessp->s_ttyvid = vnode_vid(vp); |
1533 | session_unlock(sessp); |
1534 | session_rele(sessp); |
1535 | } |
1536 | } |
1537 | out: |
1538 | (void)vnode_put(vp); |
1539 | return(error); |
1540 | } |
1541 | |
1542 | /* |
1543 | * File table vnode select routine. |
1544 | */ |
1545 | static int |
1546 | vn_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx) |
1547 | { |
1548 | int error; |
1549 | struct vnode * vp = (struct vnode *)fp->f_fglob->fg_data; |
1550 | struct vfs_context context; |
1551 | |
1552 | if ( (error = vnode_getwithref(vp)) == 0 ) { |
1553 | context.vc_thread = current_thread(); |
1554 | context.vc_ucred = fp->f_fglob->fg_cred; |
1555 | |
1556 | #if CONFIG_MACF |
1557 | /* |
1558 | * XXX We should use a per thread credential here; minimally, |
1559 | * XXX the process credential should have a persistent |
1560 | * XXX reference on it before being passed in here. |
1561 | */ |
1562 | error = mac_vnode_check_select(ctx, vp, which); |
1563 | if (error == 0) |
1564 | #endif |
1565 | error = VNOP_SELECT(vp, which, fp->f_fglob->fg_flag, wql, ctx); |
1566 | |
1567 | (void)vnode_put(vp); |
1568 | } |
1569 | return(error); |
1570 | |
1571 | } |
1572 | |
1573 | /* |
1574 | * File table vnode close routine. |
1575 | */ |
1576 | static int |
1577 | vn_closefile(struct fileglob *fg, vfs_context_t ctx) |
1578 | { |
1579 | struct vnode *vp = fg->fg_data; |
1580 | int error; |
1581 | |
1582 | if ( (error = vnode_getwithref(vp)) == 0 ) { |
1583 | if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE && |
1584 | ((fg->fg_flag & FHASLOCK) != 0 || |
1585 | (fg->fg_lflags & FG_HAS_OFDLOCK) != 0)) { |
1586 | struct flock lf = { |
1587 | .l_whence = SEEK_SET, |
1588 | .l_start = 0, |
1589 | .l_len = 0, |
1590 | .l_type = F_UNLCK |
1591 | }; |
1592 | |
1593 | if ((fg->fg_flag & FHASLOCK) != 0) |
1594 | (void) VNOP_ADVLOCK(vp, (caddr_t)fg, |
1595 | F_UNLCK, &lf, F_FLOCK, ctx, NULL); |
1596 | |
1597 | if ((fg->fg_lflags & FG_HAS_OFDLOCK) != 0) |
1598 | (void) VNOP_ADVLOCK(vp, (caddr_t)fg, |
1599 | F_UNLCK, &lf, F_OFD_LOCK, ctx, NULL); |
1600 | } |
1601 | error = vn_close(vp, fg->fg_flag, ctx); |
1602 | (void) vnode_put(vp); |
1603 | } |
1604 | return (error); |
1605 | } |
1606 | |
1607 | /* |
1608 | * Returns: 0 Success |
1609 | * VNOP_PATHCONF:??? |
1610 | */ |
1611 | int |
1612 | vn_pathconf(vnode_t vp, int name, int32_t *retval, vfs_context_t ctx) |
1613 | { |
1614 | int error = 0; |
1615 | struct vfs_attr vfa; |
1616 | |
1617 | switch(name) { |
1618 | case _PC_EXTENDED_SECURITY_NP: |
1619 | *retval = vfs_extendedsecurity(vnode_mount(vp)) ? 1 : 0; |
1620 | break; |
1621 | case _PC_AUTH_OPAQUE_NP: |
1622 | *retval = vfs_authopaque(vnode_mount(vp)); |
1623 | break; |
1624 | case _PC_2_SYMLINKS: |
1625 | *retval = 1; /* XXX NOTSUP on MSDOS, etc. */ |
1626 | break; |
1627 | case _PC_ALLOC_SIZE_MIN: |
1628 | *retval = 1; /* XXX lie: 1 byte */ |
1629 | break; |
1630 | case _PC_ASYNC_IO: /* unistd.h: _POSIX_ASYNCHRONUS_IO */ |
1631 | *retval = 1; /* [AIO] option is supported */ |
1632 | break; |
1633 | case _PC_PRIO_IO: /* unistd.h: _POSIX_PRIORITIZED_IO */ |
1634 | *retval = 0; /* [PIO] option is not supported */ |
1635 | break; |
1636 | case _PC_REC_INCR_XFER_SIZE: |
1637 | *retval = 4096; /* XXX go from MIN to MAX 4K at a time */ |
1638 | break; |
1639 | case _PC_REC_MIN_XFER_SIZE: |
1640 | *retval = 4096; /* XXX recommend 4K minimum reads/writes */ |
1641 | break; |
1642 | case _PC_REC_MAX_XFER_SIZE: |
1643 | *retval = 65536; /* XXX recommend 64K maximum reads/writes */ |
1644 | break; |
1645 | case _PC_REC_XFER_ALIGN: |
1646 | *retval = 4096; /* XXX recommend page aligned buffers */ |
1647 | break; |
1648 | case _PC_SYMLINK_MAX: |
1649 | *retval = 255; /* Minimum acceptable POSIX value */ |
1650 | break; |
1651 | case _PC_SYNC_IO: /* unistd.h: _POSIX_SYNCHRONIZED_IO */ |
1652 | *retval = 0; /* [SIO] option is not supported */ |
1653 | break; |
1654 | case _PC_XATTR_SIZE_BITS: |
1655 | /* The number of bits used to store maximum extended |
1656 | * attribute size in bytes. For example, if the maximum |
1657 | * attribute size supported by a file system is 128K, the |
1658 | * value returned will be 18. However a value 18 can mean |
1659 | * that the maximum attribute size can be anywhere from |
1660 | * (256KB - 1) to 128KB. As a special case, the resource |
1661 | * fork can have much larger size, and some file system |
1662 | * specific extended attributes can have smaller and preset |
1663 | * size; for example, Finder Info is always 32 bytes. |
1664 | */ |
1665 | memset(&vfa, 0, sizeof(vfa)); |
1666 | VFSATTR_INIT(&vfa); |
1667 | VFSATTR_WANTED(&vfa, f_capabilities); |
1668 | if (vfs_getattr(vnode_mount(vp), &vfa, ctx) == 0 && |
1669 | (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) && |
1670 | (vfa.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) && |
1671 | (vfa.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) { |
1672 | /* Supports native extended attributes */ |
1673 | error = VNOP_PATHCONF(vp, name, retval, ctx); |
1674 | } else { |
1675 | /* Number of bits used to represent the maximum size of |
1676 | * extended attribute stored in an Apple Double file. |
1677 | */ |
1678 | *retval = AD_XATTR_SIZE_BITS; |
1679 | } |
1680 | break; |
1681 | default: |
1682 | error = VNOP_PATHCONF(vp, name, retval, ctx); |
1683 | break; |
1684 | } |
1685 | |
1686 | return (error); |
1687 | } |
1688 | |
/*
 * Attach a knote to the vnode backing "fp".
 *
 * EVFILT_READ/EVFILT_WRITE are only supported on regular files and
 * fifofs FIFOs; character devices get a chance to claim the knote via
 * spec_kqfilter().  EVFILT_VNODE is always accepted.  On success the
 * knote is linked to the vnode's knote list and the FS is asked (on a
 * best-effort basis) to deliver monitor notifications.  On failure the
 * error is reported through the knote's EV_ERROR/kn_data convention.
 */
static int
vn_kqfilt_add(struct fileproc *fp, struct knote *kn,
		struct kevent_internal_s *kev, vfs_context_t ctx)
{
	struct vnode *vp;
	int error = 0;
	int result = 0;

	vp = (struct vnode *)fp->f_fglob->fg_data;

	/*
	 * Don't attach a knote to a dead vnode.
	 */
	if ((error = vget_internal(vp, 0, VNODE_NODEAD)) == 0) {
		switch (kn->kn_filter) {
		case EVFILT_READ:
		case EVFILT_WRITE:
			if (vnode_isfifo(vp)) {
				/* We'll only watch FIFOs that use our fifofs */
				if (!(vp->v_fifoinfo && vp->v_fifoinfo->fi_readsock)) {
					error = ENOTSUP;
				}

			} else if (!vnode_isreg(vp)) {
				if (vnode_ischr(vp)) {
					result = spec_kqfilter(vp, kn, kev);
					if ((kn->kn_flags & EV_ERROR) == 0) {
						/* claimed by a special device */
						vnode_put(vp);
						return result;
					}
				}
				error = EINVAL;
			}
			break;
		case EVFILT_VNODE:
			break;
		default:
			error = EINVAL;
		}

		if (error == 0) {

#if CONFIG_MACF
			error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp);
			if (error) {
				/* iocount dropped here; "out" only reports the error */
				vnode_put(vp);
				goto out;
			}
#endif

			/* remember the vnode and its vid so detach can revalidate */
			kn->kn_hook = (void*)vp;
			kn->kn_hookid = vnode_vid(vp);
			kn->kn_filtid = EVFILTID_VN;

			vnode_lock(vp);
			KNOTE_ATTACH(&vp->v_knotes, kn);
			result = filt_vnode_common(kn, vp, 0);
			vnode_unlock(vp);

			/*
			 * Ask the filesystem to provide remove notifications,
			 * but ignore failure
			 */
			VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx);
		}

		vnode_put(vp);
	}

 out:
	if (error) {
		/* report attach failure through the knote itself */
		kn->kn_flags = EV_ERROR;
		kn->kn_data = error;
	}

	return result;
}
1767 | |
1768 | static void |
1769 | filt_vndetach(struct knote *kn) |
1770 | { |
1771 | vfs_context_t ctx = vfs_context_current(); |
1772 | struct vnode *vp; |
1773 | vp = (struct vnode *)kn->kn_hook; |
1774 | if (vnode_getwithvid(vp, kn->kn_hookid)) |
1775 | return; |
1776 | |
1777 | vnode_lock(vp); |
1778 | KNOTE_DETACH(&vp->v_knotes, kn); |
1779 | vnode_unlock(vp); |
1780 | |
1781 | /* |
1782 | * Tell a (generally networked) filesystem that we're no longer watching |
1783 | * If the FS wants to track contexts, it should still be using the one from |
1784 | * the VNODE_MONITOR_BEGIN. |
1785 | */ |
1786 | VNOP_MONITOR(vp, 0, VNODE_MONITOR_END, (void*)kn, ctx); |
1787 | vnode_put(vp); |
1788 | } |
1789 | |
1790 | |
1791 | /* |
1792 | * Used for EVFILT_READ |
1793 | * |
1794 | * Takes only VFIFO or VREG. vnode is locked. We handle the "poll" case |
1795 | * differently than the regular case for VREG files. If not in poll(), |
1796 | * then we need to know current fileproc offset for VREG. |
1797 | */ |
1798 | static int64_t |
1799 | vnode_readable_data_count(vnode_t vp, off_t current_offset, int ispoll) |
1800 | { |
1801 | if (vnode_isfifo(vp)) { |
1802 | #if FIFO |
1803 | int cnt; |
1804 | int err = fifo_charcount(vp, &cnt); |
1805 | if (err == 0) { |
1806 | return (int64_t)cnt; |
1807 | } else |
1808 | #endif |
1809 | { |
1810 | return 0; |
1811 | } |
1812 | } else if (vnode_isreg(vp)) { |
1813 | if (ispoll) { |
1814 | return 1; |
1815 | } |
1816 | |
1817 | off_t amount; |
1818 | amount = vp->v_un.vu_ubcinfo->ui_size - current_offset; |
1819 | if (amount > INT64_MAX) { |
1820 | return INT64_MAX; |
1821 | } else if (amount < INT64_MIN) { |
1822 | return INT64_MIN; |
1823 | } else { |
1824 | return (int64_t)amount; |
1825 | } |
1826 | } else { |
1827 | panic("Should never have an EVFILT_READ except for reg or fifo." ); |
1828 | return 0; |
1829 | } |
1830 | } |
1831 | |
1832 | /* |
1833 | * Used for EVFILT_WRITE. |
1834 | * |
1835 | * For regular vnodes, we can always write (1). For named pipes, |
1836 | * see how much space there is in the buffer. Nothing else is covered. |
1837 | */ |
1838 | static intptr_t |
1839 | vnode_writable_space_count(vnode_t vp) |
1840 | { |
1841 | if (vnode_isfifo(vp)) { |
1842 | #if FIFO |
1843 | long spc; |
1844 | int err = fifo_freespace(vp, &spc); |
1845 | if (err == 0) { |
1846 | return (intptr_t)spc; |
1847 | } else |
1848 | #endif |
1849 | { |
1850 | return (intptr_t)0; |
1851 | } |
1852 | } else if (vnode_isreg(vp)) { |
1853 | return (intptr_t)1; |
1854 | } else { |
1855 | panic("Should never have an EVFILT_READ except for reg or fifo." ); |
1856 | return 0; |
1857 | } |
1858 | } |
1859 | |
1860 | /* |
1861 | * Determine whether this knote should be active |
1862 | * |
1863 | * This is kind of subtle. |
1864 | * --First, notice if the vnode has been revoked: in so, override hint |
1865 | * --EVFILT_READ knotes are checked no matter what the hint is |
1866 | * --Other knotes activate based on hint. |
1867 | * --If hint is revoke, set special flags and activate |
1868 | */ |
1869 | static int |
1870 | filt_vnode_common(struct knote *kn, vnode_t vp, long hint) |
1871 | { |
1872 | int activate = 0; |
1873 | |
1874 | lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED); |
1875 | |
1876 | /* Special handling for vnodes that are in recycle or already gone */ |
1877 | if (NOTE_REVOKE == hint) { |
1878 | kn->kn_flags |= (EV_EOF | EV_ONESHOT); |
1879 | activate = 1; |
1880 | |
1881 | if ((kn->kn_filter == EVFILT_VNODE) && (kn->kn_sfflags & NOTE_REVOKE)) { |
1882 | kn->kn_fflags |= NOTE_REVOKE; |
1883 | } |
1884 | } else { |
1885 | switch(kn->kn_filter) { |
1886 | case EVFILT_READ: |
1887 | kn->kn_data = vnode_readable_data_count(vp, kn->kn_fp->f_fglob->fg_offset, (kn->kn_flags & EV_POLL)); |
1888 | |
1889 | if (kn->kn_data != 0) { |
1890 | activate = 1; |
1891 | } |
1892 | break; |
1893 | case EVFILT_WRITE: |
1894 | kn->kn_data = vnode_writable_space_count(vp); |
1895 | |
1896 | if (kn->kn_data != 0) { |
1897 | activate = 1; |
1898 | } |
1899 | break; |
1900 | case EVFILT_VNODE: |
1901 | /* Check events this note matches against the hint */ |
1902 | if (kn->kn_sfflags & hint) { |
1903 | kn->kn_fflags |= hint; /* Set which event occurred */ |
1904 | } |
1905 | if (kn->kn_fflags != 0) { |
1906 | activate = 1; |
1907 | } |
1908 | break; |
1909 | default: |
1910 | panic("Invalid knote filter on a vnode!\n" ); |
1911 | } |
1912 | } |
1913 | return (activate); |
1914 | } |
1915 | |
1916 | static int |
1917 | filt_vnode(struct knote *kn, long hint) |
1918 | { |
1919 | vnode_t vp = (struct vnode *)kn->kn_hook; |
1920 | |
1921 | return filt_vnode_common(kn, vp, hint); |
1922 | } |
1923 | |
/*
 * knote "touch" callback: accept a new saved-fflags mask from
 * userspace and re-evaluate whether the knote should fire.  If the
 * vnode has been recycled since attach, treat it as revoked.
 */
static int
filt_vntouch(struct knote *kn, struct kevent_internal_s *kev)
{
	vnode_t vp = (struct vnode *)kn->kn_hook;
	int activate;
	int hint = 0;

	vnode_lock(vp);
	if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
		/* is recycled */
		hint = NOTE_REVOKE;
	}

	/* accept new input fflags mask */
	kn->kn_sfflags = kev->fflags;

	activate = filt_vnode_common(kn, vp, hint);

	/* drop the iocount taken above; none was taken on the recycle path */
	if (hint == 0)
		vnode_put_locked(vp);
	vnode_unlock(vp);

	return activate;
}
1948 | |
/*
 * knote "process" callback: re-evaluate the knote and, if it fires,
 * copy the event out to "kev" (clearing the saved state for EV_CLEAR
 * knotes).  A recycled vnode is treated as revoked.
 */
static int
filt_vnprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
{
#pragma unused(data)
	vnode_t vp = (struct vnode *)kn->kn_hook;
	int activate;
	int hint = 0;

	vnode_lock(vp);
	if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
		/* Is recycled */
		hint = NOTE_REVOKE;
	}
	activate = filt_vnode_common(kn, vp, hint);
	if (activate) {
		*kev = kn->kn_kevent;
		/* EV_CLEAR knotes reset their state once delivered */
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
	}

	/* Definitely need to unlock, may need to put */
	if (hint == 0)
		vnode_put_locked(vp);
	vnode_unlock(vp);

	return activate;
}
1978 | |
1979 | |