1/*
2 * Copyright (c) 2002-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1982, 1986, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Robert Elz at The University of Melbourne.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgement:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)vfs_quota.c
64 * derived from @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95
65 */
66
67#include <sys/param.h>
68#include <sys/kernel.h>
69#include <sys/systm.h>
70#include <kern/zalloc.h>
71#include <sys/file_internal.h>
72#include <sys/proc_internal.h>
73#include <sys/vnode_internal.h>
74#include <sys/mount_internal.h>
75#include <sys/quota.h>
76#include <sys/uio_internal.h>
77
78#include <libkern/OSByteOrder.h>
79
80
81/* vars for quota file lock */
82static LCK_GRP_DECLARE(qf_lck_grp, "quota file");
83
84/* vars for quota list lock */
85static LCK_GRP_DECLARE(quota_list_lck_grp, "quuota list");
86static LCK_MTX_DECLARE(quota_list_mtx_lock, &quota_list_lck_grp);
87
/* Routines to lock and unlock the quota global data */
static int dq_list_lock(void);
static void dq_list_unlock(void);

/* Per-dquot lock-bit helpers; both must be called with the quota list lock held. */
static void dq_lock_internal(struct dquot *dq);
static void dq_unlock_internal(struct dquot *dq);

/* Expected on-disk magic number for each quota type (from INITQMAGICS). */
static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS;
96
97
/*
 * Code pertaining to management of the in-core dquot data structures.
 */
/* Hash a (quota file vnode, id) pair to a chain head; dqhash is the mask. */
#define DQHASH(dqvp, id) \
	(&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) * dqhashtbl;
u_long dqhash;

#define DQUOTINC 5 /* minimum free dquots desired */
long numdquot, desireddquot = DQUOTINC;

/*
 * Dquot free list.
 */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
/*
 * Dquot dirty orphans list
 */
TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist;

/* Typed kalloc zone used for all struct dquot allocations. */
KALLOC_TYPE_DEFINE(KT_DQUOT, struct dquot, KT_PRIV_ACCT);

static int dqlookup(struct quotafile *, u_int32_t, struct dqblk *, u_int32_t *);
static int dqsync_locked(struct dquot *dq);

/* Quota file mutex and reference-count helpers. */
static void qf_lock(struct quotafile *);
static void qf_unlock(struct quotafile *);
static int qf_ref(struct quotafile *);
static void qf_rele(struct quotafile *);
127
128
129/*
130 * Report whether dqhashinit has been run.
131 */
132int
133dqisinitialized(void)
134{
135 return dqhashtbl != NULL;
136}
137
/*
 * Initialize hash table for dquot structures.
 *
 * Idempotent: the check under the quota list lock makes a second
 * concurrent or later call a no-op.
 */
void
dqhashinit(void)
{
	dq_list_lock();
	if (dqisinitialized()) {
		goto out;
	}

	TAILQ_INIT(&dqfreelist);
	TAILQ_INIT(&dqdirtylist);
	/* size the hash table from the system-wide vnode target */
	dqhashtbl = hashinit(count: desiredvnodes, M_DQUOT, hashmask: &dqhash);
out:
	dq_list_unlock();
}
155
156
/* Bumped on every acquisition so callers that drop the lock can detect intervening holders. */
static volatile int dq_list_lock_cnt = 0;

/*
 * Take the global quota list lock.  Returns the new generation count,
 * which the caller can later compare via dq_list_lock_changed().
 */
static int
dq_list_lock(void)
{
	lck_mtx_lock(lck: &quota_list_mtx_lock);
	return ++dq_list_lock_cnt;
}
165
166static int
167dq_list_lock_changed(int oldval)
168{
169 return dq_list_lock_cnt != oldval;
170}
171
/*
 * Sample the current generation count; meaningful only while the
 * caller holds the quota list lock.
 */
static int
dq_list_lock_val(void)
{
	return dq_list_lock_cnt;
}
177
/* Release the global quota list lock. */
void
dq_list_unlock(void)
{
	lck_mtx_unlock(lck: &quota_list_mtx_lock);
}
183
184
/*
 * Acquire the per-dquot lock bit, sleeping while another thread holds
 * it.  msleep drops (and re-takes) the quota list lock, so callers
 * must re-validate any state sampled before this call.
 *
 * must be called with the quota_list_lock held
 */
void
dq_lock_internal(struct dquot *dq)
{
	while (dq->dq_lflags & DQ_LLOCK) {
		dq->dq_lflags |= DQ_LWANT;
		msleep(chan: &dq->dq_lflags, mtx: &quota_list_mtx_lock, PVFS, wmesg: "dq_lock_internal", NULL);
	}
	dq->dq_lflags |= DQ_LLOCK;
}
197
198/*
199 * must be called with the quota_list_lock held
200 */
201void
202dq_unlock_internal(struct dquot *dq)
203{
204 int wanted = dq->dq_lflags & DQ_LWANT;
205
206 dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);
207
208 if (wanted) {
209 wakeup(chan: &dq->dq_lflags);
210 }
211}
212
/* Lock a dquot; the list lock is held only long enough to set the lock bit. */
void
dqlock(struct dquot *dq)
{
	lck_mtx_lock(lck: &quota_list_mtx_lock);

	dq_lock_internal(dq);

	lck_mtx_unlock(lck: &quota_list_mtx_lock);
}
222
/* Unlock a dquot previously locked with dqlock. */
void
dqunlock(struct dquot *dq)
{
	lck_mtx_lock(lck: &quota_list_mtx_lock);

	dq_unlock_internal(dq);

	lck_mtx_unlock(lck: &quota_list_mtx_lock);
}
232
233
234
/*
 * Claim an open or close transition on a quota file.
 *
 * type is QTF_OPENING or QTF_CLOSING.  Returns 0 once the caller owns
 * the requested transition, or EBUSY when a conflicting transition is
 * already in progress (or, for opening, the file is already open; for
 * closing, already closed).  May sleep waiting for a close to finish
 * or for outstanding dqget references to drain.
 */
int
qf_get(struct quotafile *qfp, int type)
{
	int error = 0;

	dq_list_lock();

	switch (type) {
	case QTF_OPENING:
		while ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING))) {
			if ((qfp->qf_qflags & QTF_OPENING)) {
				/* another opener already won the race */
				error = EBUSY;
				break;
			}
			if ((qfp->qf_qflags & QTF_CLOSING)) {
				/* wait for the in-flight close to finish, then re-check */
				qfp->qf_qflags |= QTF_WANTED;
				msleep(chan: &qfp->qf_qflags, mtx: &quota_list_mtx_lock, PVFS, wmesg: "qf_get", NULL);
			}
		}
		if (qfp->qf_vp != NULLVP) {
			error = EBUSY;
		}
		if (error == 0) {
			qfp->qf_qflags |= QTF_OPENING;
		}
		break;

	case QTF_CLOSING:
		if ((qfp->qf_qflags & QTF_CLOSING)) {
			error = EBUSY;
			break;
		}
		qfp->qf_qflags |= QTF_CLOSING;

		/* wait for any in-flight open and all dqget references to drain */
		while ((qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt) {
			qfp->qf_qflags |= QTF_WANTED;
			msleep(chan: &qfp->qf_qflags, mtx: &quota_list_mtx_lock, PVFS, wmesg: "qf_get", NULL);
		}
		if (qfp->qf_vp == NULLVP) {
			/* nothing is open after all; back out the claim */
			qfp->qf_qflags &= ~QTF_CLOSING;
			error = EBUSY;
		}
		break;
	}
	dq_list_unlock();

	return error;
}
283
284void
285qf_put(struct quotafile *qfp, int type)
286{
287 dq_list_lock();
288
289 switch (type) {
290 case QTF_OPENING:
291 case QTF_CLOSING:
292 qfp->qf_qflags &= ~type;
293 break;
294 }
295 if ((qfp->qf_qflags & QTF_WANTED)) {
296 qfp->qf_qflags &= ~QTF_WANTED;
297 wakeup(chan: &qfp->qf_qflags);
298 }
299 dq_list_unlock();
300}
301
302
/* Take the per-quota-file mutex (serializes quota file I/O in dqlookup). */
static void
qf_lock(struct quotafile *qfp)
{
	lck_mtx_lock(lck: &qfp->qf_lock);
}
308
/* Release the per-quota-file mutex. */
static void
qf_unlock(struct quotafile *qfp)
{
	lck_mtx_unlock(lck: &qfp->qf_lock);
}
314
315
316/*
317 * take a reference on the quota file while we're
318 * in dqget... this will prevent a quota_off from
319 * occurring while we're potentially playing with
320 * the quota file... the quota_off will stall until
321 * all the current references 'die'... once we start
322 * into quoto_off, all new references will be rejected
323 * we also don't want any dqgets being processed while
324 * we're in the middle of the quota_on... once we've
325 * actually got the quota file open and the associated
326 * struct quotafile inited, we can let them come through
327 *
328 * quota list lock must be held on entry
329 */
330static int
331qf_ref(struct quotafile *qfp)
332{
333 int error = 0;
334
335 if ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP)) {
336 error = EINVAL;
337 } else {
338 qfp->qf_refcnt++;
339 }
340
341 return error;
342}
343
344/*
345 * drop our reference and wakeup any waiters if
346 * we were the last one holding a ref
347 *
348 * quota list lock must be held on entry
349 */
350static void
351qf_rele(struct quotafile *qfp)
352{
353 qfp->qf_refcnt--;
354
355 if ((qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
356 qfp->qf_qflags &= ~QTF_WANTED;
357 wakeup(chan: &qfp->qf_qflags);
358 }
359}
360
361
/*
 * One-time setup of a struct quotafile: no backing vnode, cleared
 * flags, and an initialized per-file mutex.
 */
void
dqfileinit(struct quotafile *qfp)
{
	qfp->qf_vp = NULLVP;
	qfp->qf_qflags = 0;

	lck_mtx_init(lck: &qfp->qf_lock, grp: &qf_lck_grp, LCK_ATTR_NULL);
}
370
371
/*
 * Initialize a quota file
 *
 * Reads and validates the big-endian on-disk header, then caches the
 * time limits and hash parameters in the struct quotafile.  Returns 0
 * on success, EINVAL on a short read or malformed header, or the error
 * from vnode_size/VNOP_READ.
 *
 * must be called with the quota file lock held
 */
int
dqfileopen(struct quotafile *qfp, int type)
{
	struct dqfilehdr header;
	struct vfs_context context;
	off_t file_size;
	uio_t auio;
	int error = 0;
	UIO_STACKBUF(uio_buf, 1);

	context.vc_thread = current_thread();
	context.vc_ucred = qfp->qf_cred;

	/* Obtain the file size */
	if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0) {
		goto out;
	}

	/* Read the file header */
	auio = uio_createwithbuffer(a_iovcount: 1, a_offset: 0, a_spacetype: UIO_SYSSPACE, a_iodirection: UIO_READ,
	    a_buf_p: &uio_buf[0], a_buffer_size: sizeof(uio_buf));
	uio_addiov(a_uio: auio, CAST_USER_ADDR_T(&header), a_length: sizeof(header));
	error = VNOP_READ(vp: qfp->qf_vp, uio: auio, ioflag: 0, ctx: &context);
	if (error) {
		goto out;
	} else if (uio_resid(a_uio: auio)) {
		/* short read: the file cannot contain a full header */
		error = EINVAL;
		goto out;
	}
	/* Sanity check the quota file header. */
	/* on-disk fields are big-endian; maxentries must be a power of two and fit the file */
	if ((OSSwapBigToHostInt32(header.dqh_magic) != quotamagic[type]) ||
	    (OSSwapBigToHostInt32(header.dqh_version) > QF_VERSION) ||
	    (!powerof2(OSSwapBigToHostInt32(header.dqh_maxentries))) ||
	    (OSSwapBigToHostInt32(header.dqh_maxentries) > (file_size / sizeof(struct dqblk)))) {
		error = EINVAL;
		goto out;
	}
	/* Set up the time limits for this quota. */
	if (header.dqh_btime != 0) {
		qfp->qf_btime = OSSwapBigToHostInt32(header.dqh_btime);
	} else {
		qfp->qf_btime = MAX_DQ_TIME;
	}
	if (header.dqh_itime != 0) {
		qfp->qf_itime = OSSwapBigToHostInt32(header.dqh_itime);
	} else {
		qfp->qf_itime = MAX_IQ_TIME;
	}

	/* Calculate the hash table constants. */
	qfp->qf_maxentries = OSSwapBigToHostInt32(header.dqh_maxentries);
	qfp->qf_entrycnt = OSSwapBigToHostInt32(header.dqh_entrycnt);
	qfp->qf_shift = dqhashshift(size: qfp->qf_maxentries);
out:
	return error;
}
433
/*
 * Close down a quota file
 *
 * Re-reads the on-disk header and writes it back with the current
 * in-core entry count (big-endian).  Write errors are deliberately
 * ignored; this is best-effort bookkeeping at close time.
 */
void
dqfileclose(struct quotafile *qfp, __unused int type)
{
	struct dqfilehdr header;
	struct vfs_context context;
	uio_t auio;
	UIO_STACKBUF(uio_buf, 1);

	auio = uio_createwithbuffer(a_iovcount: 1, a_offset: 0, a_spacetype: UIO_SYSSPACE, a_iodirection: UIO_READ,
	    a_buf_p: &uio_buf[0], a_buffer_size: sizeof(uio_buf));
	uio_addiov(a_uio: auio, CAST_USER_ADDR_T(&header), a_length: sizeof(header));

	context.vc_thread = current_thread();
	context.vc_ucred = qfp->qf_cred;

	if (VNOP_READ(vp: qfp->qf_vp, uio: auio, ioflag: 0, ctx: &context) == 0) {
		header.dqh_entrycnt = OSSwapHostToBigInt32(qfp->qf_entrycnt);
		/* reuse the uio to write the updated header back at offset 0 */
		uio_reset(a_uio: auio, a_offset: 0, a_spacetype: UIO_SYSSPACE, a_iodirection: UIO_WRITE);
		uio_addiov(a_uio: auio, CAST_USER_ADDR_T(&header), a_length: sizeof(header));
		(void) VNOP_WRITE(vp: qfp->qf_vp, uio: auio, ioflag: 0, ctx: &context);
	}
}
459
460
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 *
 * On success returns 0 with *dqp referenced (dq_cnt bumped); the caller
 * must release it with dqrele/dqreclaim.  Returns EINVAL if quotas are
 * not initialized, id is 0, or the quota file is unusable; EUSERS when
 * the dquot cache is exhausted; or an I/O error from dqlookup.
 *
 * Because dq_lock_internal and zalloc can drop the quota list lock,
 * every lock re-acquisition restarts the cache lookup ("relookup") and
 * re-validates the dquot's identity against the generation count.
 */
int
dqget(u_int32_t id, struct quotafile *qfp, int type, struct dquot **dqp)
{
	struct dquot *dq;
	struct dquot *ndq = NULL;   /* freshly zalloc'ed dquot, not yet used */
	struct dquot *fdq = NULL;   /* dquot captured from the free list */
	struct dqhash *dqh;
	struct vnode *dqvp;
	int error = 0;
	int listlockval = 0;

	if (!dqisinitialized()) {
		*dqp = NODQUOT;
		return EINVAL;
	}

	if (id == 0 || qfp->qf_vp == NULLVP) {
		*dqp = NODQUOT;
		return EINVAL;
	}
	dq_list_lock();

	if ((qf_ref(qfp))) {
		dq_list_unlock();

		*dqp = NODQUOT;
		return EINVAL;
	}
	if ((dqvp = qfp->qf_vp) == NULLVP) {
		qf_rele(qfp);
		dq_list_unlock();

		*dqp = NODQUOT;
		return EINVAL;
	}
	dqh = DQHASH(dqvp, id);

relookup:
	listlockval = dq_list_lock_val();

	/*
	 * Check the cache first.
	 */
	for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
		if (dq->dq_id != id ||
		    dq->dq_qfile->qf_vp != dqvp) {
			continue;
		}

		dq_lock_internal(dq);
		if (dq_list_lock_changed(oldval: listlockval)) {
			dq_unlock_internal(dq);
			goto relookup;
		}

		/*
		 * dq_lock_internal may drop the quota_list_lock to msleep, so
		 * we need to re-evaluate the identity of this dq
		 */
		if (dq->dq_id != id || dq->dq_qfile == NULL ||
		    dq->dq_qfile->qf_vp != dqvp) {
			dq_unlock_internal(dq);
			goto relookup;
		}
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt++ == 0) {
			if (dq->dq_flags & DQ_MOD) {
				TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
			} else {
				TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
			}
		}
		dq_unlock_internal(dq);

		if (fdq != NULL) {
			/*
			 * we grabbed this from the free list in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through
			 * so stick it back on the free list and return the cached entry
			 */
			TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist);
		}
		qf_rele(qfp);
		dq_list_unlock();

		if (ndq != NULL) {
			/*
			 * we allocated this in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through so free it
			 */
			zfree(KT_DQUOT, ndq);
		}
		*dqp = dq;

		return 0;
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (TAILQ_EMPTY(&dqfreelist) &&
	    numdquot < MAXQUOTAS * desiredvnodes) {
		/* grow the cache target rather than recycling immediately */
		desireddquot += DQUOTINC;
	}

	if (fdq != NULL) {
		/*
		 * we captured this from the free list
		 * in the first pass through, so go
		 * ahead and use it
		 */
		dq = fdq;
		fdq = NULL;
	} else if (numdquot < desireddquot) {
		if (ndq == NULL) {
			/*
			 * drop the quota list lock since zalloc may block
			 */
			dq_list_unlock();

			ndq = (struct dquot *)zalloc_flags(KT_DQUOT,
			    Z_WAITOK | Z_ZERO);

			listlockval = dq_list_lock();
			/*
			 * need to look for the entry again in the cache
			 * since we dropped the quota list lock and
			 * someone else may have beaten us to creating it
			 */
			goto relookup;
		} else {
			/*
			 * we allocated this in the first pass through
			 * and we're still under out target, so go
			 * ahead and use it
			 */
			dq = ndq;
			ndq = NULL;
			numdquot++;
		}
	} else {
		if (TAILQ_EMPTY(&dqfreelist)) {
			/* cache is at its limit and nothing is reclaimable */
			qf_rele(qfp);
			dq_list_unlock();

			if (ndq) {
				/*
				 * we allocated this in the first pass through
				 * but we're now at the limit of our cache size
				 * so free it
				 */
				zfree(KT_DQUOT, ndq);
			}
			tablefull("dquot");
			*dqp = NODQUOT;
			return EUSERS;
		}
		dq = TAILQ_FIRST(&dqfreelist);

		dq_lock_internal(dq);

		if (dq_list_lock_changed(oldval: listlockval) || dq->dq_cnt || (dq->dq_flags & DQ_MOD)) {
			/*
			 * we lost the race while we weren't holding
			 * the quota list lock... dq_lock_internal
			 * will drop it to msleep... this dq has been
			 * reclaimed... go find another
			 */
			dq_unlock_internal(dq);

			/*
			 * need to look for the entry again in the cache
			 * since we dropped the quota list lock and
			 * someone else may have beaten us to creating it
			 */
			goto relookup;
		}
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);

		if (dq->dq_qfile != NULL) {
			/* detach the recycled dquot from its old identity */
			LIST_REMOVE(dq, dq_hash);
			dq->dq_qfile = NULL;
			dq->dq_id = 0;
		}
		dq_unlock_internal(dq);

		/*
		 * because we may have dropped the quota list lock
		 * in the call to dq_lock_internal, we need to
		 * relookup in the hash in case someone else
		 * caused a dq with this identity to be created...
		 * if we don't find it, we'll use this one
		 */
		fdq = dq;
		goto relookup;
	}
	/*
	 * we've either freshly allocated a dq
	 * or we've atomically pulled it out of
	 * the hash and freelists... no one else
	 * can have a reference, which means no
	 * one else can be trying to use this dq
	 */
	dq_lock_internal(dq);
	if (dq_list_lock_changed(oldval: listlockval)) {
		dq_unlock_internal(dq);
		goto relookup;
	}

	/*
	 * Initialize the contents of the dquot structure.
	 */
	dq->dq_cnt = 1;
	dq->dq_flags = 0;
	dq->dq_id = id;
	dq->dq_qfile = qfp;
	dq->dq_type = type;
	/*
	 * once we insert it in the hash and
	 * drop the quota_list_lock, it can be
	 * 'found'... however, we're still holding
	 * the dq_lock which will keep us from doing
	 * anything with it until we've finished
	 * initializing it...
	 */
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dq_list_unlock();

	if (ndq) {
		/*
		 * we allocated this in the first pass through
		 * but we didn't need it, so free it after
		 * we've droped the quota list lock
		 */
		zfree(KT_DQUOT, ndq);
	}

	/* populate dq_dqb from the on-disk record (may do file I/O) */
	error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index);

	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		dq_list_lock();

		dq->dq_id = 0;
		dq->dq_qfile = NULL;
		LIST_REMOVE(dq, dq_hash);

		dq_unlock_internal(dq);
		qf_rele(qfp);
		dq_list_unlock();

		dqrele(dq);

		*dqp = NODQUOT;
		return error;
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0) {
		dq->dq_flags |= DQ_FAKE;
	}
	if (dq->dq_id != 0) {
		struct timeval tv;

		microtime(tv: &tv);
		if (dq->dq_btime == 0) {
			dq->dq_btime = tv.tv_sec + qfp->qf_btime;
		}
		if (dq->dq_itime == 0) {
			dq->dq_itime = tv.tv_sec + qfp->qf_itime;
		}
	}
	dq_list_lock();
	dq_unlock_internal(dq);
	qf_rele(qfp);
	dq_list_unlock();

	*dqp = dq;
	return 0;
}
755
/*
 * Lookup a dqblk structure for the specified identifier and
 * quota file.  If there is no entry for this identifier then
 * one is inserted.  The actual hash table index is returned.
 *
 * Probes the on-disk open-addressed hash table with a double-hash
 * step (dqhash1/dqhash2); an all-zero record marks a free slot.
 * Found records are byte-swapped from big-endian into *dqb.
 * Returns 0 on success or an I/O error; *index receives the slot
 * index so dqsync_locked can write back without re-probing.
 */
static int
dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index)
{
	struct vnode *dqvp;
	struct vfs_context context;
	uio_t auio;
	int i, skip, last;
	u_int32_t mask;
	int error = 0;
	UIO_STACKBUF(uio_buf, 1);


	qf_lock(qfp);

	dqvp = qfp->qf_vp;

	context.vc_thread = current_thread();
	context.vc_ucred = qfp->qf_cred;

	/* qf_maxentries is a power of two (validated in dqfileopen) */
	mask = qfp->qf_maxentries - 1;
	i = dqhash1(id, qfp->qf_shift, mask);
	skip = dqhash2(id, mask);

	for (last = (i + (qfp->qf_maxentries - 1) * skip) & mask;
	    i != last;
	    i = (i + skip) & mask) {
		auio = uio_createwithbuffer(a_iovcount: 1, dqoffset(i), a_spacetype: UIO_SYSSPACE, a_iodirection: UIO_READ,
		    a_buf_p: &uio_buf[0], a_buffer_size: sizeof(uio_buf));
		uio_addiov(a_uio: auio, CAST_USER_ADDR_T(dqb), a_length: sizeof(struct dqblk));
		error = VNOP_READ(vp: dqvp, uio: auio, ioflag: 0, ctx: &context);
		if (error) {
			printf("dqlookup: error %d looking up id %u at index %d\n", error, id, i);
			break;
		} else if (uio_resid(a_uio: auio)) {
			error = EIO;
			printf("dqlookup: error looking up id %u at index %d\n", id, i);
			break;
		}
		/*
		 * An empty entry means there is no entry
		 * with that id.  In this case a new dqb
		 * record will be inserted.
		 */
		if (dqb->dqb_id == 0) {
			bzero(s: dqb, n: sizeof(struct dqblk));
			dqb->dqb_id = OSSwapHostToBigInt32(id);
			/*
			 * Write back to reserve entry for this id
			 */
			uio_reset(a_uio: auio, dqoffset(i), a_spacetype: UIO_SYSSPACE, a_iodirection: UIO_WRITE);
			uio_addiov(a_uio: auio, CAST_USER_ADDR_T(dqb), a_length: sizeof(struct dqblk));
			error = VNOP_WRITE(vp: dqvp, uio: auio, ioflag: 0, ctx: &context);
			if (uio_resid(a_uio: auio) && error == 0) {
				error = EIO;
			}
			if (error == 0) {
				++qfp->qf_entrycnt;
			}
			/* leave the in-core copy in host byte order */
			dqb->dqb_id = id;
			break;
		}
		/* An id match means an entry was found. */
		if (OSSwapBigToHostInt32(dqb->dqb_id) == id) {
			/* convert the record from on-disk big-endian to host order */
			dqb->dqb_bhardlimit = OSSwapBigToHostInt64(dqb->dqb_bhardlimit);
			dqb->dqb_bsoftlimit = OSSwapBigToHostInt64(dqb->dqb_bsoftlimit);
			dqb->dqb_curbytes = OSSwapBigToHostInt64(dqb->dqb_curbytes);
			dqb->dqb_ihardlimit = OSSwapBigToHostInt32(dqb->dqb_ihardlimit);
			dqb->dqb_isoftlimit = OSSwapBigToHostInt32(dqb->dqb_isoftlimit);
			dqb->dqb_curinodes = OSSwapBigToHostInt32(dqb->dqb_curinodes);
			dqb->dqb_btime = OSSwapBigToHostInt32(dqb->dqb_btime);
			dqb->dqb_itime = OSSwapBigToHostInt32(dqb->dqb_itime);
			dqb->dqb_id = OSSwapBigToHostInt32(dqb->dqb_id);
			break;
		}
	}
	qf_unlock(qfp);

	*index = i; /* remember index so we don't have to recompute it later */

	return error;
}
842
843
/*
 * Release a reference to a dquot.
 *
 * On the last release a modified dquot is synced to the quota file,
 * then the structure is appended to the free list (it remains in the
 * hash so a later dqget can revive it).
 */
void
dqrele(struct dquot *dq)
{
	if (dq == NODQUOT) {
		return;
	}
	dqlock(dq);

	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;

		dqunlock(dq);
		return;
	}
	if (dq->dq_flags & DQ_MOD) {
		/* flush pending changes; errors are ignored on release */
		(void) dqsync_locked(dq);
	}
	dq->dq_cnt--;

	dq_list_lock();
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
	dq_unlock_internal(dq);
	dq_list_unlock();
}
871
/*
 * Release a reference to a dquot but don't do any I/O.
 *
 * A still-modified dquot is parked on the dirty list instead of being
 * written out; dqsync_orphans picks it up later.
 */
void
dqreclaim(struct dquot *dq)
{
	if (dq == NODQUOT) {
		return;
	}

	dq_list_lock();
	dq_lock_internal(dq);

	if (--dq->dq_cnt > 0) {
		/* other holders remain; nothing more to do */
		dq_unlock_internal(dq);
		dq_list_unlock();
		return;
	}
	if (dq->dq_flags & DQ_MOD) {
		TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist);
	} else {
		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
	}

	dq_unlock_internal(dq);
	dq_list_unlock();
}
899
/*
 * Update a quota file's orphaned disk quotas.
 *
 * Walks the dirty list for dquots belonging to qfp (parked there by
 * dqreclaim with dq_cnt == 0), writes each one out, and moves it to
 * the free list.  The scan restarts from the top whenever the quota
 * list lock may have been dropped, since the list can change under us.
 */
void
dqsync_orphans(struct quotafile *qfp)
{
	struct dquot *dq;

	dq_list_lock();
loop:
	TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) {
		if (dq->dq_qfile != qfp) {
			continue;
		}

		dq_lock_internal(dq);

		if (dq->dq_qfile != qfp) {
			/*
			 * the identity of this dq changed while
			 * the quota_list_lock was dropped
			 * dq_lock_internal can drop it to msleep
			 */
			dq_unlock_internal(dq);
			goto loop;
		}
		if ((dq->dq_flags & DQ_MOD) == 0) {
			/*
			 * someone cleaned and removed this from
			 * the dq from the dirty list while the
			 * quota_list_lock was dropped
			 */
			dq_unlock_internal(dq);
			goto loop;
		}
		if (dq->dq_cnt != 0) {
			/* dirty-list entries must be unreferenced (see dqreclaim) */
			panic("dqsync_orphans: dquot in use");
		}

		TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);

		dq_list_unlock();
		/*
		 * we're still holding the dqlock at this point
		 * with the reference count == 0
		 * we shouldn't be able
		 * to pick up another one since we hold dqlock
		 */
		(void) dqsync_locked(dq);

		dq_list_lock();

		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

		dq_unlock_internal(dq);
		goto loop;
	}
	dq_list_unlock();
}
959
960int
961dqsync(struct dquot *dq)
962{
963 int error = 0;
964
965 if (dq != NODQUOT) {
966 dqlock(dq);
967
968 if ((dq->dq_flags & DQ_MOD)) {
969 error = dqsync_locked(dq);
970 }
971
972 dqunlock(dq);
973 }
974 return error;
975}
976
977
/*
 * Update the disk quota in the quota file.
 *
 * Byte-swaps the in-core dqblk to big-endian and writes it to the
 * hash slot remembered in dq_index by dqlookup.  Clears DQ_MOD even
 * on failure.  Caller must hold the dquot locked; id 0 (the default
 * record) is never written back.
 */
int
dqsync_locked(struct dquot *dq)
{
	struct vfs_context context;
	struct vnode *dqvp;
	struct dqblk dqb, *dqblkp;
	uio_t auio;
	int error;
	UIO_STACKBUF(uio_buf, 1);

	if (dq->dq_id == 0) {
		dq->dq_flags &= ~DQ_MOD;
		return 0;
	}
	if (dq->dq_qfile == NULL) {
		panic("dqsync: NULL dq_qfile");
	}
	if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP) {
		panic("dqsync: NULL qf_vp");
	}

	auio = uio_createwithbuffer(a_iovcount: 1, dqoffset(dq->dq_index), a_spacetype: UIO_SYSSPACE,
	    a_iodirection: UIO_WRITE, a_buf_p: &uio_buf[0], a_buffer_size: sizeof(uio_buf));
	uio_addiov(a_uio: auio, CAST_USER_ADDR_T(&dqb), a_length: sizeof(struct dqblk));

	context.vc_thread = current_thread(); /* XXX */
	context.vc_ucred = dq->dq_qfile->qf_cred;

	/* build the on-disk (big-endian) image in a local copy */
	dqblkp = &dq->dq_dqb;
	dqb.dqb_bhardlimit = OSSwapHostToBigInt64(dqblkp->dqb_bhardlimit);
	dqb.dqb_bsoftlimit = OSSwapHostToBigInt64(dqblkp->dqb_bsoftlimit);
	dqb.dqb_curbytes = OSSwapHostToBigInt64(dqblkp->dqb_curbytes);
	dqb.dqb_ihardlimit = OSSwapHostToBigInt32(dqblkp->dqb_ihardlimit);
	dqb.dqb_isoftlimit = OSSwapHostToBigInt32(dqblkp->dqb_isoftlimit);
	dqb.dqb_curinodes = OSSwapHostToBigInt32(dqblkp->dqb_curinodes);
	dqb.dqb_btime = OSSwapHostToBigInt32(dqblkp->dqb_btime);
	dqb.dqb_itime = OSSwapHostToBigInt32(dqblkp->dqb_itime);
	dqb.dqb_id = OSSwapHostToBigInt32(dqblkp->dqb_id);
	dqb.dqb_spare[0] = 0;
	dqb.dqb_spare[1] = 0;
	dqb.dqb_spare[2] = 0;
	dqb.dqb_spare[3] = 0;

	error = VNOP_WRITE(vp: dqvp, uio: auio, ioflag: 0, ctx: &context);
	if (uio_resid(a_uio: auio) && error == 0) {
		/* short write counts as an I/O error */
		error = EIO;
	}
	dq->dq_flags &= ~DQ_MOD;

	return error;
}
1032
/*
 * Flush all entries from the cache for a particular vnode.
 *
 * Used when a quota file goes away: every unreferenced dquot bound to
 * vp is unhashed and orphaned from its quota file; the structures stay
 * on the free list for later reuse.  A referenced dquot here is a bug
 * (panics).
 */
void
dqflush(struct vnode *vp)
{
	struct dquot *dq, *nextdq;
	struct dqhash *dqh;

	if (!dqisinitialized()) {
		return;
	}

	/*
	 * Move all dquot's that used to refer to this quota
	 * file off their hash chains (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	dq_list_lock();

	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = dqh->lh_first; dq; dq = nextdq) {
			/* capture next before LIST_REMOVE invalidates links */
			nextdq = dq->dq_hash.le_next;
			if (dq->dq_qfile->qf_vp != vp) {
				continue;
			}
			if (dq->dq_cnt) {
				panic("dqflush: stray dquot");
			}
			LIST_REMOVE(dq, dq_hash);
			dq->dq_qfile = NULL;
		}
	}
	dq_list_unlock();
}
1068
/*
 * LP64 support for munging dqblk structure.
 * XXX conversion of user_time_t to time_t loses precision; not an issue for
 * XXX us now, since we are only ever setting 32 bits worth of time into it.
 *
 * to64 != 0: kernel dqblk -> user_dqblk; otherwise the reverse.  The
 * bcopy covers the fields laid out identically up to dqb_btime; the
 * differently-sized id/time fields are assigned individually.
 */
__private_extern__ void
munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64)
{
	if (to64) {
		/* munge kernel (32 bit) dqblk into user (64 bit) dqblk */
		bcopy(src: (caddr_t)dqblkp, dst: (caddr_t)user_dqblkp, offsetof(struct dqblk, dqb_btime));
		user_dqblkp->dqb_id = dqblkp->dqb_id;
		user_dqblkp->dqb_itime = dqblkp->dqb_itime;
		user_dqblkp->dqb_btime = dqblkp->dqb_btime;
	} else {
		/* munge user (64 bit) dqblk into kernel (32 bit) dqblk */
		bcopy(src: (caddr_t)user_dqblkp, dst: (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime));
		dqblkp->dqb_id = user_dqblkp->dqb_id;
		dqblkp->dqb_itime = user_dqblkp->dqb_itime; /* XXX - lose precision */
		dqblkp->dqb_btime = user_dqblkp->dqb_btime; /* XXX - lose precision */
	}
}
1091