1 | /* |
2 | * Copyright (c) 2000-2016 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ |
29 | /*- |
30 | * Copyright (c) 1994 Christopher G. Demetriou |
31 | * Copyright (c) 1982, 1986, 1989, 1993 |
32 | * The Regents of the University of California. All rights reserved. |
33 | * (c) UNIX System Laboratories, Inc. |
34 | * All or some portions of this file are derived from material licensed |
35 | * to the University of California by American Telephone and Telegraph |
36 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
37 | * the permission of UNIX System Laboratories, Inc. |
38 | * |
39 | * Redistribution and use in source and binary forms, with or without |
40 | * modification, are permitted provided that the following conditions |
41 | * are met: |
42 | * 1. Redistributions of source code must retain the above copyright |
43 | * notice, this list of conditions and the following disclaimer. |
44 | * 2. Redistributions in binary form must reproduce the above copyright |
45 | * notice, this list of conditions and the following disclaimer in the |
46 | * documentation and/or other materials provided with the distribution. |
47 | * 3. All advertising materials mentioning features or use of this software |
48 | * must display the following acknowledgement: |
49 | * This product includes software developed by the University of |
50 | * California, Berkeley and its contributors. |
51 | * 4. Neither the name of the University nor the names of its contributors |
52 | * may be used to endorse or promote products derived from this software |
53 | * without specific prior written permission. |
54 | * |
55 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
56 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
57 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
58 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
59 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
60 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
61 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
62 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
63 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
64 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
65 | * SUCH DAMAGE. |
66 | * |
67 | * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94 |
68 | */ |
69 | |
70 | /* |
71 | * Some references: |
72 | * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986) |
73 | * Leffler, et al.: The Design and Implementation of the 4.3BSD |
 *		UNIX Operating System (Addison-Wesley, 1989)
75 | */ |
76 | |
77 | #include <sys/param.h> |
78 | #include <sys/systm.h> |
79 | #include <sys/proc_internal.h> |
80 | #include <sys/buf_internal.h> |
81 | #include <sys/vnode_internal.h> |
82 | #include <sys/mount_internal.h> |
83 | #include <sys/trace.h> |
84 | #include <sys/malloc.h> |
85 | #include <sys/resourcevar.h> |
86 | #include <miscfs/specfs/specdev.h> |
87 | #include <sys/ubc.h> |
88 | #include <sys/kauth.h> |
89 | #if DIAGNOSTIC |
90 | #include <kern/assert.h> |
91 | #endif /* DIAGNOSTIC */ |
92 | #include <kern/task.h> |
93 | #include <kern/zalloc.h> |
94 | #include <kern/locks.h> |
95 | #include <kern/thread.h> |
96 | |
97 | #include <sys/fslog.h> /* fslog_io_error() */ |
98 | #include <sys/disk.h> /* dk_error_description_t */ |
99 | |
100 | #include <mach/mach_types.h> |
101 | #include <mach/memory_object_types.h> |
102 | #include <kern/sched_prim.h> /* thread_block() */ |
103 | |
104 | #include <vm/vm_kern.h> |
105 | #include <vm/vm_pageout.h> |
106 | |
107 | #include <sys/kdebug.h> |
108 | |
109 | #include <libkern/OSAtomic.h> |
110 | #include <libkern/OSDebug.h> |
111 | #include <sys/ubc_internal.h> |
112 | |
113 | #include <sys/sdt.h> |
114 | |
115 | int bcleanbuf(buf_t bp, boolean_t discard); |
116 | static int brecover_data(buf_t bp); |
117 | static boolean_t incore(vnode_t vp, daddr64_t blkno); |
118 | /* timeout is in msecs */ |
119 | static buf_t getnewbuf(int slpflag, int slptimeo, int *queue); |
120 | static void bremfree_locked(buf_t bp); |
121 | static void buf_reassign(buf_t bp, vnode_t newvp); |
122 | static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo); |
123 | static int buf_iterprepare(vnode_t vp, struct buflists *, int flags); |
124 | static void buf_itercomplete(vnode_t vp, struct buflists *, int flags); |
125 | static boolean_t buffer_cache_gc(int); |
126 | static buf_t buf_brelse_shadow(buf_t bp); |
127 | static void buf_free_meta_store(buf_t bp); |
128 | |
129 | static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy, |
130 | uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv); |
131 | |
132 | |
133 | int bdwrite_internal(buf_t, int); |
134 | |
135 | extern void disk_conditioner_delay(buf_t, int, int, uint64_t); |
136 | |
137 | /* zone allocated buffer headers */ |
138 | static void bufzoneinit(void); |
139 | static void bcleanbuf_thread_init(void); |
140 | static void bcleanbuf_thread(void); |
141 | |
142 | static zone_t buf_hdr_zone; |
143 | static int buf_hdr_count; |
144 | |
145 | |
146 | /* |
147 | * Definitions for the buffer hash lists. |
148 | */ |
149 | #define BUFHASH(dvp, lbn) \ |
150 | (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash]) |
151 | LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash; |
152 | u_long bufhash; |
153 | |
154 | static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp); |
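/*
 * Illustrative sketch (not from the original source): a lookup by
 * (vnode, logical block) hashes into bufhashtbl via BUFHASH and walks
 * the chain, which is essentially what incore_locked() does while
 * holding buf_mtxp:
 *
 *	struct bufhashhdr *dp = BUFHASH(vp, blkno);
 *	buf_t bp;
 *
 *	for (bp = dp->lh_first; bp; bp = bp->b_hash.le_next) {
 *		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
 *		    !ISSET(bp->b_flags, B_INVAL))
 *			break;		// cached buffer found
 *	}
 */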
155 | |
156 | /* Definitions for the buffer stats. */ |
157 | struct bufstats bufstats; |
158 | |
159 | /* Number of delayed write buffers */ |
160 | long nbdwrite = 0; |
161 | int blaundrycnt = 0; |
static int boot_nbuf_headers = 0;	/* buffer headers set up at boot (see bufinit()) */
163 | |
164 | static TAILQ_HEAD(delayqueue, buf) delaybufqueue; |
165 | |
166 | static TAILQ_HEAD(ioqueue, buf) iobufqueue; |
167 | static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES]; |
168 | static int needbuffer; |
169 | static int need_iobuffer; |
170 | |
171 | static lck_grp_t *buf_mtx_grp; |
172 | static lck_attr_t *buf_mtx_attr; |
173 | static lck_grp_attr_t *buf_mtx_grp_attr; |
174 | static lck_mtx_t *iobuffer_mtxp; |
175 | static lck_mtx_t *buf_mtxp; |
176 | static lck_mtx_t *buf_gc_callout; |
177 | |
178 | static int buf_busycount; |
179 | |
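/*
 * Filesystem-registered callouts invoked during buffer_cache_gc() so that
 * interested filesystems can trim their own caches when the buffer cache
 * is asked to shrink under memory pressure; the table is protected by the
 * buf_gc_callout mutex.
 */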
180 | #define FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE 16 |
181 | typedef struct { |
182 | void (* callout)(int, void *); |
183 | void *context; |
184 | } fs_buffer_cache_gc_callout_t; |
185 | |
186 | fs_buffer_cache_gc_callout_t fs_callouts[FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE] = { {NULL, NULL} }; |
187 | |
188 | static __inline__ int |
189 | buf_timestamp(void) |
190 | { |
191 | struct timeval t; |
192 | microuptime(&t); |
193 | return (t.tv_sec); |
194 | } |
195 | |
196 | /* |
197 | * Insq/Remq for the buffer free lists. |
198 | */ |
199 | #define binsheadfree(bp, dp, whichq) do { \ |
200 | TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ |
201 | } while (0) |
202 | |
203 | #define binstailfree(bp, dp, whichq) do { \ |
204 | TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ |
205 | } while (0) |
206 | |
207 | #define BHASHENTCHECK(bp) \ |
208 | if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \ |
209 | panic("%p: b_hash.le_prev is not deadbeef", (bp)); |
210 | |
211 | #define BLISTNONE(bp) \ |
212 | (bp)->b_hash.le_next = (struct buf *)0; \ |
213 | (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef; |
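/*
 * A b_hash.le_prev of 0xdeadbeef is the "not on any hash chain" sentinel:
 * BLISTNONE poisons the link, BHASHENTCHECK asserts the buffer is unhashed
 * before insertion, and bremhash panics if asked to remove such a buffer.
 */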
214 | |
215 | /* |
216 | * Insq/Remq for the vnode usage lists. |
217 | */ |
218 | #define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs) |
219 | #define bufremvn(bp) { \ |
220 | LIST_REMOVE(bp, b_vnbufs); \ |
221 | (bp)->b_vnbufs.le_next = NOLIST; \ |
222 | } |
223 | |
/*
 * Time in seconds before a buffer on a free list is
 * considered stale
 */
228 | #define LRU_IS_STALE 120 /* default value for the LRU */ |
229 | #define AGE_IS_STALE 60 /* default value for the AGE */ |
230 | #define META_IS_STALE 180 /* default value for the BQ_META */ |
231 | |
232 | int lru_is_stale = LRU_IS_STALE; |
233 | int age_is_stale = AGE_IS_STALE; |
234 | int meta_is_stale = META_IS_STALE; |
235 | |
236 | #define MAXLAUNDRY 10 |
237 | |
238 | /* LIST_INSERT_HEAD() with assertions */ |
239 | static __inline__ void |
240 | blistenterhead(struct bufhashhdr * head, buf_t bp) |
241 | { |
242 | if ((bp->b_hash.le_next = (head)->lh_first) != NULL) |
243 | (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next; |
244 | (head)->lh_first = bp; |
245 | bp->b_hash.le_prev = &(head)->lh_first; |
246 | if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) |
247 | panic("blistenterhead: le_prev is deadbeef" ); |
248 | } |
249 | |
250 | static __inline__ void |
251 | binshash(buf_t bp, struct bufhashhdr *dp) |
252 | { |
253 | #if DIAGNOSTIC |
254 | buf_t nbp; |
255 | #endif /* DIAGNOSTIC */ |
256 | |
257 | BHASHENTCHECK(bp); |
258 | |
259 | #if DIAGNOSTIC |
260 | nbp = dp->lh_first; |
261 | for(; nbp != NULL; nbp = nbp->b_hash.le_next) { |
262 | if(nbp == bp) |
263 | panic("buf already in hashlist" ); |
264 | } |
265 | #endif /* DIAGNOSTIC */ |
266 | |
267 | blistenterhead(dp, bp); |
268 | } |
269 | |
270 | static __inline__ void |
271 | bremhash(buf_t bp) |
272 | { |
273 | if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) |
274 | panic("bremhash le_prev is deadbeef" ); |
275 | if (bp->b_hash.le_next == bp) |
276 | panic("bremhash: next points to self" ); |
277 | |
278 | if (bp->b_hash.le_next != NULL) |
279 | bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev; |
280 | *bp->b_hash.le_prev = (bp)->b_hash.le_next; |
281 | } |
282 | |
283 | /* |
284 | * buf_mtxp held. |
285 | */ |
286 | static __inline__ void |
287 | bmovelaundry(buf_t bp) |
288 | { |
289 | bp->b_whichq = BQ_LAUNDRY; |
290 | bp->b_timestamp = buf_timestamp(); |
291 | binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); |
292 | blaundrycnt++; |
293 | } |
294 | |
295 | static __inline__ void |
296 | buf_release_credentials(buf_t bp) |
297 | { |
298 | if (IS_VALID_CRED(bp->b_rcred)) { |
299 | kauth_cred_unref(&bp->b_rcred); |
300 | } |
301 | if (IS_VALID_CRED(bp->b_wcred)) { |
302 | kauth_cred_unref(&bp->b_wcred); |
303 | } |
304 | } |
305 | |
306 | |
307 | int |
308 | buf_valid(buf_t bp) { |
309 | |
310 | if ( (bp->b_flags & (B_DONE | B_DELWRI)) ) |
311 | return 1; |
312 | return 0; |
313 | } |
314 | |
315 | int |
316 | buf_fromcache(buf_t bp) { |
317 | |
318 | if ( (bp->b_flags & B_CACHE) ) |
319 | return 1; |
320 | return 0; |
321 | } |
322 | |
323 | void |
324 | buf_markinvalid(buf_t bp) { |
325 | |
326 | SET(bp->b_flags, B_INVAL); |
327 | } |
328 | |
329 | void |
330 | buf_markdelayed(buf_t bp) { |
331 | |
332 | if (!ISSET(bp->b_flags, B_DELWRI)) { |
333 | SET(bp->b_flags, B_DELWRI); |
334 | |
335 | OSAddAtomicLong(1, &nbdwrite); |
336 | buf_reassign(bp, bp->b_vp); |
337 | } |
338 | SET(bp->b_flags, B_DONE); |
339 | } |
340 | |
341 | void |
342 | buf_markclean(buf_t bp) { |
343 | |
344 | if (ISSET(bp->b_flags, B_DELWRI)) { |
345 | CLR(bp->b_flags, B_DELWRI); |
346 | |
347 | OSAddAtomicLong(-1, &nbdwrite); |
348 | buf_reassign(bp, bp->b_vp); |
349 | } |
350 | } |
351 | |
352 | void |
353 | buf_markeintr(buf_t bp) { |
354 | |
355 | SET(bp->b_flags, B_EINTR); |
356 | } |
357 | |
358 | |
359 | void |
360 | buf_markaged(buf_t bp) { |
361 | |
362 | SET(bp->b_flags, B_AGE); |
363 | } |
364 | |
365 | int |
366 | buf_fua(buf_t bp) { |
367 | |
368 | if ((bp->b_flags & B_FUA) == B_FUA) |
369 | return 1; |
370 | return 0; |
371 | } |
372 | |
373 | void |
374 | buf_markfua(buf_t bp) { |
375 | |
376 | SET(bp->b_flags, B_FUA); |
377 | } |
378 | |
379 | #if CONFIG_PROTECT |
380 | cpx_t bufattr_cpx(bufattr_t bap) |
381 | { |
382 | return bap->ba_cpx; |
383 | } |
384 | |
385 | void bufattr_setcpx(bufattr_t bap, cpx_t cpx) |
386 | { |
387 | bap->ba_cpx = cpx; |
388 | } |
389 | |
390 | void |
391 | buf_setcpoff (buf_t bp, uint64_t foffset) { |
392 | bp->b_attr.ba_cp_file_off = foffset; |
393 | } |
394 | |
395 | uint64_t |
396 | bufattr_cpoff(bufattr_t bap) { |
397 | return bap->ba_cp_file_off; |
398 | } |
399 | |
400 | void |
401 | bufattr_setcpoff(bufattr_t bap, uint64_t foffset) { |
402 | bap->ba_cp_file_off = foffset; |
403 | } |
404 | |
#else /* !CONFIG_PROTECT */
406 | |
407 | uint64_t |
408 | bufattr_cpoff(bufattr_t bap __unused) { |
409 | return 0; |
410 | } |
411 | |
412 | void |
413 | bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset) { |
414 | return; |
415 | } |
416 | |
417 | struct cpx *bufattr_cpx(__unused bufattr_t bap) |
418 | { |
419 | return NULL; |
420 | } |
421 | |
422 | void bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx) |
423 | { |
424 | } |
425 | |
426 | #endif /* !CONFIG_PROTECT */ |
427 | |
428 | bufattr_t |
429 | bufattr_alloc() { |
430 | bufattr_t bap; |
431 | MALLOC(bap, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK); |
432 | if (bap == NULL) |
433 | return NULL; |
434 | |
435 | bzero(bap, sizeof(struct bufattr)); |
436 | return bap; |
437 | } |
438 | |
439 | void |
440 | bufattr_free(bufattr_t bap) { |
441 | if (bap) |
442 | FREE(bap, M_TEMP); |
443 | } |
444 | |
445 | bufattr_t |
446 | bufattr_dup(bufattr_t bap) { |
447 | bufattr_t new_bufattr; |
448 | MALLOC(new_bufattr, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK); |
449 | if (new_bufattr == NULL) |
450 | return NULL; |
451 | |
452 | /* Copy the provided one into the new copy */ |
453 | memcpy (new_bufattr, bap, sizeof(struct bufattr)); |
454 | return new_bufattr; |
455 | } |
456 | |
457 | int |
458 | bufattr_rawencrypted(bufattr_t bap) { |
459 | if ( (bap->ba_flags & BA_RAW_ENCRYPTED_IO) ) |
460 | return 1; |
461 | return 0; |
462 | } |
463 | |
464 | int |
465 | bufattr_throttled(bufattr_t bap) { |
466 | return (GET_BUFATTR_IO_TIER(bap)); |
467 | } |
468 | |
469 | int |
470 | bufattr_passive(bufattr_t bap) { |
471 | if ( (bap->ba_flags & BA_PASSIVE) ) |
472 | return 1; |
473 | return 0; |
474 | } |
475 | |
476 | int |
477 | bufattr_nocache(bufattr_t bap) { |
478 | if ( (bap->ba_flags & BA_NOCACHE) ) |
479 | return 1; |
480 | return 0; |
481 | } |
482 | |
483 | int |
484 | bufattr_meta(bufattr_t bap) { |
485 | if ( (bap->ba_flags & BA_META) ) |
486 | return 1; |
487 | return 0; |
488 | } |
489 | |
490 | void |
491 | bufattr_markmeta(bufattr_t bap) { |
492 | SET(bap->ba_flags, BA_META); |
493 | } |
494 | |
495 | int |
496 | #if !CONFIG_EMBEDDED |
497 | bufattr_delayidlesleep(bufattr_t bap) |
498 | #else /* !CONFIG_EMBEDDED */ |
499 | bufattr_delayidlesleep(__unused bufattr_t bap) |
500 | #endif /* !CONFIG_EMBEDDED */ |
501 | { |
502 | #if !CONFIG_EMBEDDED |
503 | if ( (bap->ba_flags & BA_DELAYIDLESLEEP) ) |
504 | return 1; |
505 | #endif /* !CONFIG_EMBEDDED */ |
506 | return 0; |
507 | } |
508 | |
509 | bufattr_t |
510 | buf_attr(buf_t bp) { |
511 | return &bp->b_attr; |
512 | } |
513 | |
514 | void |
515 | buf_markstatic(buf_t bp __unused) { |
516 | SET(bp->b_flags, B_STATICCONTENT); |
517 | } |
518 | |
519 | int |
520 | buf_static(buf_t bp) { |
521 | if ( (bp->b_flags & B_STATICCONTENT) ) |
522 | return 1; |
523 | return 0; |
524 | } |
525 | |
526 | void |
527 | bufattr_markgreedymode(bufattr_t bap) { |
528 | SET(bap->ba_flags, BA_GREEDY_MODE); |
529 | } |
530 | |
531 | int |
532 | bufattr_greedymode(bufattr_t bap) { |
533 | if ( (bap->ba_flags & BA_GREEDY_MODE) ) |
534 | return 1; |
535 | return 0; |
536 | } |
537 | |
538 | void |
539 | bufattr_markisochronous(bufattr_t bap) { |
540 | SET(bap->ba_flags, BA_ISOCHRONOUS); |
541 | } |
542 | |
543 | int |
544 | bufattr_isochronous(bufattr_t bap) { |
545 | if ( (bap->ba_flags & BA_ISOCHRONOUS) ) |
546 | return 1; |
547 | return 0; |
548 | } |
549 | |
550 | void |
551 | bufattr_markquickcomplete(bufattr_t bap) { |
552 | SET(bap->ba_flags, BA_QUICK_COMPLETE); |
553 | } |
554 | |
555 | int |
556 | bufattr_quickcomplete(bufattr_t bap) { |
557 | if ( (bap->ba_flags & BA_QUICK_COMPLETE) ) |
558 | return 1; |
559 | return 0; |
560 | } |
561 | |
562 | errno_t |
563 | buf_error(buf_t bp) { |
564 | |
565 | return (bp->b_error); |
566 | } |
567 | |
568 | void |
569 | buf_seterror(buf_t bp, errno_t error) { |
570 | |
571 | if ((bp->b_error = error)) |
572 | SET(bp->b_flags, B_ERROR); |
573 | else |
574 | CLR(bp->b_flags, B_ERROR); |
575 | } |
576 | |
577 | void |
578 | buf_setflags(buf_t bp, int32_t flags) { |
579 | |
580 | SET(bp->b_flags, (flags & BUF_X_WRFLAGS)); |
581 | } |
582 | |
583 | void |
584 | buf_clearflags(buf_t bp, int32_t flags) { |
585 | |
586 | CLR(bp->b_flags, (flags & BUF_X_WRFLAGS)); |
587 | } |
588 | |
589 | int32_t |
590 | buf_flags(buf_t bp) { |
591 | |
592 | return ((bp->b_flags & BUF_X_RDFLAGS)); |
593 | } |
594 | |
595 | void |
596 | buf_reset(buf_t bp, int32_t io_flags) { |
597 | |
598 | CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA)); |
599 | SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE))); |
600 | |
601 | bp->b_error = 0; |
602 | } |
603 | |
604 | uint32_t |
605 | buf_count(buf_t bp) { |
606 | |
607 | return (bp->b_bcount); |
608 | } |
609 | |
610 | void |
611 | buf_setcount(buf_t bp, uint32_t bcount) { |
612 | |
613 | bp->b_bcount = bcount; |
614 | } |
615 | |
616 | uint32_t |
617 | buf_size(buf_t bp) { |
618 | |
619 | return (bp->b_bufsize); |
620 | } |
621 | |
622 | void |
623 | buf_setsize(buf_t bp, uint32_t bufsize) { |
624 | |
625 | bp->b_bufsize = bufsize; |
626 | } |
627 | |
628 | uint32_t |
629 | buf_resid(buf_t bp) { |
630 | |
631 | return (bp->b_resid); |
632 | } |
633 | |
634 | void |
635 | buf_setresid(buf_t bp, uint32_t resid) { |
636 | |
637 | bp->b_resid = resid; |
638 | } |
639 | |
640 | uint32_t |
641 | buf_dirtyoff(buf_t bp) { |
642 | |
643 | return (bp->b_dirtyoff); |
644 | } |
645 | |
646 | uint32_t |
647 | buf_dirtyend(buf_t bp) { |
648 | |
649 | return (bp->b_dirtyend); |
650 | } |
651 | |
652 | void |
653 | buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) { |
654 | |
655 | bp->b_dirtyoff = dirtyoff; |
656 | } |
657 | |
658 | void |
659 | buf_setdirtyend(buf_t bp, uint32_t dirtyend) { |
660 | |
661 | bp->b_dirtyend = dirtyend; |
662 | } |
663 | |
664 | uintptr_t |
665 | buf_dataptr(buf_t bp) { |
666 | |
667 | return (bp->b_datap); |
668 | } |
669 | |
670 | void |
671 | buf_setdataptr(buf_t bp, uintptr_t data) { |
672 | |
673 | bp->b_datap = data; |
674 | } |
675 | |
676 | vnode_t |
677 | buf_vnode(buf_t bp) { |
678 | |
679 | return (bp->b_vp); |
680 | } |
681 | |
682 | void |
683 | buf_setvnode(buf_t bp, vnode_t vp) { |
684 | |
685 | bp->b_vp = vp; |
686 | } |
687 | |
688 | |
689 | void * |
690 | buf_callback(buf_t bp) |
691 | { |
692 | if ( !(bp->b_flags & B_CALL) ) |
693 | return ((void *) NULL); |
694 | |
695 | return ((void *)bp->b_iodone); |
696 | } |
697 | |
698 | |
699 | errno_t |
700 | buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction) |
701 | { |
702 | assert(!ISSET(bp->b_flags, B_FILTER) && ISSET(bp->b_lflags, BL_BUSY)); |
703 | |
704 | if (callback) |
705 | bp->b_flags |= (B_CALL | B_ASYNC); |
706 | else |
707 | bp->b_flags &= ~B_CALL; |
708 | bp->b_transaction = transaction; |
709 | bp->b_iodone = callback; |
710 | |
711 | return (0); |
712 | } |
713 | |
714 | errno_t |
715 | buf_setupl(buf_t bp, upl_t upl, uint32_t offset) |
716 | { |
717 | |
718 | if ( !(bp->b_lflags & BL_IOBUF) ) |
719 | return (EINVAL); |
720 | |
721 | if (upl) |
722 | bp->b_flags |= B_CLUSTER; |
723 | else |
724 | bp->b_flags &= ~B_CLUSTER; |
725 | bp->b_upl = upl; |
726 | bp->b_uploffset = offset; |
727 | |
728 | return (0); |
729 | } |
730 | |
731 | buf_t |
732 | buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg) |
733 | { |
734 | buf_t io_bp; |
735 | |
736 | if (io_offset < 0 || io_size < 0) |
737 | return (NULL); |
738 | |
739 | if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) |
740 | return (NULL); |
741 | |
742 | if (bp->b_flags & B_CLUSTER) { |
743 | if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) |
744 | return (NULL); |
745 | |
746 | if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount)) |
747 | return (NULL); |
748 | } |
749 | io_bp = alloc_io_buf(bp->b_vp, 0); |
750 | |
751 | io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA); |
752 | |
753 | if (iodone) { |
754 | io_bp->b_transaction = arg; |
755 | io_bp->b_iodone = iodone; |
756 | io_bp->b_flags |= B_CALL; |
757 | } |
758 | if (bp->b_flags & B_CLUSTER) { |
759 | io_bp->b_upl = bp->b_upl; |
760 | io_bp->b_uploffset = bp->b_uploffset + io_offset; |
761 | } else { |
762 | io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset); |
763 | } |
764 | io_bp->b_bcount = io_size; |
765 | |
766 | return (io_bp); |
767 | } |
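/*
 * Illustrative sketch (assumption, not from the original source): splitting a
 * buffer's transfer range in half with buf_clone().  For B_CLUSTER (UPL-backed)
 * buffers the split point must land on a page boundary relative to
 * b_uploffset, per the checks above.  example_done and ctx are hypothetical;
 * passing NULL/NULL attaches no completion callback.
 *
 *	uint32_t half = buf_count(bp) / 2;
 *	buf_t front = buf_clone(bp, 0, half, example_done, ctx);
 *	buf_t back  = buf_clone(bp, half, buf_count(bp) - half, example_done, ctx);
 */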
768 | |
769 | |
770 | int |
771 | buf_shadow(buf_t bp) |
772 | { |
773 | if (bp->b_lflags & BL_SHADOW) |
774 | return 1; |
775 | return 0; |
776 | } |
777 | |
778 | |
779 | buf_t |
780 | buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) |
781 | { |
782 | return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1)); |
783 | } |
784 | |
785 | buf_t |
786 | buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) |
787 | { |
788 | return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0)); |
789 | } |
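/*
 * A shadow created without force_copy shares the original's data (b_datap)
 * and is linked into the original's b_shadow chain; b_shadow_ref on the
 * original counts outstanding shadows, and while it is non-zero the original
 * is not returned to a free list (see bremfree_locked() and
 * buf_brelse_shadow()).
 */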
790 | |
791 | |
792 | static buf_t |
793 | buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv) |
794 | { |
795 | buf_t io_bp; |
796 | |
797 | KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0); |
798 | |
799 | if ( !(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) { |
800 | |
801 | KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0); |
802 | return (NULL); |
803 | } |
804 | #ifdef BUF_MAKE_PRIVATE |
805 | if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) |
806 | panic("buf_create_shadow: %p is in the private state (%d, %d)" , bp, bp->b_shadow_ref, bp->b_data_ref); |
807 | #endif |
808 | io_bp = alloc_io_buf(bp->b_vp, priv); |
809 | |
810 | io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA); |
811 | io_bp->b_blkno = bp->b_blkno; |
812 | io_bp->b_lblkno = bp->b_lblkno; |
813 | |
814 | if (iodone) { |
815 | io_bp->b_transaction = arg; |
816 | io_bp->b_iodone = iodone; |
817 | io_bp->b_flags |= B_CALL; |
818 | } |
819 | if (force_copy == FALSE) { |
820 | io_bp->b_bcount = bp->b_bcount; |
821 | io_bp->b_bufsize = bp->b_bufsize; |
822 | |
823 | if (external_storage) { |
824 | io_bp->b_datap = external_storage; |
825 | #ifdef BUF_MAKE_PRIVATE |
826 | io_bp->b_data_store = NULL; |
827 | #endif |
828 | } else { |
829 | io_bp->b_datap = bp->b_datap; |
830 | #ifdef BUF_MAKE_PRIVATE |
831 | io_bp->b_data_store = bp; |
832 | #endif |
833 | } |
834 | *(buf_t *)(&io_bp->b_orig) = bp; |
835 | |
836 | lck_mtx_lock_spin(buf_mtxp); |
837 | |
838 | io_bp->b_lflags |= BL_SHADOW; |
839 | io_bp->b_shadow = bp->b_shadow; |
840 | bp->b_shadow = io_bp; |
841 | bp->b_shadow_ref++; |
842 | |
843 | #ifdef BUF_MAKE_PRIVATE |
844 | if (external_storage) |
845 | io_bp->b_lflags |= BL_EXTERNAL; |
846 | else |
847 | bp->b_data_ref++; |
848 | #endif |
849 | lck_mtx_unlock(buf_mtxp); |
850 | } else { |
851 | if (external_storage) { |
852 | #ifdef BUF_MAKE_PRIVATE |
853 | io_bp->b_lflags |= BL_EXTERNAL; |
854 | #endif |
855 | io_bp->b_bcount = bp->b_bcount; |
856 | io_bp->b_bufsize = bp->b_bufsize; |
857 | io_bp->b_datap = external_storage; |
858 | } else { |
859 | allocbuf(io_bp, bp->b_bcount); |
860 | |
861 | io_bp->b_lflags |= BL_IOBUF_ALLOC; |
862 | } |
863 | bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount); |
864 | |
865 | #ifdef BUF_MAKE_PRIVATE |
866 | io_bp->b_data_store = NULL; |
867 | #endif |
868 | } |
869 | KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0); |
870 | |
871 | return (io_bp); |
872 | } |
873 | |
874 | |
875 | #ifdef BUF_MAKE_PRIVATE |
876 | errno_t |
877 | buf_make_private(buf_t bp) |
878 | { |
879 | buf_t ds_bp; |
880 | buf_t t_bp; |
881 | struct buf my_buf; |
882 | |
883 | KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0); |
884 | |
885 | if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) { |
886 | |
887 | KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); |
888 | return (EINVAL); |
889 | } |
890 | my_buf.b_flags = B_META; |
891 | my_buf.b_datap = (uintptr_t)NULL; |
892 | allocbuf(&my_buf, bp->b_bcount); |
893 | |
894 | bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount); |
895 | |
896 | lck_mtx_lock_spin(buf_mtxp); |
897 | |
898 | for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { |
		if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL))
900 | break; |
901 | } |
902 | ds_bp = t_bp; |
903 | |
904 | if (ds_bp == NULL && bp->b_data_ref) |
905 | panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL" ); |
906 | |
907 | if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) |
908 | panic("buf_make_private: ref_count == 0 && ds_bp != NULL" ); |
909 | |
910 | if (ds_bp == NULL) { |
911 | lck_mtx_unlock(buf_mtxp); |
912 | |
913 | buf_free_meta_store(&my_buf); |
914 | |
915 | KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); |
916 | return (EINVAL); |
917 | } |
918 | for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { |
919 | if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL)) |
920 | t_bp->b_data_store = ds_bp; |
921 | } |
922 | ds_bp->b_data_ref = bp->b_data_ref; |
923 | |
924 | bp->b_data_ref = 0; |
925 | bp->b_datap = my_buf.b_datap; |
926 | |
927 | lck_mtx_unlock(buf_mtxp); |
928 | |
929 | KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0); |
930 | return (0); |
931 | } |
932 | #endif |
933 | |
934 | |
935 | void |
936 | buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction, |
937 | void (**old_iodone)(buf_t, void *), void **old_transaction) |
938 | { |
939 | assert(ISSET(bp->b_lflags, BL_BUSY)); |
940 | |
941 | if (old_iodone) |
942 | *old_iodone = bp->b_iodone; |
943 | if (old_transaction) |
944 | *old_transaction = bp->b_transaction; |
945 | |
946 | bp->b_transaction = transaction; |
947 | bp->b_iodone = filter; |
948 | if (filter) |
949 | bp->b_flags |= B_FILTER; |
950 | else |
951 | bp->b_flags &= ~B_FILTER; |
952 | } |
953 | |
954 | |
955 | daddr64_t |
956 | buf_blkno(buf_t bp) { |
957 | |
958 | return (bp->b_blkno); |
959 | } |
960 | |
961 | daddr64_t |
962 | buf_lblkno(buf_t bp) { |
963 | |
964 | return (bp->b_lblkno); |
965 | } |
966 | |
967 | void |
968 | buf_setblkno(buf_t bp, daddr64_t blkno) { |
969 | |
970 | bp->b_blkno = blkno; |
971 | } |
972 | |
973 | void |
974 | buf_setlblkno(buf_t bp, daddr64_t lblkno) { |
975 | |
976 | bp->b_lblkno = lblkno; |
977 | } |
978 | |
979 | dev_t |
980 | buf_device(buf_t bp) { |
981 | |
982 | return (bp->b_dev); |
983 | } |
984 | |
985 | errno_t |
986 | buf_setdevice(buf_t bp, vnode_t vp) { |
987 | |
988 | if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) |
989 | return EINVAL; |
990 | bp->b_dev = vp->v_rdev; |
991 | |
992 | return 0; |
993 | } |
994 | |
995 | |
996 | void * |
997 | buf_drvdata(buf_t bp) { |
998 | |
999 | return (bp->b_drvdata); |
1000 | } |
1001 | |
1002 | void |
1003 | buf_setdrvdata(buf_t bp, void *drvdata) { |
1004 | |
1005 | bp->b_drvdata = drvdata; |
1006 | } |
1007 | |
1008 | void * |
1009 | buf_fsprivate(buf_t bp) { |
1010 | |
1011 | return (bp->b_fsprivate); |
1012 | } |
1013 | |
1014 | void |
1015 | buf_setfsprivate(buf_t bp, void *fsprivate) { |
1016 | |
1017 | bp->b_fsprivate = fsprivate; |
1018 | } |
1019 | |
1020 | kauth_cred_t |
1021 | buf_rcred(buf_t bp) { |
1022 | |
1023 | return (bp->b_rcred); |
1024 | } |
1025 | |
1026 | kauth_cred_t |
1027 | buf_wcred(buf_t bp) { |
1028 | |
1029 | return (bp->b_wcred); |
1030 | } |
1031 | |
1032 | void * |
1033 | buf_upl(buf_t bp) { |
1034 | |
1035 | return (bp->b_upl); |
1036 | } |
1037 | |
1038 | uint32_t |
1039 | buf_uploffset(buf_t bp) { |
1040 | |
1041 | return ((uint32_t)(bp->b_uploffset)); |
1042 | } |
1043 | |
1044 | proc_t |
1045 | buf_proc(buf_t bp) { |
1046 | |
1047 | return (bp->b_proc); |
1048 | } |
1049 | |
1050 | |
1051 | errno_t |
1052 | buf_map(buf_t bp, caddr_t *io_addr) |
1053 | { |
1054 | buf_t real_bp; |
1055 | vm_offset_t vaddr; |
1056 | kern_return_t kret; |
1057 | |
1058 | if ( !(bp->b_flags & B_CLUSTER)) { |
1059 | *io_addr = (caddr_t)bp->b_datap; |
1060 | return (0); |
1061 | } |
1062 | real_bp = (buf_t)(bp->b_real_bp); |
1063 | |
1064 | if (real_bp && real_bp->b_datap) { |
		/*
		 * b_real_bp is only valid if B_CLUSTER is SET;
		 * if it's non-zero, then someone did a cluster_bp call.
		 * If the backing physical pages were already mapped
		 * in before the call to cluster_bp (non-zero b_datap),
		 * then we just use that mapping.
		 */
1072 | *io_addr = (caddr_t)real_bp->b_datap; |
1073 | return (0); |
1074 | } |
1075 | kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */ |
1076 | |
1077 | if (kret != KERN_SUCCESS) { |
1078 | *io_addr = NULL; |
1079 | |
1080 | return(ENOMEM); |
1081 | } |
1082 | vaddr += bp->b_uploffset; |
1083 | |
1084 | *io_addr = (caddr_t)vaddr; |
1085 | |
1086 | return (0); |
1087 | } |
1088 | |
1089 | errno_t |
1090 | buf_unmap(buf_t bp) |
1091 | { |
1092 | buf_t real_bp; |
1093 | kern_return_t kret; |
1094 | |
1095 | if ( !(bp->b_flags & B_CLUSTER)) |
1096 | return (0); |
1097 | /* |
1098 | * see buf_map for the explanation |
1099 | */ |
1100 | real_bp = (buf_t)(bp->b_real_bp); |
1101 | |
1102 | if (real_bp && real_bp->b_datap) |
1103 | return (0); |
1104 | |
1105 | if ((bp->b_lflags & BL_IOBUF) && |
1106 | ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) { |
1107 | /* |
1108 | * ignore pageins... the 'right' thing will |
1109 | * happen due to the way we handle speculative |
1110 | * clusters... |
1111 | * |
1112 | * when we commit these pages, we'll hit |
1113 | * it with UPL_COMMIT_INACTIVE which |
1114 | * will clear the reference bit that got |
1115 | * turned on when we touched the mapping |
1116 | */ |
1117 | bp->b_flags |= B_AGE; |
1118 | } |
1119 | kret = ubc_upl_unmap(bp->b_upl); |
1120 | |
1121 | if (kret != KERN_SUCCESS) |
1122 | return (EINVAL); |
1123 | return (0); |
1124 | } |
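/*
 * buf_clear() below is the typical buf_map()/buf_unmap() pairing: map the
 * buffer to obtain a kernel-virtual address, touch the data, then drop the
 * mapping.
 */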
1125 | |
1126 | |
1127 | void |
1128 | buf_clear(buf_t bp) { |
1129 | caddr_t baddr; |
1130 | |
1131 | if (buf_map(bp, &baddr) == 0) { |
1132 | bzero(baddr, bp->b_bcount); |
1133 | buf_unmap(bp); |
1134 | } |
1135 | bp->b_resid = 0; |
1136 | } |
1137 | |
1138 | /* |
1139 | * Read or write a buffer that is not contiguous on disk. |
1140 | * buffer is marked done/error at the conclusion |
1141 | */ |
1142 | static int |
1143 | buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes) |
1144 | { |
1145 | vnode_t vp = buf_vnode(bp); |
1146 | buf_t io_bp; /* For reading or writing a single block */ |
1147 | int io_direction; |
1148 | int io_resid; |
1149 | size_t io_contig_bytes; |
1150 | daddr64_t io_blkno; |
1151 | int error = 0; |
1152 | int bmap_flags; |
1153 | |
1154 | /* |
1155 | * save our starting point... the bp was already mapped |
1156 | * in buf_strategy before we got called |
1157 | * no sense doing it again. |
1158 | */ |
1159 | io_blkno = bp->b_blkno; |
1160 | /* |
1161 | * Make sure we redo this mapping for the next I/O |
1162 | * i.e. this can never be a 'permanent' mapping |
1163 | */ |
1164 | bp->b_blkno = bp->b_lblkno; |
1165 | |
1166 | /* |
1167 | * Get an io buffer to do the deblocking |
1168 | */ |
1169 | io_bp = alloc_io_buf(devvp, 0); |
1170 | |
1171 | io_bp->b_lblkno = bp->b_lblkno; |
1172 | io_bp->b_datap = bp->b_datap; |
1173 | io_resid = bp->b_bcount; |
1174 | io_direction = bp->b_flags & B_READ; |
1175 | io_contig_bytes = contig_bytes; |
1176 | |
1177 | if (bp->b_flags & B_READ) |
1178 | bmap_flags = VNODE_READ; |
1179 | else |
1180 | bmap_flags = VNODE_WRITE; |
1181 | |
1182 | for (;;) { |
1183 | if (io_blkno == -1) |
			/*
			 * this is unexpected, but we'll allow for it
			 */
1187 | bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes); |
1188 | else { |
1189 | io_bp->b_bcount = io_contig_bytes; |
1190 | io_bp->b_bufsize = io_contig_bytes; |
1191 | io_bp->b_resid = io_contig_bytes; |
1192 | io_bp->b_blkno = io_blkno; |
1193 | |
1194 | buf_reset(io_bp, io_direction); |
1195 | |
1196 | /* |
1197 | * Call the device to do the I/O and wait for it. Make sure the appropriate party is charged for write |
1198 | */ |
1199 | |
1200 | if (!ISSET(bp->b_flags, B_READ)) |
1201 | OSAddAtomic(1, &devvp->v_numoutput); |
1202 | |
1203 | if ((error = VNOP_STRATEGY(io_bp))) |
1204 | break; |
1205 | if ((error = (int)buf_biowait(io_bp))) |
1206 | break; |
1207 | if (io_bp->b_resid) { |
1208 | io_resid -= (io_contig_bytes - io_bp->b_resid); |
1209 | break; |
1210 | } |
1211 | } |
1212 | if ((io_resid -= io_contig_bytes) == 0) |
1213 | break; |
1214 | f_offset += io_contig_bytes; |
1215 | io_bp->b_datap += io_contig_bytes; |
1216 | |
1217 | /* |
1218 | * Map the current position to a physical block number |
1219 | */ |
1220 | if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) |
1221 | break; |
1222 | } |
1223 | buf_free(io_bp); |
1224 | |
1225 | if (error) |
1226 | buf_seterror(bp, error); |
1227 | bp->b_resid = io_resid; |
1228 | /* |
1229 | * This I/O is now complete |
1230 | */ |
1231 | buf_biodone(bp); |
1232 | |
1233 | return error; |
1234 | } |
1235 | |
1236 | |
1237 | /* |
1238 | * struct vnop_strategy_args { |
1239 | * struct buf *a_bp; |
1240 | * } *ap; |
1241 | */ |
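/*
 * Illustrative sketch (assumption, not from the original source): a
 * filesystem's strategy entry point typically just hands the buffer to
 * the underlying device vnode via buf_strategy().  EXAMPLEFS_DEVVP is a
 * hypothetical per-mount accessor.
 *
 *	int
 *	examplefs_vnop_strategy(struct vnop_strategy_args *ap)
 *	{
 *		vnode_t devvp = EXAMPLEFS_DEVVP(buf_vnode(ap->a_bp));
 *
 *		return (buf_strategy(devvp, ap));
 *	}
 */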
1242 | errno_t |
1243 | buf_strategy(vnode_t devvp, void *ap) |
1244 | { |
1245 | buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp; |
1246 | vnode_t vp = bp->b_vp; |
1247 | int bmap_flags; |
1248 | errno_t error; |
1249 | #if CONFIG_DTRACE |
1250 | int dtrace_io_start_flag = 0; /* We only want to trip the io:::start |
1251 | * probe once, with the true physical |
1252 | * block in place (b_blkno) |
1253 | */ |
1254 | |
1255 | #endif |
1256 | |
1257 | if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) |
1258 | panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n" ); |
	/*
	 * associate the physical device with
	 * this buf_t even if we don't
	 * end up issuing the I/O...
	 */
1264 | bp->b_dev = devvp->v_rdev; |
1265 | |
1266 | if (bp->b_flags & B_READ) |
1267 | bmap_flags = VNODE_READ; |
1268 | else |
1269 | bmap_flags = VNODE_WRITE; |
1270 | |
1271 | if ( !(bp->b_flags & B_CLUSTER)) { |
1272 | |
1273 | if ( (bp->b_upl) ) { |
1274 | /* |
1275 | * we have a UPL associated with this bp |
1276 | * go through cluster_bp which knows how |
1277 | * to deal with filesystem block sizes |
1278 | * that aren't equal to the page size |
1279 | */ |
1280 | DTRACE_IO1(start, buf_t, bp); |
1281 | return (cluster_bp(bp)); |
1282 | } |
1283 | if (bp->b_blkno == bp->b_lblkno) { |
1284 | off_t f_offset; |
1285 | size_t contig_bytes; |
1286 | |
1287 | if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) { |
1288 | DTRACE_IO1(start, buf_t, bp); |
1289 | buf_seterror(bp, error); |
1290 | buf_biodone(bp); |
1291 | |
1292 | return (error); |
1293 | } |
1294 | |
1295 | if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) { |
1296 | DTRACE_IO1(start, buf_t, bp); |
1297 | buf_seterror(bp, error); |
1298 | buf_biodone(bp); |
1299 | |
1300 | return (error); |
1301 | } |
1302 | |
1303 | DTRACE_IO1(start, buf_t, bp); |
1304 | #if CONFIG_DTRACE |
1305 | dtrace_io_start_flag = 1; |
1306 | #endif /* CONFIG_DTRACE */ |
1307 | |
1308 | if ((bp->b_blkno == -1) || (contig_bytes == 0)) { |
1309 | /* Set block number to force biodone later */ |
1310 | bp->b_blkno = -1; |
1311 | buf_clear(bp); |
1312 | } |
1313 | else if ((long)contig_bytes < bp->b_bcount) { |
1314 | return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes)); |
1315 | } |
1316 | } |
1317 | |
1318 | #if CONFIG_DTRACE |
1319 | if (dtrace_io_start_flag == 0) { |
1320 | DTRACE_IO1(start, buf_t, bp); |
1321 | dtrace_io_start_flag = 1; |
1322 | } |
1323 | #endif /* CONFIG_DTRACE */ |
1324 | |
1325 | if (bp->b_blkno == -1) { |
1326 | buf_biodone(bp); |
1327 | return (0); |
1328 | } |
1329 | } |
1330 | |
1331 | #if CONFIG_DTRACE |
1332 | if (dtrace_io_start_flag == 0) |
1333 | DTRACE_IO1(start, buf_t, bp); |
1334 | #endif /* CONFIG_DTRACE */ |
1335 | |
1336 | #if CONFIG_PROTECT |
	/* Capture f_offset in the bufattr */
1338 | cpx_t cpx = bufattr_cpx(buf_attr(bp)); |
1339 | if (cpx) { |
1340 | /* No need to go here for older EAs */ |
1341 | if(cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) { |
1342 | off_t f_offset; |
1343 | if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) |
1344 | return error; |
1345 | |
1346 | /* |
1347 | * Attach the file offset to this buffer. The |
1348 | * bufattr attributes will be passed down the stack |
1349 | * until they reach the storage driver (whether |
1350 | * IOFlashStorage, ASP, or IONVMe). The driver |
1351 | * will retain the offset in a local variable when it |
1352 | * issues its I/Os to the NAND controller. |
1353 | * |
1354 | * Note that LwVM may end up splitting this I/O |
1355 | * into sub-I/Os if it crosses a chunk boundary. In this |
1356 | * case, LwVM will update this field when it dispatches |
1357 | * each I/O to IOFlashStorage. But from our perspective |
1358 | * we have only issued a single I/O. |
1359 | * |
1360 | * In the case of APFS we do not bounce through another |
1361 | * intermediate layer (such as CoreStorage). APFS will |
1362 | * issue the I/Os directly to the block device / IOMedia |
1363 | * via buf_strategy on the specfs node. |
1364 | */ |
1365 | buf_setcpoff(bp, f_offset); |
1366 | CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0); |
1367 | } |
1368 | } |
1369 | #endif |
1370 | |
1371 | /* |
1372 | * we can issue the I/O because... |
1373 | * either B_CLUSTER is set which |
1374 | * means that the I/O is properly set |
1375 | * up to be a multiple of the page size, or |
1376 | * we were able to successfully set up the |
1377 | * physical block mapping |
1378 | */ |
1379 | error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap); |
1380 | DTRACE_FSINFO(strategy, vnode_t, vp); |
1381 | return (error); |
1382 | } |
1383 | |
1384 | |
1385 | |
1386 | buf_t |
1387 | buf_alloc(vnode_t vp) |
1388 | { |
1389 | return(alloc_io_buf(vp, is_vm_privileged())); |
1390 | } |
1391 | |
1392 | void |
1393 | buf_free(buf_t bp) { |
1394 | |
1395 | free_io_buf(bp); |
1396 | } |
1397 | |
1398 | |
1399 | /* |
1400 | * iterate buffers for the specified vp. |
1401 | * if BUF_SCAN_DIRTY is set, do the dirty list |
1402 | * if BUF_SCAN_CLEAN is set, do the clean list |
1403 | * if neither flag is set, default to BUF_SCAN_DIRTY |
1404 | * if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages |
1405 | */ |
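/*
 * Illustrative sketch (assumption, not from the original source): a callout
 * that pushes acquired dirty buffers to disk and releases the rest.  Because
 * the callout consumes each acquired buffer itself, it returns BUF_CLAIMED;
 * returning BUF_RETURNED would ask buf_iterate() to buf_brelse() it instead.
 *
 *	static int
 *	example_flush_callout(buf_t bp, __unused void *arg)
 *	{
 *		if (bp == NULL)		// busy buffer, only seen with BUF_NOTIFY_BUSY
 *			return (BUF_CLAIMED);
 *		if (buf_flags(bp) & B_DELWRI)
 *			(void) buf_bawrite(bp);
 *		else
 *			buf_brelse(bp);
 *		return (BUF_CLAIMED);
 *	}
 *
 *	buf_iterate(vp, example_flush_callout, BUF_SCAN_DIRTY | BUF_SKIP_LOCKED, NULL);
 */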
1406 | |
1407 | struct buf_iterate_info_t { |
1408 | int flag; |
1409 | struct buflists *listhead; |
1410 | }; |
1411 | |
1412 | void |
1413 | buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) |
1414 | { |
1415 | buf_t bp; |
1416 | int retval; |
1417 | struct buflists local_iterblkhd; |
1418 | int lock_flags = BAC_NOWAIT | BAC_REMOVE; |
1419 | int notify_busy = flags & BUF_NOTIFY_BUSY; |
1420 | struct buf_iterate_info_t list[2]; |
1421 | int num_lists, i; |
1422 | |
1423 | if (flags & BUF_SKIP_LOCKED) |
1424 | lock_flags |= BAC_SKIP_LOCKED; |
1425 | if (flags & BUF_SKIP_NONLOCKED) |
1426 | lock_flags |= BAC_SKIP_NONLOCKED; |
1427 | |
1428 | if ( !(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) |
1429 | flags |= BUF_SCAN_DIRTY; |
1430 | |
1431 | num_lists = 0; |
1432 | |
1433 | if (flags & BUF_SCAN_DIRTY) { |
1434 | list[num_lists].flag = VBI_DIRTY; |
1435 | list[num_lists].listhead = &vp->v_dirtyblkhd; |
1436 | num_lists++; |
1437 | } |
1438 | if (flags & BUF_SCAN_CLEAN) { |
1439 | list[num_lists].flag = VBI_CLEAN; |
1440 | list[num_lists].listhead = &vp->v_cleanblkhd; |
1441 | num_lists++; |
1442 | } |
1443 | |
1444 | for (i = 0; i < num_lists; i++) { |
1445 | lck_mtx_lock(buf_mtxp); |
1446 | |
1447 | if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) { |
1448 | lck_mtx_unlock(buf_mtxp); |
1449 | continue; |
1450 | } |
1451 | while (!LIST_EMPTY(&local_iterblkhd)) { |
1452 | bp = LIST_FIRST(&local_iterblkhd); |
1453 | LIST_REMOVE(bp, b_vnbufs); |
1454 | LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs); |
1455 | |
1456 | if (buf_acquire_locked(bp, lock_flags, 0, 0)) { |
1457 | if (notify_busy) { |
1458 | bp = NULL; |
1459 | } else { |
1460 | continue; |
1461 | } |
1462 | } |
1463 | |
1464 | lck_mtx_unlock(buf_mtxp); |
1465 | |
1466 | retval = callout(bp, arg); |
1467 | |
1468 | switch (retval) { |
1469 | case BUF_RETURNED: |
1470 | if (bp) |
1471 | buf_brelse(bp); |
1472 | break; |
1473 | case BUF_CLAIMED: |
1474 | break; |
1475 | case BUF_RETURNED_DONE: |
1476 | if (bp) |
1477 | buf_brelse(bp); |
1478 | lck_mtx_lock(buf_mtxp); |
1479 | goto out; |
1480 | case BUF_CLAIMED_DONE: |
1481 | lck_mtx_lock(buf_mtxp); |
1482 | goto out; |
1483 | } |
1484 | lck_mtx_lock(buf_mtxp); |
1485 | } /* while list has more nodes */ |
1486 | out: |
1487 | buf_itercomplete(vp, &local_iterblkhd, list[i].flag); |
1488 | lck_mtx_unlock(buf_mtxp); |
1489 | } /* for each list */ |
1490 | } /* buf_iterate */ |
1491 | |
1492 | |
1493 | /* |
1494 | * Flush out and invalidate all buffers associated with a vnode. |
1495 | */ |
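/*
 * Illustrative sketch (assumption, not from the original source): a typical
 * reclaim-time caller writes dirty data first and discards the rest:
 *
 *	(void) buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
 *
 * BUF_WRITE_DATA pushes B_DELWRI buffers through VNOP_BWRITE before they are
 * invalidated; without it, dirty buffers are simply discarded.
 */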
1496 | int |
1497 | buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) |
1498 | { |
1499 | buf_t bp; |
1500 | int aflags; |
1501 | int error = 0; |
1502 | int must_rescan = 1; |
1503 | struct buflists local_iterblkhd; |
1504 | |
1505 | |
1506 | if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) |
1507 | return (0); |
1508 | |
1509 | lck_mtx_lock(buf_mtxp); |
1510 | |
1511 | for (;;) { |
1512 | if (must_rescan == 0) |
1513 | /* |
1514 | * the lists may not be empty, but all that's left at this |
1515 | * point are metadata or B_LOCKED buffers which are being |
1516 | * skipped... we know this because we made it through both |
1517 | * the clean and dirty lists without dropping buf_mtxp... |
1518 | * each time we drop buf_mtxp we bump "must_rescan" |
1519 | */ |
1520 | break; |
1521 | if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) |
1522 | break; |
1523 | must_rescan = 0; |
1524 | /* |
1525 | * iterate the clean list |
1526 | */ |
1527 | if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) { |
1528 | goto try_dirty_list; |
1529 | } |
1530 | while (!LIST_EMPTY(&local_iterblkhd)) { |
1531 | |
1532 | bp = LIST_FIRST(&local_iterblkhd); |
1533 | |
1534 | LIST_REMOVE(bp, b_vnbufs); |
1535 | LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs); |
1536 | |
1537 | /* |
1538 | * some filesystems distinguish meta data blocks with a negative logical block # |
1539 | */ |
1540 | if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) |
1541 | continue; |
1542 | |
1543 | aflags = BAC_REMOVE; |
1544 | |
1545 | if ( !(flags & BUF_INVALIDATE_LOCKED) ) |
1546 | aflags |= BAC_SKIP_LOCKED; |
1547 | |
1548 | if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) { |
1549 | if (error == EDEADLK) |
				/*
				 * this buffer was marked B_LOCKED...
				 * we didn't drop buf_mtxp, so we
				 * don't need to rescan
				 */
1555 | continue; |
1556 | if (error == EAGAIN) { |
1557 | /* |
1558 | * found a busy buffer... we blocked and |
1559 | * dropped buf_mtxp, so we're going to |
1560 | * need to rescan after this pass is completed |
1561 | */ |
1562 | must_rescan++; |
1563 | continue; |
1564 | } |
1565 | /* |
1566 | * got some kind of 'real' error out of the msleep |
1567 | * in buf_acquire_locked, terminate the scan and return the error |
1568 | */ |
1569 | buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN); |
1570 | |
1571 | lck_mtx_unlock(buf_mtxp); |
1572 | return (error); |
1573 | } |
1574 | lck_mtx_unlock(buf_mtxp); |
1575 | |
1576 | if (bp->b_flags & B_LOCKED) |
1577 | KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0); |
1578 | |
1579 | CLR(bp->b_flags, B_LOCKED); |
1580 | SET(bp->b_flags, B_INVAL); |
1581 | buf_brelse(bp); |
1582 | |
1583 | lck_mtx_lock(buf_mtxp); |
1584 | |
1585 | /* |
1586 | * by dropping buf_mtxp, we allow new |
1587 | * buffers to be added to the vnode list(s) |
1588 | * we'll have to rescan at least once more |
1589 | * if the queues aren't empty |
1590 | */ |
1591 | must_rescan++; |
1592 | } |
1593 | buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN); |
1594 | |
1595 | try_dirty_list: |
1596 | /* |
1597 | * Now iterate on dirty blks |
1598 | */ |
1599 | if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) { |
1600 | continue; |
1601 | } |
1602 | while (!LIST_EMPTY(&local_iterblkhd)) { |
1603 | bp = LIST_FIRST(&local_iterblkhd); |
1604 | |
1605 | LIST_REMOVE(bp, b_vnbufs); |
1606 | LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs); |
1607 | |
1608 | /* |
1609 | * some filesystems distinguish meta data blocks with a negative logical block # |
1610 | */ |
1611 | if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) |
1612 | continue; |
1613 | |
1614 | aflags = BAC_REMOVE; |
1615 | |
1616 | if ( !(flags & BUF_INVALIDATE_LOCKED) ) |
1617 | aflags |= BAC_SKIP_LOCKED; |
1618 | |
1619 | if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) { |
1620 | if (error == EDEADLK) |
				/*
				 * this buffer was marked B_LOCKED...
				 * we didn't drop buf_mtxp, so we
				 * don't need to rescan
				 */
1626 | continue; |
1627 | if (error == EAGAIN) { |
1628 | /* |
1629 | * found a busy buffer... we blocked and |
1630 | * dropped buf_mtxp, so we're going to |
1631 | * need to rescan after this pass is completed |
1632 | */ |
1633 | must_rescan++; |
1634 | continue; |
1635 | } |
1636 | /* |
1637 | * got some kind of 'real' error out of the msleep |
1638 | * in buf_acquire_locked, terminate the scan and return the error |
1639 | */ |
1640 | buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY); |
1641 | |
1642 | lck_mtx_unlock(buf_mtxp); |
1643 | return (error); |
1644 | } |
1645 | lck_mtx_unlock(buf_mtxp); |
1646 | |
1647 | if (bp->b_flags & B_LOCKED) |
1648 | KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0); |
1649 | |
1650 | CLR(bp->b_flags, B_LOCKED); |
1651 | SET(bp->b_flags, B_INVAL); |
1652 | |
1653 | if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) |
1654 | (void) VNOP_BWRITE(bp); |
1655 | else |
1656 | buf_brelse(bp); |
1657 | |
1658 | lck_mtx_lock(buf_mtxp); |
1659 | /* |
1660 | * by dropping buf_mtxp, we allow new |
1661 | * buffers to be added to the vnode list(s) |
1662 | * we'll have to rescan at least once more |
1663 | * if the queues aren't empty |
1664 | */ |
1665 | must_rescan++; |
1666 | } |
1667 | buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY); |
1668 | } |
1669 | lck_mtx_unlock(buf_mtxp); |
1670 | |
1671 | return (0); |
1672 | } |
1673 | |
1674 | void |
1675 | buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) { |
1676 | |
1677 | (void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg); |
1678 | return; |
1679 | } |
1680 | |
1681 | int |
1682 | buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg) { |
1683 | buf_t bp; |
1684 | int writes_issued = 0; |
1685 | errno_t error; |
1686 | int busy = 0; |
1687 | struct buflists local_iterblkhd; |
1688 | int lock_flags = BAC_NOWAIT | BAC_REMOVE; |
1689 | int any_locked = 0; |
1690 | |
1691 | if (flags & BUF_SKIP_LOCKED) |
1692 | lock_flags |= BAC_SKIP_LOCKED; |
1693 | if (flags & BUF_SKIP_NONLOCKED) |
1694 | lock_flags |= BAC_SKIP_NONLOCKED; |
1695 | loop: |
1696 | lck_mtx_lock(buf_mtxp); |
1697 | |
1698 | if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) { |
1699 | while (!LIST_EMPTY(&local_iterblkhd)) { |
1700 | bp = LIST_FIRST(&local_iterblkhd); |
1701 | LIST_REMOVE(bp, b_vnbufs); |
1702 | LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs); |
1703 | |
1704 | if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) { |
1705 | busy++; |
1706 | } |
1707 | if (error) { |
1708 | /* |
1709 | * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED, |
				 * we may want to do something differently if a locked or unlocked
1711 | * buffer was encountered (depending on the arg specified). |
1712 | * In this case, we know that one of those two was set, and the |
1713 | * buf acquisition failed above. |
1714 | * |
1715 | * If it failed with EDEADLK, then save state which can be emitted |
1716 | * later on to the caller. Most callers should not care. |
1717 | */ |
1718 | if (error == EDEADLK) { |
1719 | any_locked++; |
1720 | } |
1721 | continue; |
1722 | } |
1723 | lck_mtx_unlock(buf_mtxp); |
1724 | |
1725 | bp->b_flags &= ~B_LOCKED; |
1726 | |
1727 | /* |
1728 | * Wait for I/O associated with indirect blocks to complete, |
1729 | * since there is no way to quickly wait for them below. |
1730 | */ |
1731 | if ((bp->b_vp == vp) || (wait == 0)) |
1732 | (void) buf_bawrite(bp); |
1733 | else |
1734 | (void) VNOP_BWRITE(bp); |
1735 | writes_issued++; |
1736 | |
1737 | lck_mtx_lock(buf_mtxp); |
1738 | } |
1739 | buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY); |
1740 | } |
1741 | lck_mtx_unlock(buf_mtxp); |
1742 | |
1743 | if (wait) { |
1744 | (void)vnode_waitforwrites(vp, 0, 0, 0, msg); |
1745 | |
1746 | if (vp->v_dirtyblkhd.lh_first && busy) { |
1747 | /* |
1748 | * we had one or more BUSY buffers on |
1749 | * the dirtyblock list... most likely |
1750 | * these are due to delayed writes that |
1751 | * were moved to the bclean queue but |
1752 | * have not yet been 'written'. |
1753 | * if we issued some writes on the |
1754 | * previous pass, we try again immediately |
1755 | * if we didn't, we'll sleep for some time |
1756 | * to allow the state to change... |
1757 | */ |
1758 | if (writes_issued == 0) { |
1759 | (void)tsleep((caddr_t)&vp->v_numoutput, |
1760 | PRIBIO + 1, "vnode_flushdirtyblks" , hz/20); |
1761 | } |
1762 | writes_issued = 0; |
1763 | busy = 0; |
1764 | |
1765 | goto loop; |
1766 | } |
1767 | } |
1768 | |
1769 | return any_locked; |
1770 | } |
1771 | |
1772 | |
1773 | /* |
1774 | * called with buf_mtxp held... |
1775 | * this lock protects the queue manipulation |
1776 | */ |
1777 | static int |
1778 | buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags) |
1779 | { |
1780 | struct buflists * listheadp; |
1781 | |
1782 | if (flags & VBI_DIRTY) |
1783 | listheadp = &vp->v_dirtyblkhd; |
1784 | else |
1785 | listheadp = &vp->v_cleanblkhd; |
1786 | |
1787 | while (vp->v_iterblkflags & VBI_ITER) { |
1788 | vp->v_iterblkflags |= VBI_ITERWANT; |
1789 | msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare" , NULL); |
1790 | } |
1791 | if (LIST_EMPTY(listheadp)) { |
1792 | LIST_INIT(iterheadp); |
1793 | return(EINVAL); |
1794 | } |
1795 | vp->v_iterblkflags |= VBI_ITER; |
1796 | |
1797 | iterheadp->lh_first = listheadp->lh_first; |
1798 | listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first; |
1799 | LIST_INIT(listheadp); |
1800 | |
1801 | return(0); |
1802 | } |
1803 | |
1804 | /* |
1805 | * called with buf_mtxp held... |
1806 | * this lock protects the queue manipulation |
1807 | */ |
1808 | static void |
1809 | buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags) |
1810 | { |
1811 | struct buflists * listheadp; |
1812 | buf_t bp; |
1813 | |
1814 | if (flags & VBI_DIRTY) |
1815 | listheadp = &vp->v_dirtyblkhd; |
1816 | else |
1817 | listheadp = &vp->v_cleanblkhd; |
1818 | |
1819 | while (!LIST_EMPTY(iterheadp)) { |
1820 | bp = LIST_FIRST(iterheadp); |
1821 | LIST_REMOVE(bp, b_vnbufs); |
1822 | LIST_INSERT_HEAD(listheadp, bp, b_vnbufs); |
1823 | } |
1824 | vp->v_iterblkflags &= ~VBI_ITER; |
1825 | |
1826 | if (vp->v_iterblkflags & VBI_ITERWANT) { |
1827 | vp->v_iterblkflags &= ~VBI_ITERWANT; |
1828 | wakeup(&vp->v_iterblkflags); |
1829 | } |
1830 | } |
1831 | |
1832 | |
1833 | static void |
1834 | bremfree_locked(buf_t bp) |
1835 | { |
1836 | struct bqueues *dp = NULL; |
1837 | int whichq; |
1838 | |
1839 | whichq = bp->b_whichq; |
1840 | |
1841 | if (whichq == -1) { |
1842 | if (bp->b_shadow_ref == 0) |
1843 | panic("bremfree_locked: %p not on freelist" , bp); |
1844 | /* |
1845 | * there are clones pointing to 'bp'... |
1846 | * therefore, it was not put on a freelist |
1847 | * when buf_brelse was last called on 'bp' |
1848 | */ |
1849 | return; |
1850 | } |
1851 | /* |
1852 | * We only calculate the head of the freelist when removing |
1853 | * the last element of the list as that is the only time that |
1854 | * it is needed (e.g. to reset the tail pointer). |
1855 | * |
1856 | * NB: This makes an assumption about how tailq's are implemented. |
1857 | */ |
1858 | if (bp->b_freelist.tqe_next == NULL) { |
1859 | dp = &bufqueues[whichq]; |
1860 | |
1861 | if (dp->tqh_last != &bp->b_freelist.tqe_next) |
1862 | panic("bremfree: lost tail" ); |
1863 | } |
1864 | TAILQ_REMOVE(dp, bp, b_freelist); |
1865 | |
1866 | if (whichq == BQ_LAUNDRY) |
1867 | blaundrycnt--; |
1868 | |
1869 | bp->b_whichq = -1; |
1870 | bp->b_timestamp = 0; |
1871 | bp->b_shadow = 0; |
1872 | } |
1873 | |
1874 | /* |
1875 | * Associate a buffer with a vnode. |
1876 | * buf_mtxp must be locked on entry |
1877 | */ |
1878 | static void |
1879 | bgetvp_locked(vnode_t vp, buf_t bp) |
1880 | { |
1881 | |
1882 | if (bp->b_vp != vp) |
1883 | panic("bgetvp_locked: not free" ); |
1884 | |
1885 | if (vp->v_type == VBLK || vp->v_type == VCHR) |
1886 | bp->b_dev = vp->v_rdev; |
1887 | else |
1888 | bp->b_dev = NODEV; |
1889 | /* |
1890 | * Insert onto list for new vnode. |
1891 | */ |
1892 | bufinsvn(bp, &vp->v_cleanblkhd); |
1893 | } |
1894 | |
1895 | /* |
1896 | * Disassociate a buffer from a vnode. |
1897 | * buf_mtxp must be locked on entry |
1898 | */ |
1899 | static void |
1900 | brelvp_locked(buf_t bp) |
1901 | { |
1902 | /* |
1903 | * Delete from old vnode list, if on one. |
1904 | */ |
1905 | if (bp->b_vnbufs.le_next != NOLIST) |
1906 | bufremvn(bp); |
1907 | |
1908 | bp->b_vp = (vnode_t)NULL; |
1909 | } |
1910 | |
1911 | /* |
1912 | * Reassign a buffer from one vnode to another. |
1913 | * Used to assign file specific control information |
1914 | * (indirect blocks) to the vnode to which they belong. |
1915 | */ |
1916 | static void |
1917 | buf_reassign(buf_t bp, vnode_t newvp) |
1918 | { |
1919 | struct buflists *listheadp; |
1920 | |
1921 | if (newvp == NULL) { |
1922 | printf("buf_reassign: NULL" ); |
1923 | return; |
1924 | } |
1925 | lck_mtx_lock_spin(buf_mtxp); |
1926 | |
1927 | /* |
1928 | * Delete from old vnode list, if on one. |
1929 | */ |
1930 | if (bp->b_vnbufs.le_next != NOLIST) |
1931 | bufremvn(bp); |
1932 | /* |
1933 | * If dirty, put on list of dirty buffers; |
1934 | * otherwise insert onto list of clean buffers. |
1935 | */ |
1936 | if (ISSET(bp->b_flags, B_DELWRI)) |
1937 | listheadp = &newvp->v_dirtyblkhd; |
1938 | else |
1939 | listheadp = &newvp->v_cleanblkhd; |
1940 | bufinsvn(bp, listheadp); |
1941 | |
1942 | lck_mtx_unlock(buf_mtxp); |
1943 | } |
1944 | |
1945 | static __inline__ void |
1946 | bufhdrinit(buf_t bp) |
1947 | { |
1948 | bzero((char *)bp, sizeof *bp); |
1949 | bp->b_dev = NODEV; |
1950 | bp->b_rcred = NOCRED; |
1951 | bp->b_wcred = NOCRED; |
1952 | bp->b_vnbufs.le_next = NOLIST; |
1953 | bp->b_flags = B_INVAL; |
1954 | |
1955 | return; |
1956 | } |
1957 | |
1958 | /* |
1959 | * Initialize buffers and hash links for buffers. |
1960 | */ |
1961 | __private_extern__ void |
1962 | bufinit(void) |
1963 | { |
1964 | buf_t bp; |
1965 | struct bqueues *dp; |
1966 | int i; |
1967 | |
1968 | nbuf_headers = 0; |
1969 | /* Initialize the buffer queues ('freelists') and the hash table */ |
1970 | for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) |
1971 | TAILQ_INIT(dp); |
1972 | bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash); |
1973 | |
1974 | buf_busycount = 0; |
1975 | |
1976 | /* Initialize the buffer headers */ |
1977 | for (i = 0; i < max_nbuf_headers; i++) { |
1978 | nbuf_headers++; |
1979 | bp = &buf_headers[i]; |
1980 | bufhdrinit(bp); |
1981 | |
1982 | BLISTNONE(bp); |
1983 | dp = &bufqueues[BQ_EMPTY]; |
1984 | bp->b_whichq = BQ_EMPTY; |
1985 | bp->b_timestamp = buf_timestamp(); |
1986 | binsheadfree(bp, dp, BQ_EMPTY); |
1987 | binshash(bp, &invalhash); |
1988 | } |
1989 | boot_nbuf_headers = nbuf_headers; |
1990 | |
1991 | TAILQ_INIT(&iobufqueue); |
1992 | TAILQ_INIT(&delaybufqueue); |
1993 | |
1994 | for (; i < nbuf_headers + niobuf_headers; i++) { |
1995 | bp = &buf_headers[i]; |
1996 | bufhdrinit(bp); |
1997 | bp->b_whichq = -1; |
1998 | binsheadfree(bp, &iobufqueue, -1); |
1999 | } |
2000 | |
2001 | /* |
2002 | * allocate lock group attribute and group |
2003 | */ |
2004 | buf_mtx_grp_attr = lck_grp_attr_alloc_init(); |
2005 | buf_mtx_grp = lck_grp_alloc_init("buffer cache" , buf_mtx_grp_attr); |
2006 | |
2007 | /* |
2008 | * allocate the lock attribute |
2009 | */ |
2010 | buf_mtx_attr = lck_attr_alloc_init(); |
2011 | |
2012 | /* |
 * allocate and initialize mutexes for the buffer and iobuffer pools
2014 | */ |
2015 | buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); |
2016 | iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); |
2017 | buf_gc_callout = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); |
2018 | |
2019 | if (iobuffer_mtxp == NULL) |
2020 | panic("couldn't create iobuffer mutex" ); |
2021 | |
2022 | if (buf_mtxp == NULL) |
2023 | panic("couldn't create buf mutex" ); |
2024 | |
2025 | if (buf_gc_callout == NULL) |
2026 | panic("couldn't create buf_gc_callout mutex" ); |
2027 | |
2028 | /* |
2029 | * allocate and initialize cluster specific global locks... |
2030 | */ |
2031 | cluster_init(); |
2032 | |
2033 | printf("using %d buffer headers and %d cluster IO buffer headers\n" , |
2034 | nbuf_headers, niobuf_headers); |
2035 | |
2036 | /* Set up zones used by the buffer cache */ |
2037 | bufzoneinit(); |
2038 | |
2039 | /* start the bcleanbuf() thread */ |
2040 | bcleanbuf_thread_init(); |
2041 | |
2042 | /* Register a callout for relieving vm pressure */ |
2043 | if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) { |
2044 | panic("Couldn't register buffer cache callout for vm pressure!\n" ); |
2045 | } |
2046 | |
2047 | } |
2048 | |
2049 | /* |
2050 | * Zones for the meta data buffers |
2051 | */ |
2052 | |
2053 | #define MINMETA 512 |
2054 | #define MAXMETA 16384 |
2055 | |
2056 | struct meta_zone_entry { |
2057 | zone_t mz_zone; |
2058 | vm_size_t mz_size; |
2059 | vm_size_t mz_max; |
2060 | const char *mz_name; |
2061 | }; |
2062 | |
2063 | struct meta_zone_entry meta_zones[] = { |
2064 | {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" }, |
2065 | {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" }, |
2066 | {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" }, |
2067 | {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" }, |
2068 | {NULL, (MINMETA * 16), 512 * (MINMETA * 16), "buf.8192" }, |
2069 | {NULL, (MINMETA * 32), 512 * (MINMETA * 32), "buf.16384" }, |
2070 | {NULL, 0, 0, "" } /* End */ |
2071 | }; |
2072 | |
2073 | /* |
2074 | * Initialize the meta data zones |
2075 | */ |
2076 | static void |
2077 | bufzoneinit(void) |
2078 | { |
2079 | int i; |
2080 | |
2081 | for (i = 0; meta_zones[i].mz_size != 0; i++) { |
2082 | meta_zones[i].mz_zone = |
2083 | zinit(meta_zones[i].mz_size, |
2084 | meta_zones[i].mz_max, |
2085 | PAGE_SIZE, |
2086 | meta_zones[i].mz_name); |
2087 | zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE); |
2088 | } |
2089 | buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers" ); |
2090 | zone_change(buf_hdr_zone, Z_CALLERACCT, FALSE); |
2091 | } |
2092 | |
2093 | static __inline__ zone_t |
2094 | getbufzone(size_t size) |
2095 | { |
2096 | int i; |
2097 | |
2098 | if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) |
2099 | panic("getbufzone: incorect size = %lu" , size); |
2100 | |
2101 | for (i = 0; meta_zones[i].mz_size != 0; i++) { |
2102 | if (meta_zones[i].mz_size >= size) |
2103 | break; |
2104 | } |
2105 | |
2106 | return (meta_zones[i].mz_zone); |
2107 | } |
2108 | |
2109 | |
2110 | |
2111 | static struct buf * |
2112 | bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype) |
2113 | { |
2114 | buf_t bp; |
2115 | |
2116 | bp = buf_getblk(vp, blkno, size, 0, 0, queuetype); |
2117 | |
2118 | /* |
2119 | * If buffer does not have data valid, start a read. |
2120 | * Note that if buffer is B_INVAL, buf_getblk() won't return it. |
	 * Therefore, it's valid if its I/O has completed or been delayed.
2122 | */ |
2123 | if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) { |
2124 | struct proc *p; |
2125 | |
2126 | p = current_proc(); |
2127 | |
2128 | /* Start I/O for the buffer (keeping credentials). */ |
2129 | SET(bp->b_flags, B_READ | async); |
2130 | if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) { |
2131 | kauth_cred_ref(cred); |
2132 | bp->b_rcred = cred; |
2133 | } |
2134 | |
2135 | VNOP_STRATEGY(bp); |
2136 | |
2137 | trace(TR_BREADMISS, pack(vp, size), blkno); |
2138 | |
2139 | /* Pay for the read. */ |
2140 | if (p && p->p_stats) { |
2141 | OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */ |
2142 | } |
2143 | |
2144 | if (async) { |
2145 | /* |
			 * since we asked for an ASYNC I/O,
			 * the biodone will do the brelse;
			 * we don't want to pass back a bp
			 * that we don't 'own'
2150 | */ |
2151 | bp = NULL; |
2152 | } |
2153 | } else if (async) { |
2154 | buf_brelse(bp); |
2155 | bp = NULL; |
2156 | } |
2157 | |
2158 | trace(TR_BREADHIT, pack(vp, size), blkno); |
2159 | |
2160 | return (bp); |
2161 | } |
2162 | |
2163 | /* |
2164 | * Perform the reads for buf_breadn() and buf_meta_breadn(). |
2165 | * Trivial modification to the breada algorithm presented in Bach (p.55). |
2166 | */ |
2167 | static errno_t |
2168 | do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, |
2169 | int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype) |
2170 | { |
2171 | buf_t bp; |
2172 | int i; |
2173 | |
2174 | bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype); |
2175 | |
2176 | /* |
2177 | * For each of the read-ahead blocks, start a read, if necessary. |
2178 | */ |
2179 | for (i = 0; i < nrablks; i++) { |
2180 | /* If it's in the cache, just go on to next one. */ |
2181 | if (incore(vp, rablks[i])) |
2182 | continue; |
2183 | |
2184 | /* Get a buffer for the read-ahead block */ |
2185 | (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype); |
2186 | } |
2187 | |
	/* Wait for the initial (synchronous) read to complete, and return the result. */
2189 | return (buf_biowait(bp)); |
2190 | } |
2191 | |
2192 | |
2193 | /* |
2194 | * Read a disk block. |
 * This algorithm is described in Bach (p.54).
2196 | */ |
2197 | errno_t |
2198 | buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp) |
2199 | { |
2200 | buf_t bp; |
2201 | |
2202 | /* Get buffer for block. */ |
2203 | bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ); |
2204 | |
2205 | /* Wait for the read to complete, and return result. */ |
2206 | return (buf_biowait(bp)); |
2207 | } |
2208 | |
2209 | /* |
2210 | * Read a disk block. [bread() for meta-data] |
 * This algorithm is described in Bach (p.54).
2212 | */ |
2213 | errno_t |
2214 | buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp) |
2215 | { |
2216 | buf_t bp; |
2217 | |
2218 | /* Get buffer for block. */ |
2219 | bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META); |
2220 | |
2221 | /* Wait for the read to complete, and return result. */ |
2222 | return (buf_biowait(bp)); |
2223 | } |
2224 | |
2225 | /* |
2226 | * Read-ahead multiple disk blocks. The first is sync, the rest async. |
2227 | */ |
2228 | errno_t |
2229 | buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp) |
2230 | { |
2231 | return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ)); |
2232 | } |
2233 | |
2234 | /* |
2235 | * Read-ahead multiple disk blocks. The first is sync, the rest async. |
2236 | * [buf_breadn() for meta-data] |
2237 | */ |
2238 | errno_t |
2239 | buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp) |
2240 | { |
2241 | return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META)); |
2242 | } |
2243 | |
2244 | /* |
2245 | * Block write. Described in Bach (p.56) |
2246 | */ |
2247 | errno_t |
2248 | buf_bwrite(buf_t bp) |
2249 | { |
2250 | int sync, wasdelayed; |
2251 | errno_t rv; |
2252 | proc_t p = current_proc(); |
2253 | vnode_t vp = bp->b_vp; |
2254 | |
2255 | if (bp->b_datap == 0) { |
2256 | if (brecover_data(bp) == 0) |
2257 | return (0); |
2258 | } |
2259 | /* Remember buffer type, to switch on it later. */ |
2260 | sync = !ISSET(bp->b_flags, B_ASYNC); |
2261 | wasdelayed = ISSET(bp->b_flags, B_DELWRI); |
2262 | CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI)); |
2263 | |
2264 | if (wasdelayed) |
2265 | OSAddAtomicLong(-1, &nbdwrite); |
2266 | |
2267 | if (!sync) { |
2268 | /* |
2269 | * If not synchronous, pay for the I/O operation and make |
2270 | * sure the buf is on the correct vnode queue. We have |
2271 | * to do this now, because if we don't, the vnode may not |
2272 | * be properly notified that its I/O has completed. |
2273 | */ |
2274 | if (wasdelayed) |
2275 | buf_reassign(bp, vp); |
2276 | else |
2277 | if (p && p->p_stats) { |
2278 | OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ |
2279 | } |
2280 | } |
2281 | trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno); |
2282 | |
2283 | /* Initiate disk write. Make sure the appropriate party is charged. */ |
2284 | |
2285 | OSAddAtomic(1, &vp->v_numoutput); |
2286 | |
2287 | VNOP_STRATEGY(bp); |
2288 | |
2289 | if (sync) { |
2290 | /* |
2291 | * If I/O was synchronous, wait for it to complete. |
2292 | */ |
2293 | rv = buf_biowait(bp); |
2294 | |
2295 | /* |
		 * Pay for the I/O operation, if it hasn't been paid for, and
		 * make sure it's on the correct vnode queue. (async operations
		 * were paid for above.)
2299 | */ |
2300 | if (wasdelayed) |
2301 | buf_reassign(bp, vp); |
2302 | else |
2303 | if (p && p->p_stats) { |
2304 | OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ |
2305 | } |
2306 | |
2307 | /* Release the buffer. */ |
2308 | buf_brelse(bp); |
2309 | |
2310 | return (rv); |
2311 | } else { |
2312 | return (0); |
2313 | } |
2314 | } |
2315 | |
2316 | int |
2317 | vn_bwrite(struct vnop_bwrite_args *ap) |
2318 | { |
2319 | return (buf_bwrite(ap->a_bp)); |
2320 | } |
2321 | |
2322 | /* |
2323 | * Delayed write. |
2324 | * |
2325 | * The buffer is marked dirty, but is not queued for I/O. |
2326 | * This routine should be used when the buffer is expected |
2327 | * to be modified again soon, typically a small write that |
2328 | * partially fills a buffer. |
2329 | * |
2330 | * NB: magnetic tapes cannot be delayed; they must be |
2331 | * written in the order that the writes are requested. |
2332 | * |
2333 | * Described in Leffler, et al. (pp. 208-213). |
2334 | * |
2335 | * Note: With the ability to allocate additional buffer |
 * headers, we can get into a situation where "too" many
 * buf_bdwrite()s allow the kernel to create buffers faster
 * than the disks can service them. Doing a buf_bawrite() in
 * cases where we have "too many" outstanding buf_bdwrite()s avoids that.
2340 | */ |
2341 | int |
2342 | bdwrite_internal(buf_t bp, int return_error) |
2343 | { |
2344 | proc_t p = current_proc(); |
2345 | vnode_t vp = bp->b_vp; |
2346 | |
2347 | /* |
2348 | * If the block hasn't been seen before: |
2349 | * (1) Mark it as having been seen, |
2350 | * (2) Charge for the write. |
2351 | * (3) Make sure it's on its vnode's correct block list, |
2352 | */ |
2353 | if (!ISSET(bp->b_flags, B_DELWRI)) { |
2354 | SET(bp->b_flags, B_DELWRI); |
2355 | if (p && p->p_stats) { |
2356 | OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ |
2357 | } |
2358 | OSAddAtomicLong(1, &nbdwrite); |
2359 | buf_reassign(bp, vp); |
2360 | } |
2361 | |
2362 | /* |
	 * If we're not LOCKED, but the total number of delayed writes
	 * has climbed above 75% of the total buffers in the system,
	 * return an error if the caller has indicated that it can
	 * handle one in this case; otherwise schedule the I/O now.
	 * This is done to prevent us from allocating tons of extra
	 * buffers when dealing with virtual disks (i.e. DiskImages),
	 * because additional buffers are dynamically allocated to prevent
	 * deadlocks from occurring.
	 *
	 * However, we can't do a buf_bawrite() if the LOCKED bit is set
	 * because the buffer is part of a transaction and can't go to
	 * disk until the LOCKED bit is cleared.
2375 | */ |
2376 | if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers/4)*3)) { |
2377 | if (return_error) |
2378 | return (EAGAIN); |
2379 | /* |
2380 | * If the vnode has "too many" write operations in progress |
2381 | * wait for them to finish the IO |
2382 | */ |
2383 | (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite" ); |
2384 | |
2385 | return (buf_bawrite(bp)); |
2386 | } |
2387 | |
2388 | /* Otherwise, the "write" is done, so mark and release the buffer. */ |
2389 | SET(bp->b_flags, B_DONE); |
2390 | buf_brelse(bp); |
2391 | return (0); |
2392 | } |
2393 | |
2394 | errno_t |
2395 | buf_bdwrite(buf_t bp) |
2396 | { |
2397 | return (bdwrite_internal(bp, 0)); |
2398 | } |
2399 | |
2400 | |
2401 | /* |
2402 | * Asynchronous block write; just an asynchronous buf_bwrite(). |
2403 | * |
 * Note: With the ability to allocate additional buffer
 * headers, we can get into a situation where "too" many
 * buf_bawrite()s allow the kernel to create buffers faster
 * than the disks can service them.
2408 | * We limit the number of "in flight" writes a vnode can have to |
2409 | * avoid this. |
2410 | */ |
2411 | static int |
2412 | bawrite_internal(buf_t bp, int throttle) |
2413 | { |
2414 | vnode_t vp = bp->b_vp; |
2415 | |
2416 | if (vp) { |
2417 | if (throttle) |
2418 | /* |
2419 | * If the vnode has "too many" write operations in progress |
2420 | * wait for them to finish the IO |
2421 | */ |
2422 | (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite" ); |
2423 | else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) |
2424 | /* |
2425 | * return to the caller and |
2426 | * let him decide what to do |
2427 | */ |
2428 | return (EWOULDBLOCK); |
2429 | } |
2430 | SET(bp->b_flags, B_ASYNC); |
2431 | |
2432 | return (VNOP_BWRITE(bp)); |
2433 | } |
2434 | |
2435 | errno_t |
2436 | buf_bawrite(buf_t bp) |
2437 | { |
2438 | return (bawrite_internal(bp, 1)); |
2439 | } |
2440 | |
2441 | |
2442 | |
2443 | static void |
2444 | buf_free_meta_store(buf_t bp) |
2445 | { |
2446 | if (bp->b_bufsize) { |
2447 | if (ISSET(bp->b_flags, B_ZALLOC)) { |
2448 | zone_t z; |
2449 | |
2450 | z = getbufzone(bp->b_bufsize); |
2451 | zfree(z, (void *)bp->b_datap); |
2452 | } else |
2453 | kmem_free(kernel_map, bp->b_datap, bp->b_bufsize); |
2454 | |
2455 | bp->b_datap = (uintptr_t)NULL; |
2456 | bp->b_bufsize = 0; |
2457 | } |
2458 | } |
2459 | |
2460 | |
2461 | static buf_t |
2462 | buf_brelse_shadow(buf_t bp) |
2463 | { |
2464 | buf_t bp_head; |
2465 | buf_t bp_temp; |
2466 | buf_t bp_return = NULL; |
2467 | #ifdef BUF_MAKE_PRIVATE |
2468 | buf_t bp_data; |
2469 | int data_ref = 0; |
2470 | #endif |
2471 | int need_wakeup = 0; |
2472 | |
2473 | lck_mtx_lock_spin(buf_mtxp); |
2474 | |
2475 | __IGNORE_WCASTALIGN(bp_head = (buf_t)bp->b_orig); |
2476 | |
2477 | if (bp_head->b_whichq != -1) |
2478 | panic("buf_brelse_shadow: bp_head on freelist %d\n" , bp_head->b_whichq); |
2479 | |
2480 | #ifdef BUF_MAKE_PRIVATE |
	if ((bp_data = bp->b_data_store)) {
2482 | bp_data->b_data_ref--; |
2483 | /* |
2484 | * snapshot the ref count so that we can check it |
2485 | * outside of the lock... we only want the guy going |
2486 | * from 1 -> 0 to try and release the storage |
2487 | */ |
2488 | data_ref = bp_data->b_data_ref; |
2489 | } |
2490 | #endif |
2491 | KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0); |
2492 | |
2493 | bp_head->b_shadow_ref--; |
2494 | |
2495 | for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow); |
2496 | |
2497 | if (bp_temp == NULL) |
2498 | panic("buf_brelse_shadow: bp not on list %p" , bp_head); |
2499 | |
2500 | bp_temp->b_shadow = bp_temp->b_shadow->b_shadow; |
2501 | |
2502 | #ifdef BUF_MAKE_PRIVATE |
2503 | /* |
2504 | * we're about to free the current 'owner' of the data buffer and |
2505 | * there is at least one other shadow buf_t still pointing at it |
2506 | * so transfer it to the first shadow buf left in the chain |
2507 | */ |
2508 | if (bp == bp_data && data_ref) { |
2509 | if ((bp_data = bp_head->b_shadow) == NULL) |
2510 | panic("buf_brelse_shadow: data_ref mismatch bp(%p)" , bp); |
2511 | |
2512 | for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) |
2513 | bp_temp->b_data_store = bp_data; |
2514 | bp_data->b_data_ref = data_ref; |
2515 | } |
2516 | #endif |
2517 | if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) |
2518 | panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)" , bp); |
2519 | if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) |
2520 | panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)" , bp); |
2521 | |
2522 | if (bp_head->b_shadow_ref == 0) { |
2523 | if (!ISSET(bp_head->b_lflags, BL_BUSY)) { |
2524 | |
2525 | CLR(bp_head->b_flags, B_AGE); |
2526 | bp_head->b_timestamp = buf_timestamp(); |
2527 | |
2528 | if (ISSET(bp_head->b_flags, B_LOCKED)) { |
2529 | bp_head->b_whichq = BQ_LOCKED; |
2530 | binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED); |
2531 | } else { |
2532 | bp_head->b_whichq = BQ_META; |
2533 | binstailfree(bp_head, &bufqueues[BQ_META], BQ_META); |
2534 | } |
2535 | } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) { |
2536 | CLR(bp_head->b_lflags, BL_WAITSHADOW); |
2537 | |
2538 | bp_return = bp_head; |
2539 | } |
2540 | if (ISSET(bp_head->b_lflags, BL_WANTED_REF)) { |
2541 | CLR(bp_head->b_lflags, BL_WANTED_REF); |
2542 | need_wakeup = 1; |
2543 | } |
2544 | } |
2545 | lck_mtx_unlock(buf_mtxp); |
2546 | |
2547 | if (need_wakeup) |
2548 | wakeup(bp_head); |
2549 | |
2550 | #ifdef BUF_MAKE_PRIVATE |
2551 | if (bp == bp_data && data_ref == 0) |
2552 | buf_free_meta_store(bp); |
2553 | |
2554 | bp->b_data_store = NULL; |
2555 | #endif |
2556 | KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0); |
2557 | |
2558 | return (bp_return); |
2559 | } |
2560 | |
2561 | |
2562 | /* |
 * Release a buffer onto the free lists.
2564 | * Described in Bach (p. 46). |
2565 | */ |
2566 | void |
2567 | buf_brelse(buf_t bp) |
2568 | { |
2569 | struct bqueues *bufq; |
2570 | long whichq; |
2571 | upl_t upl; |
2572 | int need_wakeup = 0; |
2573 | int need_bp_wakeup = 0; |
2574 | |
2575 | |
2576 | if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) |
2577 | panic("buf_brelse: bad buffer = %p\n" , bp); |
2578 | |
2579 | #ifdef JOE_DEBUG |
2580 | (void) OSBacktrace(&bp->b_stackbrelse[0], 6); |
2581 | |
2582 | bp->b_lastbrelse = current_thread(); |
2583 | bp->b_tag = 0; |
2584 | #endif |
2585 | if (bp->b_lflags & BL_IOBUF) { |
2586 | buf_t shadow_master_bp = NULL; |
2587 | |
2588 | if (ISSET(bp->b_lflags, BL_SHADOW)) |
2589 | shadow_master_bp = buf_brelse_shadow(bp); |
2590 | else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) |
2591 | buf_free_meta_store(bp); |
2592 | free_io_buf(bp); |
2593 | |
2594 | if (shadow_master_bp) { |
2595 | bp = shadow_master_bp; |
2596 | goto finish_shadow_master; |
2597 | } |
2598 | return; |
2599 | } |
2600 | |
2601 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START, |
2602 | bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap, |
2603 | bp->b_flags, 0); |
2604 | |
2605 | trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); |
2606 | |
2607 | /* |
2608 | * if we're invalidating a buffer that has the B_FILTER bit |
2609 | * set then call the b_iodone function so it gets cleaned |
2610 | * up properly. |
2611 | * |
2612 | * the HFS journal code depends on this |
2613 | */ |
2614 | if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) { |
2615 | if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */ |
2616 | void (*iodone_func)(struct buf *, void *) = bp->b_iodone; |
2617 | void *arg = bp->b_transaction; |
2618 | |
2619 | CLR(bp->b_flags, B_FILTER); /* but note callout done */ |
2620 | bp->b_iodone = NULL; |
2621 | bp->b_transaction = NULL; |
2622 | |
2623 | if (iodone_func == NULL) { |
2624 | panic("brelse: bp @ %p has NULL b_iodone!\n" , bp); |
2625 | } |
2626 | (*iodone_func)(bp, arg); |
2627 | } |
2628 | } |
2629 | /* |
	 * I/O is done. Clean up the UPL state.
2631 | */ |
2632 | upl = bp->b_upl; |
2633 | |
2634 | if ( !ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { |
2635 | kern_return_t kret; |
2636 | int upl_flags; |
2637 | |
2638 | if (upl == NULL) { |
2639 | if ( !ISSET(bp->b_flags, B_INVAL)) { |
2640 | kret = ubc_create_upl_kernel(bp->b_vp, |
2641 | ubc_blktooff(bp->b_vp, bp->b_lblkno), |
2642 | bp->b_bufsize, |
2643 | &upl, |
2644 | NULL, |
2645 | UPL_PRECIOUS, |
2646 | VM_KERN_MEMORY_FILE); |
2647 | |
2648 | if (kret != KERN_SUCCESS) |
2649 | panic("brelse: Failed to create UPL" ); |
2650 | #if UPL_DEBUG |
2651 | upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5); |
2652 | #endif /* UPL_DEBUG */ |
2653 | } |
2654 | } else { |
2655 | if (bp->b_datap) { |
2656 | kret = ubc_upl_unmap(upl); |
2657 | |
2658 | if (kret != KERN_SUCCESS) |
2659 | panic("ubc_upl_unmap failed" ); |
2660 | bp->b_datap = (uintptr_t)NULL; |
2661 | } |
2662 | } |
2663 | if (upl) { |
2664 | if (bp->b_flags & (B_ERROR | B_INVAL)) { |
2665 | if (bp->b_flags & (B_READ | B_INVAL)) |
2666 | upl_flags = UPL_ABORT_DUMP_PAGES; |
2667 | else |
2668 | upl_flags = 0; |
2669 | |
2670 | ubc_upl_abort(upl, upl_flags); |
2671 | } else { |
2672 | if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) |
2673 | upl_flags = UPL_COMMIT_SET_DIRTY ; |
2674 | else |
2675 | upl_flags = UPL_COMMIT_CLEAR_DIRTY ; |
2676 | |
2677 | ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags | |
2678 | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); |
2679 | } |
2680 | bp->b_upl = NULL; |
2681 | } |
2682 | } else { |
2683 | if ( (upl) ) |
2684 | panic("brelse: UPL set for non VREG; vp=%p" , bp->b_vp); |
2685 | } |
2686 | |
2687 | /* |
2688 | * If it's locked, don't report an error; try again later. |
2689 | */ |
2690 | if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR)) |
2691 | CLR(bp->b_flags, B_ERROR); |
2692 | /* |
2693 | * If it's not cacheable, or an error, mark it invalid. |
2694 | */ |
2695 | if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR))) |
2696 | SET(bp->b_flags, B_INVAL); |
2697 | |
2698 | if ((bp->b_bufsize <= 0) || |
2699 | ISSET(bp->b_flags, B_INVAL) || |
2700 | (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) { |
2701 | |
2702 | boolean_t delayed_buf_free_meta_store = FALSE; |
2703 | |
2704 | /* |
		 * If it's invalid or empty, dissociate it from its vnode,
		 * release its storage if B_META, clean it up a bit,
		 * and put it on the EMPTY queue.
2708 | */ |
2709 | if (ISSET(bp->b_flags, B_DELWRI)) |
2710 | OSAddAtomicLong(-1, &nbdwrite); |
2711 | |
2712 | if (ISSET(bp->b_flags, B_META)) { |
2713 | if (bp->b_shadow_ref) |
2714 | delayed_buf_free_meta_store = TRUE; |
2715 | else |
2716 | buf_free_meta_store(bp); |
2717 | } |
2718 | /* |
2719 | * nuke any credentials we were holding |
2720 | */ |
2721 | buf_release_credentials(bp); |
2722 | |
2723 | lck_mtx_lock_spin(buf_mtxp); |
2724 | |
2725 | if (bp->b_shadow_ref) { |
2726 | SET(bp->b_lflags, BL_WAITSHADOW); |
2727 | |
2728 | lck_mtx_unlock(buf_mtxp); |
2729 | |
2730 | return; |
2731 | } |
2732 | if (delayed_buf_free_meta_store == TRUE) { |
2733 | |
2734 | lck_mtx_unlock(buf_mtxp); |
2735 | finish_shadow_master: |
2736 | buf_free_meta_store(bp); |
2737 | |
2738 | lck_mtx_lock_spin(buf_mtxp); |
2739 | } |
2740 | CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); |
2741 | |
2742 | if (bp->b_vp) |
2743 | brelvp_locked(bp); |
2744 | |
2745 | bremhash(bp); |
2746 | BLISTNONE(bp); |
2747 | binshash(bp, &invalhash); |
2748 | |
2749 | bp->b_whichq = BQ_EMPTY; |
2750 | binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); |
2751 | } else { |
2752 | |
2753 | /* |
2754 | * It has valid data. Put it on the end of the appropriate |
2755 | * queue, so that it'll stick around for as long as possible. |
2756 | */ |
2757 | if (ISSET(bp->b_flags, B_LOCKED)) |
2758 | whichq = BQ_LOCKED; /* locked in core */ |
2759 | else if (ISSET(bp->b_flags, B_META)) |
2760 | whichq = BQ_META; /* meta-data */ |
2761 | else if (ISSET(bp->b_flags, B_AGE)) |
2762 | whichq = BQ_AGE; /* stale but valid data */ |
2763 | else |
2764 | whichq = BQ_LRU; /* valid data */ |
2765 | bufq = &bufqueues[whichq]; |
2766 | |
2767 | bp->b_timestamp = buf_timestamp(); |
2768 | |
2769 | lck_mtx_lock_spin(buf_mtxp); |
2770 | |
2771 | /* |
2772 | * the buf_brelse_shadow routine doesn't take 'ownership' |
2773 | * of the parent buf_t... it updates state that is protected by |
2774 | * the buf_mtxp, and checks for BL_BUSY to determine whether to |
2775 | * put the buf_t back on a free list. b_shadow_ref is protected |
		 * by the lock, and since we have not yet cleared BL_BUSY, we need
		 * to check it while holding the lock to ensure that one of us
2778 | * puts this buf_t back on a free list when it is safe to do so |
2779 | */ |
2780 | if (bp->b_shadow_ref == 0) { |
2781 | CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE)); |
2782 | bp->b_whichq = whichq; |
2783 | binstailfree(bp, bufq, whichq); |
2784 | } else { |
2785 | /* |
2786 | * there are still cloned buf_t's pointing |
2787 | * at this guy... need to keep it off the |
2788 | * freelists until a buf_brelse is done on |
2789 | * the last clone |
2790 | */ |
2791 | CLR(bp->b_flags, (B_ASYNC | B_NOCACHE)); |
2792 | } |
2793 | } |
2794 | if (needbuffer) { |
2795 | /* |
2796 | * needbuffer is a global |
2797 | * we're currently using buf_mtxp to protect it |
2798 | * delay doing the actual wakeup until after |
2799 | * we drop buf_mtxp |
2800 | */ |
2801 | needbuffer = 0; |
2802 | need_wakeup = 1; |
2803 | } |
2804 | if (ISSET(bp->b_lflags, BL_WANTED)) { |
2805 | /* |
2806 | * delay the actual wakeup until after we |
2807 | * clear BL_BUSY and we've dropped buf_mtxp |
2808 | */ |
2809 | need_bp_wakeup = 1; |
2810 | } |
2811 | /* |
2812 | * Unlock the buffer. |
2813 | */ |
2814 | CLR(bp->b_lflags, (BL_BUSY | BL_WANTED)); |
2815 | buf_busycount--; |
2816 | |
2817 | lck_mtx_unlock(buf_mtxp); |
2818 | |
2819 | if (need_wakeup) { |
2820 | /* |
2821 | * Wake up any processes waiting for any buffer to become free. |
2822 | */ |
2823 | wakeup(&needbuffer); |
2824 | } |
2825 | if (need_bp_wakeup) { |
2826 | /* |
		 * Wake up any processes waiting for _this_ buffer to become free.
2828 | */ |
2829 | wakeup(bp); |
2830 | } |
2831 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END, |
2832 | bp, bp->b_datap, bp->b_flags, 0, 0); |
2833 | } |
2834 | |
2835 | /* |
2836 | * Determine if a block is in the cache. |
2837 | * Just look on what would be its hash chain. If it's there, return |
2838 | * a pointer to it, unless it's marked invalid. If it's marked invalid, |
2839 | * we normally don't return the buffer, unless the caller explicitly |
2840 | * wants us to. |
2841 | */ |
2842 | static boolean_t |
2843 | incore(vnode_t vp, daddr64_t blkno) |
2844 | { |
2845 | boolean_t retval; |
2846 | struct bufhashhdr *dp; |
2847 | |
2848 | dp = BUFHASH(vp, blkno); |
2849 | |
2850 | lck_mtx_lock_spin(buf_mtxp); |
2851 | |
2852 | if (incore_locked(vp, blkno, dp)) |
2853 | retval = TRUE; |
2854 | else |
2855 | retval = FALSE; |
2856 | lck_mtx_unlock(buf_mtxp); |
2857 | |
2858 | return (retval); |
2859 | } |
2860 | |
2861 | |
2862 | static buf_t |
2863 | incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp) |
2864 | { |
2865 | struct buf *bp; |
2866 | |
2867 | /* Search hash chain */ |
2868 | for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) { |
2869 | if (bp->b_lblkno == blkno && bp->b_vp == vp && |
2870 | !ISSET(bp->b_flags, B_INVAL)) { |
2871 | return (bp); |
2872 | } |
2873 | } |
2874 | return (NULL); |
2875 | } |
2876 | |
2877 | |
2878 | void |
2879 | buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno) |
2880 | { |
2881 | buf_t bp; |
2882 | struct bufhashhdr *dp; |
2883 | |
2884 | dp = BUFHASH(vp, blkno); |
2885 | |
2886 | lck_mtx_lock_spin(buf_mtxp); |
2887 | |
2888 | for (;;) { |
2889 | if ((bp = incore_locked(vp, blkno, dp)) == NULL) |
2890 | break; |
2891 | |
2892 | if (bp->b_shadow_ref == 0) |
2893 | break; |
2894 | |
2895 | SET(bp->b_lflags, BL_WANTED_REF); |
2896 | |
2897 | (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO+1), "buf_wait_for_shadow" , NULL); |
2898 | } |
2899 | lck_mtx_unlock(buf_mtxp); |
2900 | } |
2901 | |
2902 | /* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */ |
2903 | /* |
2904 | * Get a block of requested size that is associated with |
2905 | * a given vnode and block offset. If it is found in the |
2906 | * block cache, mark it as having been found, make it busy |
2907 | * and return it. Otherwise, return an empty block of the |
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
2910 | */ |
2911 | buf_t |
2912 | buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation) |
2913 | { |
2914 | buf_t bp; |
2915 | int err; |
2916 | upl_t upl; |
2917 | upl_page_info_t *pl; |
2918 | kern_return_t kret; |
2919 | int ret_only_valid; |
2920 | struct timespec ts; |
2921 | int upl_flags; |
2922 | struct bufhashhdr *dp; |
2923 | |
2924 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START, |
2925 | (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0); |
2926 | |
2927 | ret_only_valid = operation & BLK_ONLYVALID; |
2928 | operation &= ~BLK_ONLYVALID; |
2929 | dp = BUFHASH(vp, blkno); |
2930 | start: |
2931 | lck_mtx_lock_spin(buf_mtxp); |
2932 | |
2933 | if ((bp = incore_locked(vp, blkno, dp))) { |
2934 | /* |
2935 | * Found in the Buffer Cache |
2936 | */ |
2937 | if (ISSET(bp->b_lflags, BL_BUSY)) { |
2938 | /* |
2939 | * but is busy |
2940 | */ |
2941 | switch (operation) { |
2942 | case BLK_READ: |
2943 | case BLK_WRITE: |
2944 | case BLK_META: |
2945 | SET(bp->b_lflags, BL_WANTED); |
2946 | bufstats.bufs_busyincore++; |
2947 | |
2948 | /* |
2949 | * don't retake the mutex after being awakened... |
				 * the timeout is in msecs
2951 | */ |
2952 | ts.tv_sec = (slptimeo/1000); |
2953 | ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000; |
2954 | |
2955 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE, |
2956 | (uintptr_t)blkno, size, operation, 0, 0); |
2957 | |
2958 | err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk" , &ts); |
2959 | |
2960 | /* |
2961 | * Callers who call with PCATCH or timeout are |
2962 | * willing to deal with the NULL pointer |
2963 | */ |
2964 | if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) |
2965 | return (NULL); |
2966 | goto start; |
2967 | /*NOTREACHED*/ |
2968 | |
2969 | default: |
2970 | /* |
2971 | * unknown operation requested |
2972 | */ |
2973 | panic("getblk: paging or unknown operation for incore busy buffer - %x\n" , operation); |
2974 | /*NOTREACHED*/ |
2975 | break; |
2976 | } |
2977 | } else { |
2978 | int clear_bdone; |
2979 | |
2980 | /* |
2981 | * buffer in core and not busy |
2982 | */ |
2983 | SET(bp->b_lflags, BL_BUSY); |
2984 | SET(bp->b_flags, B_CACHE); |
2985 | buf_busycount++; |
2986 | |
2987 | bremfree_locked(bp); |
2988 | bufstats.bufs_incore++; |
2989 | |
2990 | lck_mtx_unlock(buf_mtxp); |
2991 | #ifdef JOE_DEBUG |
2992 | bp->b_owner = current_thread(); |
2993 | bp->b_tag = 1; |
2994 | #endif |
2995 | if ( (bp->b_upl) ) |
2996 | panic("buffer has UPL, but not marked BUSY: %p" , bp); |
2997 | |
2998 | clear_bdone = FALSE; |
2999 | if (!ret_only_valid) { |
3000 | /* |
				 * If the number of bytes that are valid is going
				 * to increase (even if we end up not doing a
				 * reallocation through allocbuf) we have to read
				 * the new size first.
				 *
				 * This is required in cases where we are doing a
				 * read-modify-write of already valid data on disk, but
				 * in cases where the data on disk beyond (blkno + b_bcount)
				 * is invalid, we may end up doing extra I/O.
3010 | */ |
3011 | if (operation == BLK_META && bp->b_bcount < size) { |
3012 | /* |
3013 | * Since we are going to read in the whole size first |
3014 | * we first have to ensure that any pending delayed write |
3015 | * is flushed to disk first. |
3016 | */ |
3017 | if (ISSET(bp->b_flags, B_DELWRI)) { |
3018 | CLR(bp->b_flags, B_CACHE); |
3019 | buf_bwrite(bp); |
3020 | goto start; |
3021 | } |
3022 | /* |
3023 | * clear B_DONE before returning from |
3024 | * this function so that the caller can |
				 * issue a read for the new size.
3026 | */ |
3027 | clear_bdone = TRUE; |
3028 | } |
3029 | |
3030 | if (bp->b_bufsize != size) |
3031 | allocbuf(bp, size); |
3032 | } |
3033 | |
3034 | upl_flags = 0; |
3035 | switch (operation) { |
3036 | case BLK_WRITE: |
3037 | /* |
3038 | * "write" operation: let the UPL subsystem |
3039 | * know that we intend to modify the buffer |
3040 | * cache pages we're gathering. |
3041 | */ |
3042 | upl_flags |= UPL_WILL_MODIFY; |
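				/* FALLTHROUGH: BLK_WRITE shares the UPL setup below with BLK_READ */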
3043 | case BLK_READ: |
3044 | upl_flags |= UPL_PRECIOUS; |
3045 | if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { |
3046 | kret = ubc_create_upl_kernel(vp, |
3047 | ubc_blktooff(vp, bp->b_lblkno), |
3048 | bp->b_bufsize, |
3049 | &upl, |
3050 | &pl, |
3051 | upl_flags, |
3052 | VM_KERN_MEMORY_FILE); |
3053 | if (kret != KERN_SUCCESS) |
3054 | panic("Failed to create UPL" ); |
3055 | |
3056 | bp->b_upl = upl; |
3057 | |
3058 | if (upl_valid_page(pl, 0)) { |
3059 | if (upl_dirty_page(pl, 0)) |
3060 | SET(bp->b_flags, B_WASDIRTY); |
3061 | else |
3062 | CLR(bp->b_flags, B_WASDIRTY); |
3063 | } else |
3064 | CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI)); |
3065 | |
3066 | kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap)); |
3067 | |
3068 | if (kret != KERN_SUCCESS) |
3069 | panic("getblk: ubc_upl_map() failed with (%d)" , kret); |
3070 | } |
3071 | break; |
3072 | |
3073 | case BLK_META: |
3074 | /* |
				 * VM is not involved in I/O for the meta data;
				 * the buffer already has valid data
3077 | */ |
3078 | break; |
3079 | |
3080 | default: |
3081 | panic("getblk: paging or unknown operation for incore buffer- %d\n" , operation); |
3082 | /*NOTREACHED*/ |
3083 | break; |
3084 | } |
3085 | |
3086 | if (clear_bdone) |
3087 | CLR(bp->b_flags, B_DONE); |
3088 | } |
3089 | } else { /* not incore() */ |
3090 | int queue = BQ_EMPTY; /* Start with no preference */ |
3091 | |
3092 | if (ret_only_valid) { |
3093 | lck_mtx_unlock(buf_mtxp); |
3094 | return (NULL); |
3095 | } |
3096 | if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) |
3097 | operation = BLK_META; |
3098 | |
3099 | if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) |
3100 | goto start; |
3101 | |
3102 | /* |
3103 | * getnewbuf may block for a number of different reasons... |
3104 | * if it does, it's then possible for someone else to |
3105 | * create a buffer for the same block and insert it into |
3106 | * the hash... if we see it incore at this point we dump |
3107 | * the buffer we were working on and start over |
3108 | */ |
3109 | if (incore_locked(vp, blkno, dp)) { |
3110 | SET(bp->b_flags, B_INVAL); |
3111 | binshash(bp, &invalhash); |
3112 | |
3113 | lck_mtx_unlock(buf_mtxp); |
3114 | |
3115 | buf_brelse(bp); |
3116 | goto start; |
3117 | } |
3118 | /* |
3119 | * NOTE: YOU CAN NOT BLOCK UNTIL binshash() HAS BEEN |
3120 | * CALLED! BE CAREFUL. |
3121 | */ |
3122 | |
3123 | /* |
3124 | * mark the buffer as B_META if indicated |
3125 | * so that when buffer is released it will goto META queue |
3126 | */ |
3127 | if (operation == BLK_META) |
3128 | SET(bp->b_flags, B_META); |
3129 | |
3130 | bp->b_blkno = bp->b_lblkno = blkno; |
3131 | bp->b_vp = vp; |
3132 | |
3133 | /* |
3134 | * Insert in the hash so that incore() can find it |
3135 | */ |
3136 | binshash(bp, BUFHASH(vp, blkno)); |
3137 | |
3138 | bgetvp_locked(vp, bp); |
3139 | |
3140 | lck_mtx_unlock(buf_mtxp); |
3141 | |
3142 | allocbuf(bp, size); |
3143 | |
3144 | upl_flags = 0; |
3145 | switch (operation) { |
3146 | case BLK_META: |
3147 | /* |
3148 | * buffer data is invalid... |
3149 | * |
3150 | * I don't want to have to retake buf_mtxp, |
3151 | * so the miss and vmhits counters are done |
3152 | * with Atomic updates... all other counters |
3153 | * in bufstats are protected with either |
3154 | * buf_mtxp or iobuffer_mtxp |
3155 | */ |
3156 | OSAddAtomicLong(1, &bufstats.bufs_miss); |
3157 | break; |
3158 | |
3159 | case BLK_WRITE: |
3160 | /* |
3161 | * "write" operation: let the UPL subsystem know |
3162 | * that we intend to modify the buffer cache pages |
3163 | * we're gathering. |
3164 | */ |
3165 | upl_flags |= UPL_WILL_MODIFY; |
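			/* FALLTHROUGH: BLK_WRITE shares the UPL creation below with BLK_READ */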
3166 | case BLK_READ: |
3167 | { off_t f_offset; |
3168 | size_t contig_bytes; |
3169 | int bmap_flags; |
3170 | |
3171 | #if DEVELOPMENT || DEBUG |
3172 | /* |
			 * Apple-implemented file systems use UBC exclusively; they should
			 * not call in here.
3175 | */ |
3176 | const char* excldfs[] = {"hfs" , "afpfs" , "smbfs" , "acfs" , |
3177 | "exfat" , "msdos" , "webdav" , NULL}; |
3178 | |
3179 | for (int i = 0; excldfs[i] != NULL; i++) { |
3180 | if (vp->v_mount && |
3181 | !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename, |
3182 | excldfs[i])) { |
3183 | panic("%s %s calls buf_getblk" , |
3184 | excldfs[i], |
3185 | operation == BLK_READ ? "BLK_READ" : "BLK_WRITE" ); |
3186 | } |
3187 | } |
3188 | #endif |
3189 | |
3190 | if ( (bp->b_upl) ) |
3191 | panic("bp already has UPL: %p" ,bp); |
3192 | |
3193 | f_offset = ubc_blktooff(vp, blkno); |
3194 | |
3195 | upl_flags |= UPL_PRECIOUS; |
3196 | kret = ubc_create_upl_kernel(vp, |
3197 | f_offset, |
3198 | bp->b_bufsize, |
3199 | &upl, |
3200 | &pl, |
3201 | upl_flags, |
3202 | VM_KERN_MEMORY_FILE); |
3203 | |
3204 | if (kret != KERN_SUCCESS) |
3205 | panic("Failed to create UPL" ); |
3206 | #if UPL_DEBUG |
3207 | upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4); |
3208 | #endif /* UPL_DEBUG */ |
3209 | bp->b_upl = upl; |
3210 | |
3211 | if (upl_valid_page(pl, 0)) { |
3212 | |
3213 | if (operation == BLK_READ) |
3214 | bmap_flags = VNODE_READ; |
3215 | else |
3216 | bmap_flags = VNODE_WRITE; |
3217 | |
3218 | SET(bp->b_flags, B_CACHE | B_DONE); |
3219 | |
3220 | OSAddAtomicLong(1, &bufstats.bufs_vmhits); |
3221 | |
3222 | bp->b_validoff = 0; |
3223 | bp->b_dirtyoff = 0; |
3224 | |
3225 | if (upl_dirty_page(pl, 0)) { |
3226 | /* page is dirty */ |
3227 | SET(bp->b_flags, B_WASDIRTY); |
3228 | |
3229 | bp->b_validend = bp->b_bcount; |
3230 | bp->b_dirtyend = bp->b_bcount; |
3231 | } else { |
3232 | /* page is clean */ |
3233 | bp->b_validend = bp->b_bcount; |
3234 | bp->b_dirtyend = 0; |
3235 | } |
3236 | /* |
3237 | * try to recreate the physical block number associated with |
3238 | * this buffer... |
3239 | */ |
3240 | if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) |
3241 | panic("getblk: VNOP_BLOCKMAP failed" ); |
3242 | /* |
3243 | * if the extent represented by this buffer |
3244 | * is not completely physically contiguous on |
					 * disk, then we can't cache the physical mapping
3246 | * in the buffer header |
3247 | */ |
3248 | if ((long)contig_bytes < bp->b_bcount) |
3249 | bp->b_blkno = bp->b_lblkno; |
3250 | } else { |
3251 | OSAddAtomicLong(1, &bufstats.bufs_miss); |
3252 | } |
3253 | kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap)); |
3254 | |
3255 | if (kret != KERN_SUCCESS) |
3256 | panic("getblk: ubc_upl_map() failed with (%d)" , kret); |
3257 | break; |
3258 | } // end BLK_READ |
3259 | default: |
3260 | panic("getblk: paging or unknown operation - %x" , operation); |
3261 | /*NOTREACHED*/ |
3262 | break; |
3263 | } // end switch |
3264 | } //end buf_t !incore |
3265 | |
3266 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END, |
3267 | bp, bp->b_datap, bp->b_flags, 3, 0); |
3268 | |
3269 | #ifdef JOE_DEBUG |
3270 | (void) OSBacktrace(&bp->b_stackgetblk[0], 6); |
3271 | #endif |
3272 | return (bp); |
3273 | } |
3274 | |
3275 | /* |
3276 | * Get an empty, disassociated buffer of given size. |
3277 | */ |
3278 | buf_t |
3279 | buf_geteblk(int size) |
3280 | { |
3281 | buf_t bp = NULL; |
3282 | int queue = BQ_EMPTY; |
3283 | |
3284 | do { |
3285 | lck_mtx_lock_spin(buf_mtxp); |
3286 | |
3287 | bp = getnewbuf(0, 0, &queue); |
3288 | } while (bp == NULL); |
3289 | |
3290 | SET(bp->b_flags, (B_META|B_INVAL)); |
3291 | |
3292 | #if DIAGNOSTIC |
3293 | assert(queue == BQ_EMPTY); |
3294 | #endif /* DIAGNOSTIC */ |
3295 | /* XXX need to implement logic to deal with other queues */ |
3296 | |
3297 | binshash(bp, &invalhash); |
3298 | bufstats.bufs_eblk++; |
3299 | |
3300 | lck_mtx_unlock(buf_mtxp); |
3301 | |
3302 | allocbuf(bp, size); |
3303 | |
3304 | return (bp); |
3305 | } |
3306 | |
3307 | uint32_t |
3308 | buf_redundancy_flags(buf_t bp) |
3309 | { |
3310 | return bp->b_redundancy_flags; |
3311 | } |
3312 | |
3313 | void |
3314 | buf_set_redundancy_flags(buf_t bp, uint32_t flags) |
3315 | { |
3316 | SET(bp->b_redundancy_flags, flags); |
3317 | } |
3318 | |
3319 | void |
3320 | buf_clear_redundancy_flags(buf_t bp, uint32_t flags) |
3321 | { |
3322 | CLR(bp->b_redundancy_flags, flags); |
3323 | } |
3324 | |
3325 | |
3326 | |
3327 | static void * |
3328 | recycle_buf_from_pool(int nsize) |
3329 | { |
3330 | buf_t bp; |
3331 | void *ptr = NULL; |
3332 | |
3333 | lck_mtx_lock_spin(buf_mtxp); |
3334 | |
3335 | TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) { |
3336 | if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize) |
3337 | continue; |
3338 | ptr = (void *)bp->b_datap; |
3339 | bp->b_bufsize = 0; |
3340 | |
3341 | bcleanbuf(bp, TRUE); |
3342 | break; |
3343 | } |
3344 | lck_mtx_unlock(buf_mtxp); |
3345 | |
3346 | return (ptr); |
3347 | } |
3348 | |
3349 | |
3350 | |
3351 | int zalloc_nopagewait_failed = 0; |
3352 | int recycle_buf_failed = 0; |
3353 | |
3354 | static void * |
3355 | grab_memory_for_meta_buf(int nsize) |
3356 | { |
3357 | zone_t z; |
3358 | void *ptr; |
3359 | boolean_t was_vmpriv; |
3360 | |
3361 | z = getbufzone(nsize); |
3362 | |
3363 | /* |
	 * make sure we're NOT privileged so that
3365 | * if a vm_page_grab is needed, it won't |
3366 | * block if we're out of free pages... if |
3367 | * it blocks, then we can't honor the |
3368 | * nopagewait request |
3369 | */ |
3370 | was_vmpriv = set_vm_privilege(FALSE); |
3371 | |
3372 | ptr = zalloc_nopagewait(z); |
3373 | |
3374 | if (was_vmpriv == TRUE) |
3375 | set_vm_privilege(TRUE); |
3376 | |
3377 | if (ptr == NULL) { |
3378 | |
3379 | zalloc_nopagewait_failed++; |
3380 | |
3381 | ptr = recycle_buf_from_pool(nsize); |
3382 | |
3383 | if (ptr == NULL) { |
3384 | |
3385 | recycle_buf_failed++; |
3386 | |
3387 | if (was_vmpriv == FALSE) |
3388 | set_vm_privilege(TRUE); |
3389 | |
3390 | ptr = zalloc(z); |
3391 | |
3392 | if (was_vmpriv == FALSE) |
3393 | set_vm_privilege(FALSE); |
3394 | } |
3395 | } |
3396 | return (ptr); |
3397 | } |
3398 | |
3399 | /* |
3400 | * With UBC, there is no need to expand / shrink the file data |
3401 | * buffer. The VM uses the same pages, hence no waste. |
3402 | * All the file data buffers can have one size. |
3403 | * In fact expand / shrink would be an expensive operation. |
3404 | * |
3405 | * Only exception to this is meta-data buffers. Most of the |
3406 | * meta data operations are smaller than PAGE_SIZE. Having the |
3407 | * meta-data buffers grow and shrink as needed, optimizes use |
3408 | * of the kernel wired memory. |
3409 | */ |
3410 | |
3411 | int |
3412 | allocbuf(buf_t bp, int size) |
3413 | { |
3414 | vm_size_t desired_size; |
3415 | |
3416 | desired_size = roundup(size, CLBYTES); |
3417 | |
3418 | if (desired_size < PAGE_SIZE) |
3419 | desired_size = PAGE_SIZE; |
3420 | if (desired_size > MAXBSIZE) |
3421 | panic("allocbuf: buffer larger than MAXBSIZE requested" ); |
3422 | |
3423 | if (ISSET(bp->b_flags, B_META)) { |
3424 | int nsize = roundup(size, MINMETA); |
3425 | |
3426 | if (bp->b_datap) { |
3427 | vm_offset_t elem = (vm_offset_t)bp->b_datap; |
3428 | |
3429 | if (ISSET(bp->b_flags, B_ZALLOC)) { |
3430 | if (bp->b_bufsize < nsize) { |
3431 | zone_t zprev; |
3432 | |
3433 | /* reallocate to a bigger size */ |
3434 | |
3435 | zprev = getbufzone(bp->b_bufsize); |
3436 | if (nsize <= MAXMETA) { |
3437 | desired_size = nsize; |
3438 | |
3439 | /* b_datap not really a ptr */ |
3440 | *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize); |
3441 | } else { |
3442 | bp->b_datap = (uintptr_t)NULL; |
3443 | kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); |
3444 | CLR(bp->b_flags, B_ZALLOC); |
3445 | } |
3446 | bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); |
3447 | zfree(zprev, (void *)elem); |
3448 | } else { |
3449 | desired_size = bp->b_bufsize; |
3450 | } |
3451 | |
3452 | } else { |
3453 | if ((vm_size_t)bp->b_bufsize < desired_size) { |
3454 | /* reallocate to a bigger size */ |
3455 | bp->b_datap = (uintptr_t)NULL; |
3456 | kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); |
3457 | bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); |
3458 | kmem_free(kernel_map, elem, bp->b_bufsize); |
3459 | } else { |
3460 | desired_size = bp->b_bufsize; |
3461 | } |
3462 | } |
3463 | } else { |
3464 | /* new allocation */ |
3465 | if (nsize <= MAXMETA) { |
3466 | desired_size = nsize; |
3467 | |
3468 | /* b_datap not really a ptr */ |
3469 | *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize); |
3470 | SET(bp->b_flags, B_ZALLOC); |
3471 | } else |
3472 | kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); |
3473 | } |
3474 | |
3475 | if (bp->b_datap == 0) |
3476 | panic("allocbuf: NULL b_datap" ); |
3477 | } |
3478 | bp->b_bufsize = desired_size; |
3479 | bp->b_bcount = size; |
3480 | |
3481 | return (0); |
3482 | } |
3483 | |
3484 | /* |
3485 | * Get a new buffer from one of the free lists. |
3486 | * |
 * A request for a queue is passed in. The queue from which the buffer
 * was taken is returned. Out-of-range queue requests get BQ_EMPTY. A request
 * for BQUEUE means no preference. Use heuristics in that case.
 * The heuristic is as follows:
 * Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order.
 * If none are available, block until one is made available.
 * If buffers are available on both BQ_AGE and BQ_LRU, check the timestamps
 * and pick the most stale buffer.
 * If the buffer found was marked for delayed write, start the async write
 * and restart the search.
3497 | * Initialize the fields and disassociate the buffer from the vnode. |
3498 | * Remove the buffer from the hash. Return the buffer and the queue |
3499 | * on which it was found. |
3500 | * |
3501 | * buf_mtxp is held upon entry |
3502 | * returns with buf_mtxp locked if new buf available |
3503 | * returns with buf_mtxp UNlocked if new buf NOT available |
3504 | */ |
3505 | |
3506 | static buf_t |
3507 | getnewbuf(int slpflag, int slptimeo, int * queue) |
3508 | { |
3509 | buf_t bp; |
3510 | buf_t lru_bp; |
3511 | buf_t age_bp; |
3512 | buf_t meta_bp; |
3513 | int age_time, lru_time, bp_time, meta_time; |
3514 | int req = *queue; /* save it for restarts */ |
3515 | struct timespec ts; |
3516 | |
3517 | start: |
3518 | /* |
3519 | * invalid request gets empty queue |
3520 | */ |
3521 | if ((*queue >= BQUEUES) || (*queue < 0) |
3522 | || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) |
3523 | *queue = BQ_EMPTY; |
3524 | |
3525 | |
3526 | if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) |
3527 | goto found; |
3528 | |
3529 | /* |
	 * need to grow the number of bufs; add another one rather than recycling
3531 | */ |
3532 | if (nbuf_headers < max_nbuf_headers) { |
3533 | /* |
3534 | * Increment count now as lock |
3535 | * is dropped for allocation. |
		 * That avoids overcommits
3537 | */ |
3538 | nbuf_headers++; |
3539 | goto add_newbufs; |
3540 | } |
3541 | /* Try for the requested queue first */ |
3542 | bp = bufqueues[*queue].tqh_first; |
3543 | if (bp) |
3544 | goto found; |
3545 | |
3546 | /* Unable to use requested queue */ |
3547 | age_bp = bufqueues[BQ_AGE].tqh_first; |
3548 | lru_bp = bufqueues[BQ_LRU].tqh_first; |
3549 | meta_bp = bufqueues[BQ_META].tqh_first; |
3550 | |
3551 | if (!age_bp && !lru_bp && !meta_bp) { |
3552 | /* |
		 * Unavailable on AGE or LRU or META queues
3554 | * Try the empty list first |
3555 | */ |
3556 | bp = bufqueues[BQ_EMPTY].tqh_first; |
3557 | if (bp) { |
3558 | *queue = BQ_EMPTY; |
3559 | goto found; |
3560 | } |
3561 | /* |
		 * We have seen that this is hard to trigger.
		 * This is an overcommit of nbufs but needed
		 * in some scenarios with disk images
3565 | */ |
3566 | |
3567 | add_newbufs: |
3568 | lck_mtx_unlock(buf_mtxp); |
3569 | |
3570 | /* Create a new temporary buffer header */ |
3571 | bp = (struct buf *)zalloc(buf_hdr_zone); |
3572 | |
3573 | if (bp) { |
3574 | bufhdrinit(bp); |
3575 | bp->b_whichq = BQ_EMPTY; |
3576 | bp->b_timestamp = buf_timestamp(); |
3577 | BLISTNONE(bp); |
3578 | SET(bp->b_flags, B_HDRALLOC); |
3579 | *queue = BQ_EMPTY; |
3580 | } |
3581 | lck_mtx_lock_spin(buf_mtxp); |
3582 | |
3583 | if (bp) { |
3584 | binshash(bp, &invalhash); |
3585 | binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); |
3586 | buf_hdr_count++; |
3587 | goto found; |
3588 | } |
3589 | /* subtract already accounted bufcount */ |
3590 | nbuf_headers--; |
3591 | |
3592 | bufstats.bufs_sleeps++; |
3593 | |
3594 | /* wait for a free buffer of any kind */ |
3595 | needbuffer = 1; |
3596 | /* hz value is 100 */ |
3597 | ts.tv_sec = (slptimeo/1000); |
		/* the hz value is 100, which leads to 10ms ticks */
3599 | ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10; |
3600 | |
3601 | msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO+1), "getnewbuf" , &ts); |
3602 | return (NULL); |
3603 | } |
3604 | |
3605 | /* Buffer available either on AGE or LRU or META */ |
3606 | bp = NULL; |
3607 | *queue = -1; |
3608 | |
3609 | /* Buffer available either on AGE or LRU */ |
3610 | if (!age_bp) { |
3611 | bp = lru_bp; |
3612 | *queue = BQ_LRU; |
3613 | } else if (!lru_bp) { |
3614 | bp = age_bp; |
3615 | *queue = BQ_AGE; |
3616 | } else { /* buffer available on both AGE and LRU */ |
3617 | int t = buf_timestamp(); |
3618 | |
3619 | age_time = t - age_bp->b_timestamp; |
3620 | lru_time = t - lru_bp->b_timestamp; |
3621 | if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */ |
3622 | bp = age_bp; |
3623 | *queue = BQ_AGE; |
3624 | /* |
			 * we should probably re-timestamp everything in the
3626 | * queues at this point with the current time |
3627 | */ |
3628 | } else { |
3629 | if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) { |
3630 | bp = lru_bp; |
3631 | *queue = BQ_LRU; |
3632 | } else { |
3633 | bp = age_bp; |
3634 | *queue = BQ_AGE; |
3635 | } |
3636 | } |
3637 | } |
3638 | |
3639 | if (!bp) { /* Neither on AGE nor on LRU */ |
3640 | bp = meta_bp; |
3641 | *queue = BQ_META; |
3642 | } else if (meta_bp) { |
3643 | int t = buf_timestamp(); |
3644 | |
3645 | bp_time = t - bp->b_timestamp; |
3646 | meta_time = t - meta_bp->b_timestamp; |
3647 | |
3648 | if (!(bp_time < 0) && !(meta_time < 0)) { |
3649 | /* time not set backwards */ |
3650 | int bp_is_stale; |
3651 | bp_is_stale = (*queue == BQ_LRU) ? |
3652 | lru_is_stale : age_is_stale; |
3653 | |
3654 | if ((meta_time >= meta_is_stale) && |
3655 | (bp_time < bp_is_stale)) { |
3656 | bp = meta_bp; |
3657 | *queue = BQ_META; |
3658 | } |
3659 | } |
3660 | } |
3661 | found: |
3662 | if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) |
3663 | panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n" , bp, bp->b_flags); |
3664 | |
3665 | /* Clean it */ |
3666 | if (bcleanbuf(bp, FALSE)) { |
3667 | /* |
3668 | * moved to the laundry thread, buffer not ready |
3669 | */ |
3670 | *queue = req; |
3671 | goto start; |
3672 | } |
3673 | return (bp); |
3674 | } |
3675 | |
3676 | |
3677 | /* |
3678 | * Clean a buffer. |
3679 | * Returns 0 if buffer is ready to use, |
3680 | * Returns 1 if issued a buf_bawrite() to indicate |
3681 | * that the buffer is not ready. |
3682 | * |
3683 | * buf_mtxp is held upon entry |
3684 | * returns with buf_mtxp locked |
3685 | */ |
3686 | int |
3687 | bcleanbuf(buf_t bp, boolean_t discard) |
3688 | { |
3689 | /* Remove from the queue */ |
3690 | bremfree_locked(bp); |
3691 | |
3692 | #ifdef JOE_DEBUG |
3693 | bp->b_owner = current_thread(); |
3694 | bp->b_tag = 2; |
3695 | #endif |
3696 | /* |
3697 | * If buffer was a delayed write, start the IO by queuing |
3698 | * it on the LAUNDRY queue, and return 1 |
3699 | */ |
3700 | if (ISSET(bp->b_flags, B_DELWRI)) { |
3701 | if (discard) { |
3702 | SET(bp->b_lflags, BL_WANTDEALLOC); |
3703 | } |
3704 | |
3705 | bmovelaundry(bp); |
3706 | |
3707 | lck_mtx_unlock(buf_mtxp); |
3708 | |
3709 | wakeup(&bufqueues[BQ_LAUNDRY]); |
3710 | /* |
3711 | * and give it a chance to run |
3712 | */ |
3713 | (void)thread_block(THREAD_CONTINUE_NULL); |
3714 | |
3715 | lck_mtx_lock_spin(buf_mtxp); |
3716 | |
3717 | return (1); |
3718 | } |
3719 | #ifdef JOE_DEBUG |
3720 | bp->b_owner = current_thread(); |
3721 | bp->b_tag = 8; |
3722 | #endif |
3723 | /* |
3724 | * Buffer is no longer on any free list... we own it |
3725 | */ |
3726 | SET(bp->b_lflags, BL_BUSY); |
3727 | buf_busycount++; |
3728 | |
3729 | bremhash(bp); |
3730 | |
3731 | /* |
3732 | * disassociate us from our vnode, if we had one... |
3733 | */ |
3734 | if (bp->b_vp) |
3735 | brelvp_locked(bp); |
3736 | |
3737 | lck_mtx_unlock(buf_mtxp); |
3738 | |
3739 | BLISTNONE(bp); |
3740 | |
3741 | if (ISSET(bp->b_flags, B_META)) |
3742 | buf_free_meta_store(bp); |
3743 | |
3744 | trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); |
3745 | |
3746 | buf_release_credentials(bp); |
3747 | |
3748 | /* If discarding, just move to the empty queue */ |
3749 | if (discard) { |
3750 | lck_mtx_lock_spin(buf_mtxp); |
3751 | CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); |
3752 | bp->b_whichq = BQ_EMPTY; |
3753 | binshash(bp, &invalhash); |
3754 | binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); |
3755 | CLR(bp->b_lflags, BL_BUSY); |
3756 | buf_busycount--; |
3757 | } else { |
3758 | /* Not discarding: clean up and prepare for reuse */ |
3759 | bp->b_bufsize = 0; |
3760 | bp->b_datap = (uintptr_t)NULL; |
3761 | bp->b_upl = (void *)NULL; |
3762 | bp->b_fsprivate = (void *)NULL; |
3763 | /* |
3764 | * preserve the state of whether this buffer |
3765 | * was allocated on the fly or not... |
3766 | * the only other flag that should be set at |
3767 | * this point is BL_BUSY... |
3768 | */ |
3769 | #ifdef JOE_DEBUG |
3770 | bp->b_owner = current_thread(); |
3771 | bp->b_tag = 3; |
3772 | #endif |
3773 | bp->b_lflags = BL_BUSY; |
3774 | bp->b_flags = (bp->b_flags & B_HDRALLOC); |
3775 | bp->b_redundancy_flags = 0; |
3776 | bp->b_dev = NODEV; |
3777 | bp->b_blkno = bp->b_lblkno = 0; |
3778 | bp->b_iodone = NULL; |
3779 | bp->b_error = 0; |
3780 | bp->b_resid = 0; |
3781 | bp->b_bcount = 0; |
3782 | bp->b_dirtyoff = bp->b_dirtyend = 0; |
3783 | bp->b_validoff = bp->b_validend = 0; |
3784 | bzero(&bp->b_attr, sizeof(struct bufattr)); |
3785 | |
3786 | lck_mtx_lock_spin(buf_mtxp); |
3787 | } |
3788 | return (0); |
3789 | } |
3790 | |
3791 | |
3792 | |
3793 | errno_t |
3794 | buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags) |
3795 | { |
3796 | buf_t bp; |
3797 | errno_t error; |
3798 | struct bufhashhdr *dp; |
3799 | |
3800 | dp = BUFHASH(vp, lblkno); |
3801 | |
3802 | relook: |
3803 | lck_mtx_lock_spin(buf_mtxp); |
3804 | |
3805 | if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) { |
3806 | lck_mtx_unlock(buf_mtxp); |
3807 | return (0); |
3808 | } |
3809 | if (ISSET(bp->b_lflags, BL_BUSY)) { |
3810 | if ( !ISSET(flags, BUF_WAIT)) { |
3811 | lck_mtx_unlock(buf_mtxp); |
3812 | return (EBUSY); |
3813 | } |
3814 | SET(bp->b_lflags, BL_WANTED); |
3815 | |
		error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
3817 | |
3818 | if (error) { |
3819 | return (error); |
3820 | } |
3821 | goto relook; |
3822 | } |
3823 | bremfree_locked(bp); |
3824 | SET(bp->b_lflags, BL_BUSY); |
3825 | SET(bp->b_flags, B_INVAL); |
3826 | buf_busycount++; |
3827 | #ifdef JOE_DEBUG |
3828 | bp->b_owner = current_thread(); |
3829 | bp->b_tag = 4; |
3830 | #endif |
3831 | lck_mtx_unlock(buf_mtxp); |
3832 | buf_brelse(bp); |
3833 | |
3834 | return (0); |
3835 | } |
3836 | |
3837 | |
3838 | void |
3839 | buf_drop(buf_t bp) |
3840 | { |
3841 | int need_wakeup = 0; |
3842 | |
3843 | lck_mtx_lock_spin(buf_mtxp); |
3844 | |
3845 | if (ISSET(bp->b_lflags, BL_WANTED)) { |
3846 | /* |
3847 | * delay the actual wakeup until after we |
3848 | * clear BL_BUSY and we've dropped buf_mtxp |
3849 | */ |
3850 | need_wakeup = 1; |
3851 | } |
3852 | #ifdef JOE_DEBUG |
3853 | bp->b_owner = current_thread(); |
3854 | bp->b_tag = 9; |
3855 | #endif |
3856 | /* |
3857 | * Unlock the buffer. |
3858 | */ |
3859 | CLR(bp->b_lflags, (BL_BUSY | BL_WANTED)); |
3860 | buf_busycount--; |
3861 | |
3862 | lck_mtx_unlock(buf_mtxp); |
3863 | |
3864 | if (need_wakeup) { |
3865 | /* |
		 * Wake up any processes waiting for _this_ buffer to become free.
3867 | */ |
3868 | wakeup(bp); |
3869 | } |
3870 | } |
3871 | |
3872 | |
3873 | errno_t |
buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo)
{
3875 | errno_t error; |
3876 | |
3877 | lck_mtx_lock_spin(buf_mtxp); |
3878 | |
3879 | error = buf_acquire_locked(bp, flags, slpflag, slptimeo); |
3880 | |
3881 | lck_mtx_unlock(buf_mtxp); |
3882 | |
3883 | return (error); |
3884 | } |
3885 | |
3886 | |
3887 | static errno_t |
3888 | buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo) |
3889 | { |
3890 | errno_t error; |
3891 | struct timespec ts; |
3892 | |
3893 | if (ISSET(bp->b_flags, B_LOCKED)) { |
3894 | if ((flags & BAC_SKIP_LOCKED)) |
3895 | return (EDEADLK); |
3896 | } else { |
3897 | if ((flags & BAC_SKIP_NONLOCKED)) |
3898 | return (EDEADLK); |
3899 | } |
3900 | if (ISSET(bp->b_lflags, BL_BUSY)) { |
3901 | /* |
3902 | * since the lck_mtx_lock may block, the buffer |
3903 | * may become BUSY, so we need to |
3904 | * recheck for a NOWAIT request |
3905 | */ |
3906 | if (flags & BAC_NOWAIT) |
3907 | return (EBUSY); |
3908 | SET(bp->b_lflags, BL_WANTED); |
3909 | |
		/* the hz value is 100, which gives 10ms per tick */
3911 | ts.tv_sec = (slptimeo/100); |
3912 | ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000; |
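		/*
		 * e.g. slptimeo == 150 ticks yields ts.tv_sec = 1 and
		 * ts.tv_nsec = 50 * 10 * 1000 * 1000 = 500000000 (0.5s)
		 */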
		error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
3914 | |
3915 | if (error) |
3916 | return (error); |
3917 | return (EAGAIN); |
3918 | } |
3919 | if (flags & BAC_REMOVE) |
3920 | bremfree_locked(bp); |
3921 | SET(bp->b_lflags, BL_BUSY); |
3922 | buf_busycount++; |
3923 | |
3924 | #ifdef JOE_DEBUG |
3925 | bp->b_owner = current_thread(); |
3926 | bp->b_tag = 5; |
3927 | #endif |
3928 | return (0); |
3929 | } |
3930 | |
3931 | |
3932 | /* |
3933 | * Wait for operations on the buffer to complete. |
3934 | * When they do, extract and return the I/O's error value. |
3935 | */ |
3936 | errno_t |
3937 | buf_biowait(buf_t bp) |
3938 | { |
3939 | while (!ISSET(bp->b_flags, B_DONE)) { |
3940 | |
3941 | lck_mtx_lock_spin(buf_mtxp); |
3942 | |
3943 | if (!ISSET(bp->b_flags, B_DONE)) { |
3944 | DTRACE_IO1(wait__start, buf_t, bp); |
			(void) msleep(bp, buf_mtxp, PDROP | (PRIBIO+1), "buf_biowait", NULL);
3946 | DTRACE_IO1(wait__done, buf_t, bp); |
3947 | } else |
3948 | lck_mtx_unlock(buf_mtxp); |
3949 | } |
3950 | /* check for interruption of I/O (e.g. via NFS), then errors. */ |
3951 | if (ISSET(bp->b_flags, B_EINTR)) { |
3952 | CLR(bp->b_flags, B_EINTR); |
3953 | return (EINTR); |
3954 | } else if (ISSET(bp->b_flags, B_ERROR)) |
3955 | return (bp->b_error ? bp->b_error : EIO); |
3956 | else |
3957 | return (0); |
3958 | } |
3959 | |
3960 | |
3961 | /* |
3962 | * Mark I/O complete on a buffer. |
3963 | * |
3964 | * If a callback has been requested, e.g. the pageout |
3965 | * daemon, do so. Otherwise, awaken waiting processes. |
3966 | * |
3967 | * [ Leffler, et al., says on p.247: |
3968 | * "This routine wakes up the blocked process, frees the buffer |
3969 | * for an asynchronous write, or, for a request by the pagedaemon |
3970 | * process, invokes a procedure specified in the buffer structure" ] |
3971 | * |
3972 | * In real life, the pagedaemon (or other system processes) wants |
 * to do async stuff too, and doesn't want the buffer buf_brelse()'d.
3974 | * (for swap pager, that puts swap buffers on the free lists (!!!), |
3975 | * for the vn device, that puts malloc'd buffers on the free lists!) |
3976 | */ |
3977 | |
3978 | void |
3979 | buf_biodone(buf_t bp) |
3980 | { |
3981 | mount_t mp; |
3982 | struct bufattr *bap; |
3983 | struct timeval real_elapsed; |
3984 | uint64_t real_elapsed_usec = 0; |
3985 | |
3986 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START, |
3987 | bp, bp->b_datap, bp->b_flags, 0, 0); |
3988 | |
3989 | if (ISSET(bp->b_flags, B_DONE)) |
3990 | panic("biodone already" ); |
3991 | |
3992 | bap = &bp->b_attr; |
3993 | |
3994 | if (bp->b_vp && bp->b_vp->v_mount) { |
3995 | mp = bp->b_vp->v_mount; |
3996 | } else { |
3997 | mp = NULL; |
3998 | } |
3999 | |
4000 | if (ISSET(bp->b_flags, B_ERROR)) { |
4001 | if (mp && (MNT_ROOTFS & mp->mnt_flag)) { |
4002 | dk_error_description_t desc; |
4003 | bzero(&desc, sizeof(desc)); |
4004 | desc.description = panic_disk_error_description; |
4005 | desc.description_size = panic_disk_error_description_size; |
4006 | VNOP_IOCTL(mp->mnt_devvp, DKIOCGETERRORDESCRIPTION, (caddr_t)&desc, 0, vfs_context_kernel()); |
4007 | } |
4008 | } |
4009 | |
4010 | if (mp && (bp->b_flags & B_READ) == 0) { |
4011 | update_last_io_time(mp); |
4012 | INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size); |
4013 | } else if (mp) { |
4014 | INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size); |
4015 | } |
4016 | |
4017 | throttle_info_end_io(bp); |
4018 | |
4019 | if (kdebug_enable) { |
4020 | int code = DKIO_DONE; |
4021 | int io_tier = GET_BUFATTR_IO_TIER(bap); |
4022 | |
4023 | if (bp->b_flags & B_READ) |
4024 | code |= DKIO_READ; |
4025 | if (bp->b_flags & B_ASYNC) |
4026 | code |= DKIO_ASYNC; |
4027 | |
4028 | if (bp->b_flags & B_META) |
4029 | code |= DKIO_META; |
4030 | else if (bp->b_flags & B_PAGEIO) |
4031 | code |= DKIO_PAGING; |
4032 | |
4033 | if (io_tier != 0) |
4034 | code |= DKIO_THROTTLE; |
4035 | |
4036 | code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK); |
4037 | |
4038 | if (bp->b_flags & B_PASSIVE) |
4039 | code |= DKIO_PASSIVE; |
4040 | |
4041 | if (bap->ba_flags & BA_NOCACHE) |
4042 | code |= DKIO_NOCACHE; |
4043 | |
4044 | if (bap->ba_flags & BA_IO_TIER_UPGRADE) { |
4045 | code |= DKIO_TIER_UPGRADE; |
4046 | } |
4047 | |
4048 | KDBG_RELEASE_NOPROCFILT(FSDBG_CODE(DBG_DKRW, code), |
4049 | buf_kernel_addrperm_addr(bp), |
4050 | (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid, |
4051 | bp->b_error); |
4052 | } |
4053 | |
4054 | microuptime(&real_elapsed); |
4055 | timevalsub(&real_elapsed, &bp->b_timestamp_tv); |
4056 | real_elapsed_usec = real_elapsed.tv_sec * USEC_PER_SEC + real_elapsed.tv_usec; |
4057 | disk_conditioner_delay(bp, 1, bp->b_bcount, real_elapsed_usec); |
4058 | |
4059 | /* |
4060 | * I/O was done, so don't believe |
4061 | * the DIRTY state from VM anymore... |
4062 | * and we need to reset the THROTTLED/PASSIVE |
4063 | * indicators |
4064 | */ |
4065 | CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE)); |
4066 | CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP | BA_IO_TIER_UPGRADE)); |
4067 | |
4068 | SET_BUFATTR_IO_TIER(bap, 0); |
4069 | |
4070 | DTRACE_IO1(done, buf_t, bp); |
4071 | |
4072 | if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) |
4073 | /* |
		 * wake up any writers blocked
4075 | * on throttle or waiting for I/O |
4076 | * to drain |
4077 | */ |
4078 | vnode_writedone(bp->b_vp); |
4079 | |
4080 | if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */ |
4081 | void (*iodone_func)(struct buf *, void *) = bp->b_iodone; |
4082 | void *arg = bp->b_transaction; |
4083 | int callout = ISSET(bp->b_flags, B_CALL); |
4084 | |
4085 | if (iodone_func == NULL) |
4086 | panic("biodone: bp @ %p has NULL b_iodone!\n" , bp); |
4087 | |
4088 | CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */ |
4089 | bp->b_iodone = NULL; |
4090 | bp->b_transaction = NULL; |
4091 | |
4092 | if (callout) |
4093 | SET(bp->b_flags, B_DONE); /* note that it's done */ |
4094 | |
4095 | (*iodone_func)(bp, arg); |
4096 | |
4097 | if (callout) { |
4098 | /* |
4099 | * assumes that the callback function takes |
4100 | * ownership of the bp and deals with releasing it if necessary |
4101 | */ |
4102 | goto biodone_done; |
4103 | } |
4104 | /* |
		 * in this case the callback function is acting
4106 | * strictly as a filter... it does not take |
4107 | * ownership of the bp and is expecting us |
4108 | * to finish cleaning up... this is currently used |
4109 | * by the HFS journaling code |
4110 | */ |
4111 | } |
4112 | if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */ |
4113 | SET(bp->b_flags, B_DONE); /* note that it's done */ |
4114 | |
4115 | buf_brelse(bp); |
4116 | } else { /* or just wakeup the buffer */ |
4117 | /* |
4118 | * by taking the mutex, we serialize |
4119 | * the buf owner calling buf_biowait so that we'll |
4120 | * only see him in one of 2 states... |
4121 | * state 1: B_DONE wasn't set and he's |
4122 | * blocked in msleep |
4123 | * state 2: he's blocked trying to take the |
4124 | * mutex before looking at B_DONE |
4125 | * BL_WANTED is cleared in case anyone else |
4126 | * is blocked waiting for the buffer... note |
		 * that we haven't cleared BL_BUSY yet, so if
		 * they do get to run, they're going to re-set
4129 | * BL_WANTED and go back to sleep |
4130 | */ |
4131 | lck_mtx_lock_spin(buf_mtxp); |
4132 | |
4133 | CLR(bp->b_lflags, BL_WANTED); |
4134 | SET(bp->b_flags, B_DONE); /* note that it's done */ |
4135 | |
4136 | lck_mtx_unlock(buf_mtxp); |
4137 | |
4138 | wakeup(bp); |
4139 | } |
4140 | biodone_done: |
4141 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END, |
4142 | (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0); |
4143 | } |
4144 | |
4145 | /* |
4146 | * Obfuscate buf pointers. |
4147 | */ |
4148 | vm_offset_t |
4149 | buf_kernel_addrperm_addr(void * addr) |
4150 | { |
4151 | if ((vm_offset_t)addr == 0) |
4152 | return 0; |
4153 | else |
4154 | return ((vm_offset_t)addr + buf_kernel_addrperm); |
4155 | } |
4156 | |
4157 | /* |
4158 | * Return a count of buffers on the "locked" queue. |
4159 | */ |
4160 | int |
4161 | count_lock_queue(void) |
4162 | { |
4163 | buf_t bp; |
4164 | int n = 0; |
4165 | |
4166 | lck_mtx_lock_spin(buf_mtxp); |
4167 | |
4168 | for (bp = bufqueues[BQ_LOCKED].tqh_first; bp; |
4169 | bp = bp->b_freelist.tqe_next) |
4170 | n++; |
4171 | lck_mtx_unlock(buf_mtxp); |
4172 | |
4173 | return (n); |
4174 | } |
4175 | |
4176 | /* |
4177 | * Return a count of 'busy' buffers. Used at the time of shutdown. |
 * Note: this is also called from the Mach side in a debug context in kdp.c
4179 | */ |
4180 | int |
4181 | count_busy_buffers(void) |
4182 | { |
4183 | return buf_busycount + bufstats.bufs_iobufinuse; |
4184 | } |
4185 | |
4186 | #if DIAGNOSTIC |
4187 | /* |
4188 | * Print out statistics on the current allocation of the buffer pool. |
4189 | * Can be enabled to print out on every ``sync'' by setting "syncprt" |
4190 | * in vfs_syscalls.c using sysctl. |
4191 | */ |
4192 | void |
vfs_bufstats(void)
4194 | { |
4195 | int i, j, count; |
4196 | struct buf *bp; |
4197 | struct bqueues *dp; |
4198 | int counts[MAXBSIZE/CLBYTES+1]; |
4199 | static char *bname[BQUEUES] = |
4200 | { "LOCKED" , "LRU" , "AGE" , "EMPTY" , "META" , "LAUNDRY" }; |
4201 | |
4202 | for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) { |
4203 | count = 0; |
4204 | for (j = 0; j <= MAXBSIZE/CLBYTES; j++) |
4205 | counts[j] = 0; |
4206 | |
4207 | lck_mtx_lock(buf_mtxp); |
4208 | |
4209 | for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) { |
4210 | counts[bp->b_bufsize/CLBYTES]++; |
4211 | count++; |
4212 | } |
4213 | lck_mtx_unlock(buf_mtxp); |
4214 | |
4215 | printf("%s: total-%d" , bname[i], count); |
4216 | for (j = 0; j <= MAXBSIZE/CLBYTES; j++) |
4217 | if (counts[j] != 0) |
4218 | printf(", %d-%d" , j * CLBYTES, counts[j]); |
4219 | printf("\n" ); |
4220 | } |
4221 | } |
4222 | #endif /* DIAGNOSTIC */ |
4223 | |
4224 | #define NRESERVEDIOBUFS 128 |
4225 | |
4226 | #define MNT_VIRTUALDEV_MAX_IOBUFS 16 |
4227 | #define VIRTUALDEV_MAX_IOBUFS ((40*niobuf_headers)/100) |
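/*
 * e.g. with niobuf_headers == 2048 (a hypothetical configuration),
 * VIRTUALDEV_MAX_IOBUFS works out to (40 * 2048) / 100 == 819 iobuf headers
 * shared by all diskimage mounts, while any single diskimage mount is limited
 * to MNT_VIRTUALDEV_MAX_IOBUFS (16) for non vm-privileged requests.
 */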
4228 | |
4229 | buf_t |
4230 | alloc_io_buf(vnode_t vp, int priv) |
4231 | { |
4232 | buf_t bp; |
4233 | mount_t mp = NULL; |
4234 | int alloc_for_virtualdev = FALSE; |
4235 | |
4236 | lck_mtx_lock_spin(iobuffer_mtxp); |
4237 | |
4238 | /* |
4239 | * We subject iobuf requests for diskimages to additional restrictions. |
4240 | * |
4241 | * a) A single diskimage mount cannot use up more than |
	 * MNT_VIRTUALDEV_MAX_IOBUFS. However, vm-privileged (pageout) requests
	 * are not subject to this restriction.
	 * b) The total number of iobuf headers in use by all diskimage mounts
	 * cannot exceed VIRTUALDEV_MAX_IOBUFS.
4246 | */ |
4247 | if (vp && ((mp = vp->v_mount)) && mp != dead_mountp && |
4248 | mp->mnt_kern_flag & MNTK_VIRTUALDEV) { |
4249 | alloc_for_virtualdev = TRUE; |
4250 | while ((!priv && mp->mnt_iobufinuse > MNT_VIRTUALDEV_MAX_IOBUFS) || |
4251 | bufstats.bufs_iobufinuse_vdev > VIRTUALDEV_MAX_IOBUFS) { |
4252 | bufstats.bufs_iobufsleeps++; |
4253 | |
4254 | need_iobuffer = 1; |
4255 | (void)msleep(&need_iobuffer, iobuffer_mtxp, |
			    PSPIN | (PRIBIO+1), (const char *)"alloc_io_buf (1)",
4257 | NULL); |
4258 | } |
4259 | } |
4260 | |
4261 | while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) || |
4262 | (bp = iobufqueue.tqh_first) == NULL) { |
4263 | bufstats.bufs_iobufsleeps++; |
4264 | |
4265 | need_iobuffer = 1; |
4266 | (void)msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO+1), |
4267 | (const char *)"alloc_io_buf (2)" , NULL); |
4268 | } |
4269 | TAILQ_REMOVE(&iobufqueue, bp, b_freelist); |
4270 | |
4271 | bufstats.bufs_iobufinuse++; |
4272 | if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) |
4273 | bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse; |
4274 | |
4275 | if (alloc_for_virtualdev) { |
4276 | mp->mnt_iobufinuse++; |
4277 | bufstats.bufs_iobufinuse_vdev++; |
4278 | } |
4279 | |
4280 | lck_mtx_unlock(iobuffer_mtxp); |
4281 | |
4282 | /* |
4283 | * initialize various fields |
4284 | * we don't need to hold the mutex since the buffer |
4285 | * is now private... the vp should have a reference |
4286 | * on it and is not protected by this mutex in any event |
4287 | */ |
4288 | bp->b_timestamp = 0; |
4289 | bp->b_proc = NULL; |
4290 | |
4291 | bp->b_datap = 0; |
4292 | bp->b_flags = 0; |
4293 | bp->b_lflags = BL_BUSY | BL_IOBUF; |
4294 | if (alloc_for_virtualdev) |
4295 | bp->b_lflags |= BL_IOBUF_VDEV; |
4296 | bp->b_redundancy_flags = 0; |
4297 | bp->b_blkno = bp->b_lblkno = 0; |
4298 | #ifdef JOE_DEBUG |
4299 | bp->b_owner = current_thread(); |
4300 | bp->b_tag = 6; |
4301 | #endif |
4302 | bp->b_iodone = NULL; |
4303 | bp->b_error = 0; |
4304 | bp->b_resid = 0; |
4305 | bp->b_bcount = 0; |
4306 | bp->b_bufsize = 0; |
4307 | bp->b_upl = NULL; |
4308 | bp->b_fsprivate = (void *)NULL; |
4309 | bp->b_vp = vp; |
4310 | bzero(&bp->b_attr, sizeof(struct bufattr)); |
4311 | |
4312 | if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) |
4313 | bp->b_dev = vp->v_rdev; |
4314 | else |
4315 | bp->b_dev = NODEV; |
4316 | |
4317 | return (bp); |
4318 | } |
4319 | |
4320 | |
4321 | void |
4322 | free_io_buf(buf_t bp) |
4323 | { |
4324 | int need_wakeup = 0; |
4325 | int free_for_virtualdev = FALSE; |
4326 | mount_t mp = NULL; |
4327 | |
4328 | /* Was this iobuf for a diskimage ? */ |
4329 | if (bp->b_lflags & BL_IOBUF_VDEV) { |
4330 | free_for_virtualdev = TRUE; |
4331 | if (bp->b_vp) |
4332 | mp = bp->b_vp->v_mount; |
4333 | } |
4334 | |
4335 | /* |
4336 | * put buffer back on the head of the iobufqueue |
4337 | */ |
4338 | bp->b_vp = NULL; |
4339 | bp->b_flags = B_INVAL; |
4340 | |
4341 | /* Zero out the bufattr and its flags before relinquishing this iobuf */ |
4342 | bzero (&bp->b_attr, sizeof(struct bufattr)); |
4343 | |
4344 | lck_mtx_lock_spin(iobuffer_mtxp); |
4345 | |
4346 | binsheadfree(bp, &iobufqueue, -1); |
4347 | |
4348 | if (need_iobuffer) { |
4349 | /* |
4350 | * Wake up any processes waiting because they need an io buffer |
4351 | * |
4352 | * do the wakeup after we drop the mutex... it's possible that the |
4353 | * wakeup will be superfluous if need_iobuffer gets set again and |
4354 | * another thread runs this path, but it's highly unlikely, doesn't |
4355 | * hurt, and it means we don't hold up I/O progress if the wakeup blocks |
4356 | * trying to grab a task related lock... |
4357 | */ |
4358 | need_iobuffer = 0; |
4359 | need_wakeup = 1; |
4360 | } |
4361 | if (bufstats.bufs_iobufinuse <= 0) |
4362 | panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0" , bp); |
4363 | |
4364 | bufstats.bufs_iobufinuse--; |
4365 | |
4366 | if (free_for_virtualdev) { |
4367 | bufstats.bufs_iobufinuse_vdev--; |
4368 | if (mp && mp != dead_mountp) |
4369 | mp->mnt_iobufinuse--; |
4370 | } |
4371 | |
4372 | lck_mtx_unlock(iobuffer_mtxp); |
4373 | |
4374 | if (need_wakeup) |
4375 | wakeup(&need_iobuffer); |
4376 | } |
4377 | |
4378 | |
4379 | void |
4380 | buf_list_lock(void) |
4381 | { |
4382 | lck_mtx_lock_spin(buf_mtxp); |
4383 | } |
4384 | |
4385 | void |
4386 | buf_list_unlock(void) |
4387 | { |
4388 | lck_mtx_unlock(buf_mtxp); |
4389 | } |
4390 | |
4391 | /* |
4392 | * If getnewbuf() calls bcleanbuf() on the same thread |
 * there is a potential for stack overrun and deadlock.
 * So we always hand the work off to a worker thread for completion.
4395 | */ |
4396 | |
4397 | |
4398 | static void |
4399 | bcleanbuf_thread_init(void) |
4400 | { |
4401 | thread_t thread = THREAD_NULL; |
4402 | |
4403 | /* create worker thread */ |
4404 | kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread); |
4405 | thread_deallocate(thread); |
4406 | } |
4407 | |
4408 | typedef int (*bcleanbufcontinuation)(int); |
4409 | |
4410 | __attribute__((noreturn)) |
4411 | static void |
4412 | bcleanbuf_thread(void) |
4413 | { |
4414 | struct buf *bp; |
4415 | int error = 0; |
4416 | int loopcnt = 0; |
4417 | |
4418 | for (;;) { |
4419 | lck_mtx_lock_spin(buf_mtxp); |
4420 | |
4421 | while ( (bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) { |
4422 | (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO|PDROP, "blaundry" , 0, (bcleanbufcontinuation)bcleanbuf_thread); |
4423 | } |
4424 | |
4425 | /* |
4426 | * Remove from the queue |
4427 | */ |
4428 | bremfree_locked(bp); |
4429 | |
4430 | /* |
4431 | * Buffer is no longer on any free list |
4432 | */ |
4433 | SET(bp->b_lflags, BL_BUSY); |
4434 | buf_busycount++; |
4435 | |
4436 | #ifdef JOE_DEBUG |
4437 | bp->b_owner = current_thread(); |
4438 | bp->b_tag = 10; |
4439 | #endif |
4440 | |
4441 | lck_mtx_unlock(buf_mtxp); |
4442 | /* |
4443 | * do the IO |
4444 | */ |
4445 | error = bawrite_internal(bp, 0); |
4446 | |
4447 | if (error) { |
4448 | bp->b_whichq = BQ_LAUNDRY; |
4449 | bp->b_timestamp = buf_timestamp(); |
4450 | |
4451 | lck_mtx_lock_spin(buf_mtxp); |
4452 | |
4453 | binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); |
4454 | blaundrycnt++; |
4455 | |
			/* we never leave a busy buffer on the laundry queue */
4457 | CLR(bp->b_lflags, BL_BUSY); |
4458 | buf_busycount--; |
4459 | #ifdef JOE_DEBUG |
4460 | bp->b_owner = current_thread(); |
4461 | bp->b_tag = 11; |
4462 | #endif |
4463 | |
4464 | lck_mtx_unlock(buf_mtxp); |
4465 | |
4466 | if (loopcnt > MAXLAUNDRY) { |
4467 | /* |
4468 | * bawrite_internal() can return errors if we're throttled. If we've |
4469 | * done several I/Os and failed, give the system some time to unthrottle |
4470 | * the vnode |
4471 | */ |
4472 | (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry" , 1); |
4473 | loopcnt = 0; |
4474 | } else { |
4475 | /* give other threads a chance to run */ |
4476 | (void)thread_block(THREAD_CONTINUE_NULL); |
4477 | loopcnt++; |
4478 | } |
4479 | } |
4480 | } |
4481 | } |
4482 | |
4483 | |
4484 | static int |
4485 | brecover_data(buf_t bp) |
4486 | { |
4487 | int upl_offset; |
4488 | upl_t upl; |
4489 | upl_page_info_t *pl; |
4490 | kern_return_t kret; |
4491 | vnode_t vp = bp->b_vp; |
4492 | int upl_flags; |
4493 | |
4494 | |
4495 | if ( !UBCINFOEXISTS(vp) || bp->b_bufsize == 0) |
4496 | goto dump_buffer; |
4497 | |
4498 | upl_flags = UPL_PRECIOUS; |
4499 | if (! (buf_flags(bp) & B_READ)) { |
4500 | /* |
4501 | * "write" operation: let the UPL subsystem know |
4502 | * that we intend to modify the buffer cache pages we're |
4503 | * gathering. |
4504 | */ |
4505 | upl_flags |= UPL_WILL_MODIFY; |
4506 | } |
4507 | |
4508 | kret = ubc_create_upl_kernel(vp, |
4509 | ubc_blktooff(vp, bp->b_lblkno), |
4510 | bp->b_bufsize, |
4511 | &upl, |
4512 | &pl, |
4513 | upl_flags, |
4514 | VM_KERN_MEMORY_FILE); |
4515 | if (kret != KERN_SUCCESS) |
4516 | panic("Failed to create UPL" ); |
4517 | |
4518 | for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) { |
4519 | |
4520 | if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) { |
4521 | ubc_upl_abort(upl, 0); |
4522 | goto dump_buffer; |
4523 | } |
4524 | } |
4525 | bp->b_upl = upl; |
4526 | |
4527 | kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap)); |
4528 | |
4529 | if (kret != KERN_SUCCESS) |
4530 | panic("getblk: ubc_upl_map() failed with (%d)" , kret); |
4531 | return (1); |
4532 | |
4533 | dump_buffer: |
4534 | bp->b_bufsize = 0; |
4535 | SET(bp->b_flags, B_INVAL); |
4536 | buf_brelse(bp); |
4537 | |
	return (0);
4539 | } |
4540 | |
4541 | int |
4542 | fs_buffer_cache_gc_register(void (* callout)(int, void *), void *context) |
4543 | { |
4544 | lck_mtx_lock(buf_gc_callout); |
4545 | for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) { |
4546 | if (fs_callouts[i].callout == NULL) { |
4547 | fs_callouts[i].callout = callout; |
4548 | fs_callouts[i].context = context; |
4549 | lck_mtx_unlock(buf_gc_callout); |
4550 | return 0; |
4551 | } |
4552 | } |
4553 | |
4554 | lck_mtx_unlock(buf_gc_callout); |
4555 | return ENOMEM; |
4556 | } |
4557 | |
4558 | int |
4559 | fs_buffer_cache_gc_unregister(void (* callout)(int, void *), void *context) |
4560 | { |
4561 | lck_mtx_lock(buf_gc_callout); |
4562 | for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) { |
4563 | if (fs_callouts[i].callout == callout && |
4564 | fs_callouts[i].context == context) { |
4565 | fs_callouts[i].callout = NULL; |
4566 | fs_callouts[i].context = NULL; |
4567 | } |
4568 | } |
4569 | lck_mtx_unlock(buf_gc_callout); |
4570 | return 0; |
4571 | } |
4572 | |
4573 | static void |
4574 | fs_buffer_cache_gc_dispatch_callouts(int all) |
4575 | { |
4576 | lck_mtx_lock(buf_gc_callout); |
	for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4578 | if (fs_callouts[i].callout != NULL) { |
4579 | fs_callouts[i].callout(all, fs_callouts[i].context); |
4580 | } |
4581 | } |
4582 | lck_mtx_unlock(buf_gc_callout); |
4583 | } |
4584 | |
4585 | static boolean_t |
4586 | buffer_cache_gc(int all) |
4587 | { |
4588 | buf_t bp; |
4589 | boolean_t did_large_zfree = FALSE; |
4590 | boolean_t need_wakeup = FALSE; |
4591 | int now = buf_timestamp(); |
4592 | uint32_t found = 0; |
4593 | struct bqueues privq; |
4594 | int thresh_hold = BUF_STALE_THRESHHOLD; |
4595 | |
4596 | if (all) |
4597 | thresh_hold = 0; |
4598 | /* |
4599 | * We only care about metadata (incore storage comes from zalloc()). |
4600 | * Unless "all" is set (used to evict meta data buffers in preparation |
4601 | * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers |
4602 | * that have not been accessed in the last BUF_STALE_THRESHOLD seconds. |
4603 | * BUF_MAX_GC_BATCH_SIZE controls both the hold time of the global lock |
4604 | * "buf_mtxp" and the length of time we spend compute bound in the GC |
4605 | * thread which calls this function |
4606 | */ |
4607 | lck_mtx_lock(buf_mtxp); |
4608 | |
4609 | do { |
4610 | found = 0; |
4611 | TAILQ_INIT(&privq); |
4612 | need_wakeup = FALSE; |
4613 | |
4614 | while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) && |
4615 | (now > bp->b_timestamp) && |
4616 | (now - bp->b_timestamp > thresh_hold) && |
4617 | (found < BUF_MAX_GC_BATCH_SIZE)) { |
4618 | |
4619 | /* Remove from free list */ |
4620 | bremfree_locked(bp); |
4621 | found++; |
4622 | |
4623 | #ifdef JOE_DEBUG |
4624 | bp->b_owner = current_thread(); |
4625 | bp->b_tag = 12; |
4626 | #endif |
4627 | |
4628 | /* If dirty, move to laundry queue and remember to do wakeup */ |
4629 | if (ISSET(bp->b_flags, B_DELWRI)) { |
4630 | SET(bp->b_lflags, BL_WANTDEALLOC); |
4631 | |
4632 | bmovelaundry(bp); |
4633 | need_wakeup = TRUE; |
4634 | |
4635 | continue; |
4636 | } |
4637 | |
4638 | /* |
4639 | * Mark busy and put on private list. We could technically get |
4640 | * away without setting BL_BUSY here. |
4641 | */ |
4642 | SET(bp->b_lflags, BL_BUSY); |
4643 | buf_busycount++; |
4644 | |
4645 | /* |
4646 | * Remove from hash and dissociate from vp. |
4647 | */ |
4648 | bremhash(bp); |
4649 | if (bp->b_vp) { |
4650 | brelvp_locked(bp); |
4651 | } |
4652 | |
4653 | TAILQ_INSERT_TAIL(&privq, bp, b_freelist); |
4654 | } |
4655 | |
4656 | if (found == 0) { |
4657 | break; |
4658 | } |
4659 | |
4660 | /* Drop lock for batch processing */ |
4661 | lck_mtx_unlock(buf_mtxp); |
4662 | |
4663 | /* Wakeup and yield for laundry if need be */ |
4664 | if (need_wakeup) { |
4665 | wakeup(&bufqueues[BQ_LAUNDRY]); |
4666 | (void)thread_block(THREAD_CONTINUE_NULL); |
4667 | } |
4668 | |
4669 | /* Clean up every buffer on private list */ |
4670 | TAILQ_FOREACH(bp, &privq, b_freelist) { |
4671 | /* Take note if we've definitely freed at least a page to a zone */ |
4672 | if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) { |
4673 | did_large_zfree = TRUE; |
4674 | } |
4675 | |
4676 | trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); |
4677 | |
4678 | /* Free Storage */ |
4679 | buf_free_meta_store(bp); |
4680 | |
4681 | /* Release credentials */ |
4682 | buf_release_credentials(bp); |
4683 | |
4684 | /* Prepare for moving to empty queue */ |
4685 | CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED |
4686 | | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); |
4687 | bp->b_whichq = BQ_EMPTY; |
4688 | BLISTNONE(bp); |
4689 | } |
4690 | lck_mtx_lock(buf_mtxp); |
4691 | |
4692 | /* Back under lock, move them all to invalid hash and clear busy */ |
4693 | TAILQ_FOREACH(bp, &privq, b_freelist) { |
4694 | binshash(bp, &invalhash); |
4695 | CLR(bp->b_lflags, BL_BUSY); |
4696 | buf_busycount--; |
4697 | |
4698 | #ifdef JOE_DEBUG |
4699 | if (bp->b_owner != current_thread()) { |
4700 | panic("Buffer stolen from buffer_cache_gc()" ); |
4701 | } |
4702 | bp->b_owner = current_thread(); |
4703 | bp->b_tag = 13; |
4704 | #endif |
4705 | } |
4706 | |
4707 | /* And do a big bulk move to the empty queue */ |
4708 | TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist); |
4709 | |
4710 | } while (all && (found == BUF_MAX_GC_BATCH_SIZE)); |
4711 | |
4712 | lck_mtx_unlock(buf_mtxp); |
4713 | |
4714 | fs_buffer_cache_gc_dispatch_callouts(all); |
4715 | |
4716 | return did_large_zfree; |
4717 | } |
4718 | |
4719 | |
4720 | /* |
4721 | * disabled for now |
4722 | */ |
4723 | |
4724 | #if FLUSH_QUEUES |
4725 | |
4726 | #define NFLUSH 32 |
4727 | |
4728 | static int |
4729 | bp_cmp(void *a, void *b) |
4730 | { |
4731 | buf_t *bp_a = *(buf_t **)a, |
4732 | *bp_b = *(buf_t **)b; |
4733 | daddr64_t res; |
4734 | |
4735 | // don't have to worry about negative block |
4736 | // numbers so this is ok to do. |
4737 | // |
4738 | res = (bp_a->b_blkno - bp_b->b_blkno); |
4739 | |
4740 | return (int)res; |
4741 | } |
4742 | |
4743 | |
4744 | int |
4745 | bflushq(int whichq, mount_t mp) |
4746 | { |
4747 | buf_t bp, next; |
4748 | int i, buf_count; |
4749 | int total_writes = 0; |
4750 | static buf_t flush_table[NFLUSH]; |
4751 | |
4752 | if (whichq < 0 || whichq >= BQUEUES) { |
4753 | return (0); |
4754 | } |
4755 | |
4756 | restart: |
4757 | lck_mtx_lock(buf_mtxp); |
4758 | |
4759 | bp = TAILQ_FIRST(&bufqueues[whichq]); |
4760 | |
4761 | for (buf_count = 0; bp; bp = next) { |
4762 | next = bp->b_freelist.tqe_next; |
4763 | |
4764 | if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) { |
4765 | continue; |
4766 | } |
4767 | |
4768 | if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) { |
4769 | |
4770 | bremfree_locked(bp); |
4771 | #ifdef JOE_DEBUG |
4772 | bp->b_owner = current_thread(); |
4773 | bp->b_tag = 7; |
4774 | #endif |
4775 | SET(bp->b_lflags, BL_BUSY); |
4776 | buf_busycount++; |
4777 | |
4778 | flush_table[buf_count] = bp; |
4779 | buf_count++; |
4780 | total_writes++; |
4781 | |
4782 | if (buf_count >= NFLUSH) { |
4783 | lck_mtx_unlock(buf_mtxp); |
4784 | |
4785 | qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp); |
4786 | |
4787 | for (i = 0; i < buf_count; i++) { |
4788 | buf_bawrite(flush_table[i]); |
4789 | } |
4790 | goto restart; |
4791 | } |
4792 | } |
4793 | } |
4794 | lck_mtx_unlock(buf_mtxp); |
4795 | |
4796 | if (buf_count > 0) { |
4797 | qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp); |
4798 | |
4799 | for (i = 0; i < buf_count; i++) { |
4800 | buf_bawrite(flush_table[i]); |
4801 | } |
4802 | } |
4803 | |
4804 | return (total_writes); |
4805 | } |
4806 | #endif |
4807 | |