/*
 * Copyright (c) 1999-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Header file for the Unified Buffer Cache (UBC).
 */

#ifndef _SYS_UBC_H_
#define _SYS_UBC_H_

#include <sys/appleapiopts.h>
#include <sys/cdefs.h>
#include <sys/kernel_types.h>
#include <kern/locks.h>
#include <mach/machine.h>
#include <mach/memory_object_types.h>
#include <sys/ucred.h>

#ifdef KERNEL_PRIVATE
#include <sys/imgact.h>
#endif // KERNEL_PRIVATE

/* flag definitions for ubc_msync() */

#define UBC_PUSHDIRTY   0x01    /* clean any dirty pages in the specified range to the backing store */
#define UBC_PUSHALL     0x02    /* push both dirty and precious pages to the backing store */
#define UBC_INVALIDATE  0x04    /* invalidate pages in the specified range... may be used with UBC_PUSHDIRTY/ALL */
#define UBC_SYNC        0x08    /* wait for I/Os generated by UBC_PUSHDIRTY to complete */
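
/*
 * Illustrative sketch (not part of the API): how the flags above are
 * typically combined when calling ubc_msync(), declared below.  The vnode
 * "vp" and the byte range are hypothetical caller-supplied values, and the
 * exact meaning of the residual offset reported on error is left to the
 * ubc_msync() implementation.
 *
 *	off_t resid_off = 0;
 *	errno_t error;
 *
 *	// Push dirty pages in [start, end) and wait for the I/O to finish.
 *	error = ubc_msync(vp, start, end, &resid_off,
 *	    UBC_PUSHDIRTY | UBC_SYNC);
 *
 *	// Once the range is clean, toss the cached pages.
 *	if (error == 0)
 *		error = ubc_msync(vp, start, end, &resid_off, UBC_INVALIDATE);
 */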

__BEGIN_DECLS

off_t ubc_blktooff(struct vnode *, daddr64_t);
daddr64_t ubc_offtoblk(struct vnode *, off_t);
off_t ubc_getsize(struct vnode *);
int ubc_setsize(struct vnode *, off_t);
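
/*
 * Illustrative sketch (assumptions noted): converting between logical blocks
 * and byte offsets, and keeping the UBC's notion of the file size in step
 * with a truncate.  "vp", "lbn" and "new_size" are hypothetical caller
 * values, and the convention that a nonzero ubc_setsize() return means
 * success is an assumption of this example.
 *
 *	off_t file_off = ubc_blktooff(vp, lbn);        // block -> byte offset
 *	daddr64_t blk = ubc_offtoblk(vp, file_off);    // byte offset -> block
 *
 *	if (ubc_getsize(vp) != new_size) {
 *		if (!ubc_setsize(vp, new_size))
 *			return EIO;
 *	}
 */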

#ifdef KERNEL_PRIVATE

enum {
	UBC_SETSIZE_NO_FS_REENTRY = 1
};
typedef uint32_t ubc_setsize_opts_t;

errno_t ubc_setsize_ex(vnode_t vp, off_t nsize, ubc_setsize_opts_t opts);

#endif // KERNEL_PRIVATE

kauth_cred_t ubc_getcred(struct vnode *);
struct thread;
int ubc_setthreadcred(struct vnode *, struct proc *, struct thread *);

errno_t ubc_msync(vnode_t, off_t, off_t, off_t *, int);
int ubc_pages_resident(vnode_t);
int ubc_page_op(vnode_t, off_t, int, ppnum_t *, int *);
int ubc_range_op(vnode_t, off_t, off_t, int, int *);
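
/*
 * Illustrative sketch: skipping a flush when the vnode has no resident
 * pages.  ubc_pages_resident() is assumed here to return nonzero when the
 * vnode's pager has pages in memory; "vp" is a hypothetical caller value.
 *
 *	if (ubc_pages_resident(vp)) {
 *		off_t resid_off = 0;
 *		(void) ubc_msync(vp, 0, ubc_getsize(vp), &resid_off,
 *		    UBC_PUSHDIRTY | UBC_SYNC);
 *	}
 */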

#ifdef KERNEL_PRIVATE
int ubc_setcred(struct vnode *, struct ucred *);

/* code signing */
struct cs_blob;
struct cs_blob *ubc_cs_blob_get(vnode_t, cpu_type_t, cpu_subtype_t, off_t);
struct cs_blob *ubc_cs_blob_get_supplement(vnode_t, off_t);

/* APIs for handling the generation count of a cs blob */
void cs_blob_reset_cache(void);
int ubc_cs_blob_revalidate(vnode_t, struct cs_blob *, struct image_params *, int, uint32_t);
int ubc_cs_generation_check(vnode_t);

int cs_entitlements_blob_get(proc_t, void **, size_t *);
int cs_blob_get(proc_t, void **, size_t *);
const char *cs_identity_get(proc_t);

void ubc_cs_free_and_vnode_unlock(struct vnode *);
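
/*
 * Illustrative sketch (KERNEL_PRIVATE): querying code-signing state for a
 * process.  A zero return from cs_entitlements_blob_get() is assumed to
 * mean success, and the format of the returned blob is not defined by this
 * header; "p" is a hypothetical proc_t.
 *
 *	void *ents = NULL;
 *	size_t ents_len = 0;
 *	const char *ident = cs_identity_get(p);
 *
 *	if (cs_entitlements_blob_get(p, &ents, &ents_len) == 0 && ents != NULL) {
 *		// "ents"/"ents_len" describe the entitlements blob; parsing its
 *		// contents is outside the scope of this header.
 *	}
 */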

int UBCINFOEXISTS(const struct vnode *);

#endif // KERNEL_PRIVATE

/* cluster IO routines */
void cluster_update_state(vnode_t, vm_object_offset_t, vm_object_offset_t, boolean_t);

int advisory_read(vnode_t, off_t, off_t, int);
int advisory_read_ext(vnode_t, off_t, off_t, int, int (*)(buf_t, void *), void *, int);
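
/*
 * Illustrative sketch: warming the cache ahead of an expected sequential
 * read.  The argument order assumed here (file size, starting offset, byte
 * count) should be checked against the advisory_read() implementation;
 * "vp", "file_size", "offset" and "len" are hypothetical.
 *
 *	(void) advisory_read(vp, file_size, offset, len);
 */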

int cluster_read(vnode_t, struct uio *, off_t, int);
int cluster_read_ext(vnode_t, struct uio *, off_t, int, int (*)(buf_t, void *), void *);

int cluster_write(vnode_t, struct uio *, off_t, off_t, off_t, off_t, int);
int cluster_write_ext(vnode_t, struct uio *, off_t, off_t, off_t, off_t, int, int (*)(buf_t, void *), void *);
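
/*
 * Illustrative sketch: a filesystem's read and write vnode operations
 * handing the work to the cluster layer.  The meaning attached to the
 * off_t arguments of cluster_write() (old EOF, new EOF, head and tail
 * offsets) is an assumption of this example; "vp", "uio", "file_size",
 * "new_size" and "ioflag" are hypothetical caller values.
 *
 *	// read path
 *	error = cluster_read(vp, uio, file_size, ioflag);
 *
 *	// write path
 *	error = cluster_write(vp, uio, file_size, new_size,
 *	    (off_t)0, (off_t)0, ioflag);
 */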

int cluster_pageout(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int);
int cluster_pageout_ext(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int, int (*)(buf_t, void *), void *);

int cluster_pagein(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int);
int cluster_pagein_ext(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int, int (*)(buf_t, void *), void *);
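
/*
 * Illustrative sketch: a pagein vnode operation passing its UPL to the
 * cluster layer.  The meaning assumed for the arguments (UPL offset, file
 * offset, request size, file size, flags) should be checked against the
 * cluster_pagein() implementation; the names are hypothetical caller values.
 *
 *	error = cluster_pagein(vp, upl, upl_offset, f_offset,
 *	    (int)size, file_size, flags);
 */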

int cluster_push(vnode_t, int);
int cluster_push_ext(vnode_t, int, int (*)(buf_t, void *), void *);
int cluster_push_err(vnode_t, int, int (*)(buf_t, void *), void *, int *);
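
/*
 * Illustrative sketch: pushing any pending write clusters from an
 * fsync-style path.  IO_SYNC (from <sys/vnode.h>) is assumed to request
 * synchronous completion; "vp" and "waitfor" are hypothetical.
 *
 *	(void) cluster_push(vp, waitfor ? IO_SYNC : 0);
 */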

int cluster_bp(buf_t);
int cluster_bp_ext(buf_t, int (*)(buf_t, void *), void *);

void cluster_zero(upl_t, upl_offset_t, int, buf_t);

int cluster_copy_upl_data(uio_t, upl_t, int, int *);
int cluster_copy_ubc_data(vnode_t, uio_t, int *, int);

typedef struct cl_direct_read_lock cl_direct_read_lock_t;
cl_direct_read_lock_t *cluster_lock_direct_read(vnode_t vp, lck_rw_type_t exclusive);
void cluster_unlock_direct_read(cl_direct_read_lock_t *lck);

/* UPL routines */
#ifndef XNU_KERNEL_PRIVATE
int ubc_create_upl(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int);
#endif /* XNU_KERNEL_PRIVATE */
int ubc_upl_map(upl_t, vm_offset_t *);
int ubc_upl_unmap(upl_t);
int ubc_upl_map_range(upl_t, vm_offset_t, vm_size_t, vm_prot_t, vm_offset_t *);
int ubc_upl_unmap_range(upl_t, vm_offset_t, vm_size_t);
int ubc_upl_commit(upl_t);
int ubc_upl_commit_range(upl_t, upl_offset_t, upl_size_t, int);
int ubc_upl_abort(upl_t, int);
int ubc_upl_abort_range(upl_t, upl_offset_t, upl_size_t, int);
void ubc_upl_range_needed(upl_t, int, int);

upl_page_info_t *ubc_upl_pageinfo(upl_t);
upl_size_t ubc_upl_maxbufsize(void);
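
/*
 * Illustrative sketch: creating a UPL over a file range, mapping it into
 * the kernel map, and committing or aborting it.  Passing 0 for the
 * creation flags and using UPL_COMMIT_FREE_ON_EMPTY / UPL_ABORT_FREE_ON_EMPTY
 * (from <mach/memory_object_types.h>) are assumptions of this example, as is
 * comparing the returns against KERN_SUCCESS; "vp", "f_offset" and "size"
 * are hypothetical caller values.
 *
 *	upl_t upl = NULL;
 *	upl_page_info_t *pl = NULL;
 *	vm_offset_t vaddr = 0;
 *
 *	if (ubc_create_upl(vp, f_offset, (int)size, &upl, &pl, 0) != KERN_SUCCESS)
 *		return EIO;
 *
 *	if (ubc_upl_map(upl, &vaddr) == KERN_SUCCESS) {
 *		// ... operate on the pages at "vaddr" ...
 *		(void) ubc_upl_unmap(upl);
 *		(void) ubc_upl_commit_range(upl, 0, (upl_size_t)size,
 *		    UPL_COMMIT_FREE_ON_EMPTY);
 *	} else {
 *		(void) ubc_upl_abort(upl, UPL_ABORT_FREE_ON_EMPTY);
 *	}
 */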

int is_file_clean(vnode_t, off_t);

errno_t mach_to_bsd_errno(kern_return_t mach_err);

#ifdef KERNEL_PRIVATE

int ubc_create_upl_external(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int);
#ifdef XNU_KERNEL_PRIVATE
int ubc_create_upl_kernel(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int, vm_tag_t);
#endif /* XNU_KERNEL_PRIVATE */

boolean_t ubc_is_mapped(const struct vnode *, boolean_t *writable);
__attribute__((pure)) boolean_t ubc_is_mapped_writable(const struct vnode *);
boolean_t ubc_was_mapped(const struct vnode *, boolean_t *writable);
__attribute__((pure)) boolean_t ubc_was_mapped_writable(const struct vnode *);
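
/*
 * Illustrative sketch (KERNEL_PRIVATE): refusing an operation while the
 * vnode is currently mapped writable somewhere.  The exact semantics of
 * "is mapped" versus "was mapped" are assumptions of this example; "vp" is
 * a hypothetical vnode.
 *
 *	if (ubc_is_mapped_writable(vp))
 *		return EBUSY;
 */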

uint32_t cluster_max_io_size(mount_t, int);

#endif // KERNEL_PRIVATE

__END_DECLS

#endif /* _SYS_UBC_H_ */