/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/time.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <vm/vm_kern.h>
#include <sys/stat.h>
#include <vm/vm_map.h>
#include <sys/systm.h>
#include <kern/assert.h>
#include <sys/conf.h>
#include <sys/proc_internal.h>
#include <sys/buf.h>    /* for SET */
#include <sys/kernel.h>
#include <sys/user.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

/* XXX these should be in a common header somewhere, but aren't */
extern int chrtoblk_set(int, int);

/* XXX most of these just exist to export; there's no good header for them */
void pcb_synch(void);

typedef struct devsw_lock {
	TAILQ_ENTRY(devsw_lock) dl_list;    /* entry on the global devsw_locks list */
	thread_t                dl_thread;  /* thread that currently owns this (dev, mode) lock */
	dev_t                   dl_dev;     /* device this lock covers */
	int                     dl_mode;    /* S_IFCHR or S_IFBLK */
	int                     dl_waiters; /* number of threads waiting for this entry */
} *devsw_lock_t;

static LCK_GRP_DECLARE(devsw_lock_grp, "devsw");
static LCK_MTX_DECLARE(devsw_lock_list_mtx, &devsw_lock_grp);
static TAILQ_HEAD(, devsw_lock) devsw_locks = TAILQ_HEAD_INITIALIZER(devsw_locks);

/* Just to satisfy pstat command */
int dmmin, dmmax, dmtext;

/*
 * XXX this function only exists to be exported and do nothing.
 */
void
pcb_synch(void)
{
}

struct proc *
current_proc(void)
{
	/* Never returns a NULL */
	proc_t p = current_thread_ro()->tro_proc;
	if (__improbable(p == PROC_NULL)) {
		return kernproc;
	}
	return p;
}

/* Device switch add delete routines */

const struct bdevsw nobdev = NO_BDEVICE;
const struct cdevsw nocdev = NO_CDEVICE;
/*
 * if index is -1, return a free slot if available
 * else see whether the index is free
 * return the major number that is free else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 */
int
bdevsw_isfree(int index)
{
	struct bdevsw * devsw;

	if (index < 0) {
		if (index == -1) {
			index = 1; /* start at 1 to avoid collision with volfs (Radar 2842228) */
		} else {
			index = -index; /* start at least this far up in the table */
		}
		devsw = &bdevsw[index];
		for (; index < nblkdev; index++, devsw++) {
			if (memcmp((const char *)devsw, (const char *)&nobdev, sizeof(struct bdevsw)) == 0) {
				break;
			}
		}
	}

	if (index < 0 || index >= nblkdev) {
		return -1;
	}

	devsw = &bdevsw[index];
	if ((memcmp((const char *)devsw, (const char *)&nobdev, sizeof(struct bdevsw)) != 0)) {
		return -1;
	}
	return index;
}
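
/*
 * Illustrative sketch (not compiled): the three index conventions accepted
 * by bdevsw_isfree().  The majors used here are hypothetical.
 */
#if 0
static void
bdevsw_isfree_example(void)
{
	int maj;

	/* -1: scan the whole table (starting at slot 1) for any free major */
	maj = bdevsw_isfree(-1);

	/* other negative values: scan starting at the absolute value, e.g. 24 */
	maj = bdevsw_isfree(-24);

	/* non-negative values: check that specific slot only */
	maj = bdevsw_isfree(10);

	if (maj == -1) {
		/* no free slot, or the requested slot is already in use */
	}
}
#endif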

/*
 * if index is -1, find a free slot to add
 * else see whether the slot is free
 * return the major number that is used else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 */
int
bdevsw_add(int index, const struct bdevsw * bsw)
{
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	index = bdevsw_isfree(index);
	if (index < 0) {
		index = -1;
	} else {
		bdevsw[index] = *bsw;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);
	return index;
}
/*
 * if the slot has the same bsw, then remove
 * else -1
 */
int
bdevsw_remove(int index, const struct bdevsw * bsw)
{
	struct bdevsw * devsw;

	if (index < 0 || index >= nblkdev) {
		return -1;
	}

	devsw = &bdevsw[index];
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	if ((memcmp((const char *)devsw, (const char *)bsw, sizeof(struct bdevsw)) != 0)) {
		index = -1;
	} else {
		bdevsw[index] = nobdev;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);
	return index;
}
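
/*
 * Illustrative sketch (not compiled): registering and tearing down a
 * hypothetical block driver.  "my_bdevsw", its entry points, and "my_bmajor"
 * are assumptions for illustration only.
 */
#if 0
static int my_bmajor = -1;

static void
my_bdev_register(void)
{
	/* -1 asks for any free slot; the major actually assigned is returned */
	my_bmajor = bdevsw_add(-1, &my_bdevsw);
	if (my_bmajor == -1) {
		printf("my_bdev: no free block major\n");
	}
}

static void
my_bdev_unregister(void)
{
	/* removal succeeds only if the slot still holds this exact bdevsw */
	if (my_bmajor != -1 && bdevsw_remove(my_bmajor, &my_bdevsw) == -1) {
		printf("my_bdev: bdevsw_remove mismatch\n");
	}
}
#endif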

/*
 * if index is -1, return a free slot if available
 * else see whether the index is free
 * return the major number that is free else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 */
int
cdevsw_isfree(int index)
{
	struct cdevsw * devsw;

	if (index < 0) {
		if (index == -1) {
			index = 0;
		} else {
			index = -index; /* start at least this far up in the table */
		}
		devsw = &cdevsw[index];
		for (; index < nchrdev; index++, devsw++) {
			if (memcmp((const char *)devsw, (const char *)&nocdev, sizeof(struct cdevsw)) == 0) {
				break;
			}
		}
	}

	if (index < 0 || index >= nchrdev) {
		return -1;
	}

	devsw = &cdevsw[index];
	if ((memcmp((const char *)devsw, (const char *)&nocdev, sizeof(struct cdevsw)) != 0)) {
		return -1;
	}
	return index;
}

/*
 * if index is -1, find a free slot to add
 * else see whether the slot is free
 * return the major number that is used else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 *
 * NOTE: In practice, -1 is unusable, since there are kernel internal
 *       devices that call this function with absolute index values,
 *       which will stomp on free-slot based assignments that happen
 *       before them. -24 is currently a safe starting point.
 */
int
cdevsw_add(int index, const struct cdevsw * csw)
{
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	index = cdevsw_isfree(index);
	if (index < 0) {
		index = -1;
	} else {
		cdevsw[index] = *csw;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);
	return index;
}
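
/*
 * Illustrative sketch (not compiled): per the NOTE above, external character
 * drivers pass a negative starting hint such as -24 rather than -1, so that
 * later registrations using absolute majors do not collide with them.
 * "my_cdevsw" is a hypothetical switch table.
 */
#if 0
static int
my_cdev_register(void)
{
	int cmaj = cdevsw_add(-24, &my_cdevsw); /* first free slot at or above 24 */
	if (cmaj == -1) {
		return -1;      /* table full, or an absolute registration got there first */
	}
	return cmaj;
}
#endif
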
/*
 * if the slot has the same csw, then remove
 * else -1
 */
int
cdevsw_remove(int index, const struct cdevsw * csw)
{
	struct cdevsw * devsw;

	if (index < 0 || index >= nchrdev) {
		return -1;
	}

	devsw = &cdevsw[index];
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	if ((memcmp((const char *)devsw, (const char *)csw, sizeof(struct cdevsw)) != 0)) {
		index = -1;
	} else {
		cdevsw[index] = nocdev;
		cdevsw_flags[index] = 0;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);
	return index;
}

static int
cdev_set_bdev(int cdev, int bdev)
{
	return chrtoblk_set(cdev, bdev);
}

int
cdevsw_add_with_bdev(int index, const struct cdevsw * csw, int bdev)
{
	index = cdevsw_add(index, csw);
	if (index < 0) {
		return index;
	}
	if (cdev_set_bdev(index, bdev) < 0) {
		cdevsw_remove(index, csw);
		return -1;
	}
	return index;
}

int
cdevsw_setkqueueok(int maj, const struct cdevsw * csw, int extra_flags)
{
	struct cdevsw * devsw;
	uint64_t flags = CDEVSW_SELECT_KQUEUE;

	if (maj < 0 || maj >= nchrdev) {
		return -1;
	}

	devsw = &cdevsw[maj];
	if ((memcmp((const char *)devsw, (const char *)csw, sizeof(struct cdevsw)) != 0)) {
		return -1;
	}

	flags |= extra_flags;

	cdevsw_flags[maj] = flags;
	return 0;
}
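
/*
 * Illustrative sketch (not compiled): a hypothetical driver that already owns
 * a block major publishes its character alias with cdevsw_add_with_bdev() and
 * then marks its select routine as kqueue-capable.  All "my_*" identifiers
 * are assumptions.
 */
#if 0
static int
my_driver_register_cdev(int my_bmajor)
{
	int cmaj;

	/* grab a free character major (at or above 24) and map it to my_bmajor */
	cmaj = cdevsw_add_with_bdev(-24, &my_cdevsw, my_bmajor);
	if (cmaj == -1) {
		return -1;
	}

	/* advertise that this major's d_select entry is safe to use from kqueue */
	if (cdevsw_setkqueueok(cmaj, &my_cdevsw, 0) == -1) {
		cdevsw_remove(cmaj, &my_cdevsw);
		return -1;
	}

	return cmaj;
}
#endif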

/*
 * Copy the "hostname" variable into a caller-provided buffer
 * Returns: 0 for success, ENAMETOOLONG for insufficient buffer space.
 * On success, "len" will be set to the number of characters preceding
 * the NULL character in the hostname.
 */
int
bsd_hostname(char *buf, size_t bufsize, size_t *len)
{
	int ret;
	size_t hnlen;
	/*
	 * "hostname" is null-terminated
	 */
	lck_mtx_lock(&hostname_lock);
	hnlen = strlen(hostname);
	if (hnlen < bufsize) {
		strlcpy(buf, hostname, bufsize);
		*len = hnlen;
		ret = 0;
	} else {
		ret = ENAMETOOLONG;
	}
	lck_mtx_unlock(&hostname_lock);
	return ret;
}
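
/*
 * Illustrative sketch (not compiled): a call-site fragment passing a
 * caller-provided buffer to bsd_hostname().  MAXHOSTNAMELEN is used only as
 * a conventional size here; the surrounding code is hypothetical.
 */
#if 0
	char name[MAXHOSTNAMELEN];
	size_t namelen = 0;

	if (bsd_hostname(name, sizeof(name), &namelen) == 0) {
		/* name[] holds a NUL-terminated hostname, namelen chars long */
	} else {
		/* ENAMETOOLONG: the buffer is too small for the current hostname */
	}
#endif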

static devsw_lock_t
devsw_lock_find_locked(dev_t dev, int mode)
{
	devsw_lock_t lock;

	TAILQ_FOREACH(lock, &devsw_locks, dl_list) {
		if (lock->dl_dev == dev && lock->dl_mode == mode) {
			return lock;
		}
	}

	return NULL;
}

void
devsw_lock(dev_t dev, int mode)
{
	devsw_lock_t newlock, curlock;

	assert(0 <= major(dev) && major(dev) < nchrdev);
	assert(mode == S_IFCHR || mode == S_IFBLK);

	newlock = kalloc_type(struct devsw_lock, Z_WAITOK | Z_ZERO);
	newlock->dl_dev = dev;
	newlock->dl_thread = current_thread();
	newlock->dl_mode = mode;

	lck_mtx_lock_spin(&devsw_lock_list_mtx);

	curlock = devsw_lock_find_locked(dev, mode);
	if (curlock == NULL) {
		TAILQ_INSERT_TAIL(&devsw_locks, newlock, dl_list);
	} else {
		curlock->dl_waiters++;
		lck_mtx_sleep_with_inheritor(&devsw_lock_list_mtx,
		    LCK_SLEEP_SPIN, curlock, curlock->dl_thread,
		    THREAD_UNINT | THREAD_WAIT_NOREPORT,
		    TIMEOUT_WAIT_FOREVER);
		assert(curlock->dl_thread == current_thread());
		curlock->dl_waiters--;
	}

	lck_mtx_unlock(&devsw_lock_list_mtx);

	if (curlock != NULL) {
		kfree_type(struct devsw_lock, newlock);
	}
}

void
devsw_unlock(dev_t dev, int mode)
{
	devsw_lock_t lock;
	thread_t inheritor_thread = NULL;

	assert(0 <= major(dev) && major(dev) < nchrdev);

	lck_mtx_lock_spin(&devsw_lock_list_mtx);

	lock = devsw_lock_find_locked(dev, mode);

	if (lock == NULL || lock->dl_thread != current_thread()) {
		panic("current thread doesn't own the lock (%p)", lock);
	}

	if (lock->dl_waiters) {
		wakeup_one_with_inheritor(lock, THREAD_AWAKENED,
		    LCK_WAKE_DEFAULT, &lock->dl_thread);
		inheritor_thread = lock->dl_thread;
		lock = NULL;
	} else {
		TAILQ_REMOVE(&devsw_locks, lock, dl_list);
	}

	lck_mtx_unlock(&devsw_lock_list_mtx);

	if (inheritor_thread) {
		thread_deallocate(inheritor_thread);
	}
	kfree_type(struct devsw_lock, lock);
}
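
/*
 * Illustrative sketch (not compiled): how a caller in the spec-device layer
 * serializes entry into a driver's open routine for one (dev, mode) pair.
 * This is a simplified assumption of that usage, not the exact specfs code.
 */
#if 0
	devsw_lock(dev, S_IFCHR);
	error = (*cdevsw[major(dev)].d_open)(dev, flags, S_IFCHR, p);
	devsw_unlock(dev, S_IFCHR);
#endif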