/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 * File: kern/sync_sema.c
 * Author: Joseph CaraDonna
 *
 * Contains RT distributed semaphore synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>
#include <kern/sync_sema.h>
#include <kern/spl.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>

static const uint8_t semaphore_event;
#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)

ZONE_DEFINE_ID(ZONE_ID_SEMAPHORE, "semaphores", struct semaphore,
    ZC_ZFREE_CLEARMEM);

os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);

/* Forward declarations */

static inline bool
semaphore_active(semaphore_t semaphore)
{
	return semaphore->owner != TASK_NULL;
}

static __inline__ uint64_t
semaphore_deadline(
	unsigned int    sec,
	clock_res_t     nsec)
{
	uint64_t abstime;

	nanotime_to_absolutetime(sec, nsec, &abstime);
	clock_absolutetime_interval_to_deadline(abstime, &abstime);

	return abstime;
}
/*
 * Routine: semaphore_create
 *
 * Creates a semaphore.
 * The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
	task_t          task,
	semaphore_t     *new_semaphore,
	int             policy,
	int             value)
{
	semaphore_t s = SEMAPHORE_NULL;

	*new_semaphore = SEMAPHORE_NULL;
	if (task == TASK_NULL || value < 0 || (policy & ~SYNC_POLICY_USER_MASK)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = zalloc_id(ZONE_ID_SEMAPHORE, Z_ZERO | Z_WAITOK | Z_NOFAIL);

	/*
	 * Associate the new semaphore with the task by adding
	 * the new semaphore to the task's semaphore list.
	 */
	task_lock(task);
	/* Check for race with task_terminate */
	if (!task->active) {
		task_unlock(task);
		zfree_id(ZONE_ID_SEMAPHORE, s);
		return KERN_INVALID_TASK;
	}

	waitq_init(&s->waitq, WQT_QUEUE, policy | SYNC_POLICY_INIT_LOCKED);

	/* init everything under both the task and semaphore locks */
	os_ref_init_raw(&s->ref_count, &sema_refgrp);
	s->count = value;
	s->owner = task;
	enqueue_head(&task->semaphore_list, &s->task_link);
	task->semaphores_owned++;

	semaphore_unlock(s);

	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
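
/*
 * A minimal usage sketch (not part of the original file): the lifecycle
 * that semaphore_create sets up, as seen from a hypothetical in-kernel
 * caller. The helper name is illustrative only and error handling is
 * abbreviated.
 */
static __unused kern_return_t
semaphore_lifecycle_example(void)
{
	semaphore_t sema;
	kern_return_t kr;

	/* counting semaphore, FIFO wakeup order, initial value 0 */
	kr = semaphore_create(current_task(), &sema, SYNC_POLICY_FIFO, 0);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* post once so the wait below returns immediately */
	(void)semaphore_signal(sema);
	kr = semaphore_wait(sema);

	/* semaphore_destroy consumes the reference held since creation */
	(void)semaphore_destroy(current_task(), sema);
	return kr;
}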

/*
 * Routine: semaphore_destroy_internal
 *
 * Disassociate a semaphore from its owning task, mark it inactive,
 * and set any waiting threads running with THREAD_RESTART.
 *
 * Conditions:
 * task is locked
 * semaphore is owned by the specified task
 * if semaphore is locked, interrupts are disabled
 * Returns:
 * with semaphore unlocked, interrupts enabled
 */
static void
semaphore_destroy_internal(
	task_t          task,
	semaphore_t     semaphore,
	bool            semaphore_locked)
{
	int old_count;

	/* unlink semaphore from owning task */
	assert(semaphore->owner == task);
	remqueue(&semaphore->task_link);
	task->semaphores_owned--;

	spl_t spl_level = 0;

	if (semaphore_locked) {
		spl_level = 1;
	} else {
		spl_level = splsched();
		semaphore_lock(semaphore);
	}

	/*
	 * deactivate semaphore under both locks
	 * and then wake up all waiters.
	 */

	semaphore->owner = TASK_NULL;
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		waitq_wakeup64_all_locked(&semaphore->waitq,
		    SEMAPHORE_EVENT, THREAD_RESTART,
		    waitq_flags_splx(spl_level) | WAITQ_UNLOCK);
		/* waitq/semaphore is unlocked, splx handled */
		assert(ml_get_interrupts_enabled());
	} else {
		assert(circle_queue_empty(&semaphore->waitq.waitq_queue));
		semaphore_unlock(semaphore);
		splx(spl_level);
		assert(ml_get_interrupts_enabled());
	}
}

/*
 * Routine: semaphore_free
 *
 * Free a semaphore that hit a 0 refcount.
 *
 * Conditions:
 * Nothing is locked.
 */
__attribute__((noinline))
static void
semaphore_free(
	semaphore_t     semaphore)
{
	ipc_port_t port;
	task_t task;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	port = semaphore->port;
	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_kobject_dealloc_port(port, 0, IKOT_SEMAPHORE);
	}

	/*
	 * If the semaphore is owned by the current task,
	 * we know the current task can't go away,
	 * so we can take locks in the right order.
	 *
	 * Else we try to take locks in the "wrong" order,
	 * but if we fail to, we take a task ref and do it "right".
	 */
	task = current_task();
	if (semaphore->owner == task) {
		task_lock(task);
		if (semaphore->owner == task) {
			semaphore_destroy_internal(task, semaphore, false);
		} else {
			assert(semaphore->owner == TASK_NULL);
		}
		task_unlock(task);
	} else {
		spl_t spl = splsched();

		/* semaphore_destroy_internal will always enable, can't nest */
		assert(spl);

		semaphore_lock(semaphore);

		task = semaphore->owner;
		if (task == TASK_NULL) {
			semaphore_unlock(semaphore);
			splx(spl);
		} else if (task_lock_try(task)) {
			semaphore_destroy_internal(task, semaphore, true);
			/* semaphore unlocked, interrupts enabled */
			task_unlock(task);
		} else {
			task_reference(task);
			semaphore_unlock(semaphore);
			splx(spl);

			task_lock(task);
			if (semaphore->owner == task) {
				semaphore_destroy_internal(task, semaphore, false);
			}
			task_unlock(task);

			task_deallocate(task);
		}
	}

	waitq_deinit(&semaphore->waitq);
	zfree_id(ZONE_ID_SEMAPHORE, semaphore);
}

/*
 * Routine: semaphore_destroy
 *
 * Destroys a semaphore and consumes the caller's reference on the
 * semaphore.
 */
kern_return_t
semaphore_destroy(
	task_t          task,
	semaphore_t     semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task == TASK_NULL) {
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	if (semaphore->owner == task) {
		task_lock(task);
		if (semaphore->owner == task) {
			semaphore_destroy_internal(task, semaphore, false);
		}
		task_unlock(task);
	}

	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}

/*
 * Routine: semaphore_destroy_all
 *
 * Destroy all the semaphores associated with a given task.
 */

void
semaphore_destroy_all(
	task_t          task)
{
	semaphore_t semaphore;

	task_lock(task);

	qe_foreach_element_safe(semaphore, &task->semaphore_list, task_link) {
		semaphore_destroy_internal(task, semaphore, false);
	}

	task_unlock(task);
}

/*
 * Routine: semaphore_signal_internal
 *
 * Signals the semaphore, waking a specific thread, a single
 * waiter, or all waiters, as directed by the options.
 * Conditions:
 * Nothing is locked; the semaphore lock is taken and released
 * internally, at splsched.
 */
static kern_return_t
semaphore_signal_internal(
	semaphore_t     semaphore,
	thread_t        thread,
	int             options)
{
	kern_return_t kr;

	spl_t spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore_active(semaphore)) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = waitq_wakeup64_thread_and_unlock(
				&semaphore->waitq, SEMAPHORE_EVENT,
				thread, THREAD_AWAKENED);
			/* waitq/semaphore is unlocked */
			splx(spl_level);
		} else {
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
			splx(spl_level);
		}
		return kr;
	}

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;   /* always reset */

			kr = waitq_wakeup64_all_locked(&semaphore->waitq,
			    SEMAPHORE_EVENT, THREAD_AWAKENED,
			    WAITQ_UNLOCK | waitq_flags_splx(spl_level));
			/* waitq / semaphore is unlocked, splx handled */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST) {
				semaphore->count++;
			}
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
			splx(spl_level);
		}
		return kr;
	}

	if (semaphore->count < 0) {
		waitq_wakeup_flags_t flags = WAITQ_KEEP_LOCKED;

		if (options & SEMAPHORE_THREAD_HANDOFF) {
			flags |= WAITQ_HANDOFF;
		}
		kr = waitq_wakeup64_one_locked(&semaphore->waitq,
		    SEMAPHORE_EVENT, THREAD_AWAKENED, flags);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			semaphore->count = 0;   /* all waiters gone */
		}
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}

/*
 * Routine: semaphore_signal_thread
 *
 * If the specified thread is blocked on the semaphore, it is
 * woken up. If a NULL thread was supplied, then any one
 * thread is woken up. Otherwise the caller gets KERN_NOT_WAITING
 * and the semaphore is unchanged.
 */
kern_return_t
semaphore_signal_thread(
	semaphore_t     semaphore,
	thread_t        thread)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return semaphore_signal_internal(semaphore, thread,
	    SEMAPHORE_OPTION_NONE);
}

/*
 * Routine: semaphore_signal_thread_trap
 *
 * Trap interface to the semaphore_signal_thread function.
 */
kern_return_t
semaphore_signal_thread_trap(
	struct semaphore_signal_thread_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	mach_port_name_t thread_name = args->thread_name;
	semaphore_t semaphore;
	thread_t thread;
	kern_return_t kr;

	/*
	 * MACH_PORT_NULL is not an error. It means that we want to
	 * select any one thread that is already waiting, but not to
	 * pre-post the semaphore.
	 */
	if (thread_name != MACH_PORT_NULL) {
		thread = port_name_to_thread(thread_name, PORT_INTRANS_OPTIONS_NONE);
		if (thread == THREAD_NULL) {
			return KERN_INVALID_ARGUMENT;
		}
	} else {
		thread = THREAD_NULL;
	}

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
		    thread,
		    SEMAPHORE_OPTION_NONE);
		semaphore_dereference(semaphore);
	}
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}
	return kr;
}


/*
 * Routine: semaphore_signal
 *
 * Traditional (in-kernel client and MIG interface) semaphore
 * signal routine. Most users will access the trap version.
 *
 * This interface is not defined to return info about whether
 * this call found a thread waiting or not. The internal
 * routines (and future external routines) do. We have to
 * convert those into plain KERN_SUCCESS returns.
 */
kern_return_t
semaphore_signal(
	semaphore_t     semaphore)
{
	kern_return_t kr;

	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = semaphore_signal_internal(semaphore,
	    THREAD_NULL,
	    SEMAPHORE_SIGNAL_PREPOST);
	if (kr == KERN_NOT_WAITING) {
		return KERN_SUCCESS;
	}
	return kr;
}

/*
 * Routine: semaphore_signal_trap
 *
 * Trap interface to the semaphore_signal function.
 */
kern_return_t
semaphore_signal_trap(
	struct semaphore_signal_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;

	return semaphore_signal_internal_trap(sema_name);
}

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name)
{
	semaphore_t semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
		    THREAD_NULL,
		    SEMAPHORE_SIGNAL_PREPOST);
		semaphore_dereference(semaphore);
		if (kr == KERN_NOT_WAITING) {
			kr = KERN_SUCCESS;
		}
	}
	return kr;
}

/*
 * Routine: semaphore_signal_all
 *
 * Awakens ALL threads currently blocked on the semaphore.
 * The semaphore count returns to zero.
 */
kern_return_t
semaphore_signal_all(
	semaphore_t     semaphore)
{
	kern_return_t kr;

	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = semaphore_signal_internal(semaphore,
	    THREAD_NULL,
	    SEMAPHORE_SIGNAL_ALL);
	if (kr == KERN_NOT_WAITING) {
		return KERN_SUCCESS;
	}
	return kr;
}

/*
 * Routine: semaphore_signal_all_trap
 *
 * Trap interface to the semaphore_signal_all function.
 */
kern_return_t
semaphore_signal_all_trap(
	struct semaphore_signal_all_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	semaphore_t semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
		    THREAD_NULL,
		    SEMAPHORE_SIGNAL_ALL);
		semaphore_dereference(semaphore);
		if (kr == KERN_NOT_WAITING) {
			kr = KERN_SUCCESS;
		}
	}
	return kr;
}

/*
 * Routine: semaphore_convert_wait_result
 *
 * Generate the return code after a semaphore wait/block. It
 * takes the wait result as an input and converts that to an
 * appropriate result.
 */
static kern_return_t
semaphore_convert_wait_result(int wait_result)
{
	switch (wait_result) {
	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_TIMED_OUT:
		return KERN_OPERATION_TIMED_OUT;

	case THREAD_INTERRUPTED:
		return KERN_ABORTED;

	case THREAD_RESTART:
		return KERN_TERMINATED;

	default:
		panic("semaphore_block");
		return KERN_FAILURE;
	}
}

/*
 * Routine: semaphore_wait_continue
 *
 * Common continuation routine after waiting on a semaphore.
 * It returns directly to user space.
 */
static void
semaphore_wait_continue(void *arg __unused, wait_result_t wr)
{
	thread_t self = current_thread();
	semaphore_cont_t caller_cont = self->sth_continuation;

	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL) {
		semaphore_dereference(self->sth_signalsemaphore);
	}

	assert(self->handoff_thread == THREAD_NULL);
	assert(caller_cont != NULL);
	(*caller_cont)(semaphore_convert_wait_result(wr));
}

/*
 * Routine: semaphore_wait_internal
 *
 * Decrements the semaphore count by one. If the count is
 * negative after the decrement, the calling thread blocks
 * (possibly at a continuation and/or with a timeout).
 *
 * Assumptions:
 * A reference is held on the wait semaphore.
 * A reference is held on the signal semaphore.
 */
static kern_return_t
semaphore_wait_internal(
	semaphore_t     wait_semaphore,
	semaphore_t     signal_semaphore,
	uint64_t        deadline,
	int             option,
	semaphore_cont_t caller_cont)
{
	int wait_result;
	spl_t spl_level;
	kern_return_t kr = KERN_ALREADY_WAITING;
	thread_t self = current_thread();
	thread_t handoff_thread = THREAD_NULL;
	int semaphore_signal_options = SEMAPHORE_SIGNAL_PREPOST;
	thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	if (!semaphore_active(wait_semaphore)) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		wait_semaphore->count = -1;     /* we don't keep an actual count */

		thread_set_pending_block_hint(self, kThreadWaitSemaphore);
		(void)waitq_assert_wait64_locked(
			&wait_semaphore->waitq,
			SEMAPHORE_EVENT,
			THREAD_ABORTSAFE,
			TIMEOUT_URGENCY_USER_NORMAL,
			deadline, TIMEOUT_NO_LEEWAY,
			self);

		semaphore_signal_options |= SEMAPHORE_THREAD_HANDOFF;
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
		    THREAD_NULL, semaphore_signal_options);

		if (signal_kr == KERN_NOT_WAITING) {
			assert(self->handoff_thread == THREAD_NULL);
			signal_kr = KERN_SUCCESS;
		} else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh oh! The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on). People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result. Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			assert(self->handoff_thread == THREAD_NULL);
			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED) {
				kr = KERN_TERMINATED;
			}
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait, we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING) {
		assert(self->handoff_thread == THREAD_NULL);
		return kr;
	}

	if (self->handoff_thread) {
		handoff_thread = self->handoff_thread;
		self->handoff_thread = THREAD_NULL;
		handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
	}

	/*
	 * Now, we can block. If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation. This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;

		thread_handoff_parameter(handoff_thread, semaphore_wait_continue,
		    NULL, handoff_option);
	} else {
		wait_result = thread_handoff_deallocate(handoff_thread, handoff_option);
	}

	assert(self->handoff_thread == THREAD_NULL);
	return semaphore_convert_wait_result(wait_result);
}


/*
 * Routine: semaphore_wait
 *
 * Traditional (non-continuation) interface presented to
 * in-kernel clients to wait on a semaphore.
 */
kern_return_t
semaphore_wait(
	semaphore_t     semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
	    0ULL, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
}

kern_return_t
semaphore_wait_noblock(
	semaphore_t     semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
	    0ULL, SEMAPHORE_TIMEOUT_NOBLOCK, SEMAPHORE_CONT_NULL);
}

kern_return_t
semaphore_wait_deadline(
	semaphore_t     semaphore,
	uint64_t        deadline)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
	    deadline, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
}

/*
 * Trap: semaphore_wait_trap
 *
 * Trap version of semaphore wait. Called on behalf of user-level
 * clients.
 */

kern_return_t
semaphore_wait_trap(
	struct semaphore_wait_trap_args *args)
{
	return semaphore_wait_trap_internal(args->wait_name, thread_syscall_return);
}

kern_return_t
semaphore_wait_trap_internal(
	mach_port_name_t name,
	semaphore_cont_t caller_cont)
{
	semaphore_t semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_wait_internal(semaphore,
		    SEMAPHORE_NULL,
		    0ULL, SEMAPHORE_OPTION_NONE,
		    caller_cont);
		semaphore_dereference(semaphore);
	}
	return kr;
}

/*
 * Routine: semaphore_timedwait
 *
 * Traditional (non-continuation) interface presented to
 * in-kernel clients to wait on a semaphore with a timeout.
 *
 * A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait(
	semaphore_t     semaphore,
	mach_timespec_t wait_time)
{
	int option = SEMAPHORE_OPTION_NONE;
	uint64_t deadline = 0;

	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (BAD_MACH_TIMESPEC(&wait_time)) {
		return KERN_INVALID_VALUE;
	}

	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
		option = SEMAPHORE_TIMEOUT_NOBLOCK;
	} else {
		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
	}

	return semaphore_wait_internal(semaphore, SEMAPHORE_NULL,
	    deadline, option, SEMAPHORE_CONT_NULL);
}
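
/*
 * A short usage sketch (not part of the original file): building the
 * mach_timespec_t for a 500 ms bounded wait on semaphore_timedwait.
 * The helper name is illustrative only.
 */
static __unused kern_return_t
semaphore_timedwait_example(semaphore_t sema)
{
	mach_timespec_t wait_time = {
		.tv_sec  = 0,
		.tv_nsec = 500000000,   /* 500 ms; {0,0} would mean "don't block" */
	};

	/* returns KERN_OPERATION_TIMED_OUT if nothing is posted in time */
	return semaphore_timedwait(sema, wait_time);
}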

/*
 * Trap: semaphore_timedwait_trap
 *
 * Trap version of a semaphore_timedwait. The timeout parameter
 * is passed in two distinct parts and re-assembled on this side
 * of the trap interface (to accommodate calling conventions that
 * pass structures as pointers instead of inline in registers without
 * having to add a copyin).
 *
 * A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_trap(
	struct semaphore_timedwait_trap_args *args)
{
	return semaphore_timedwait_trap_internal(args->wait_name,
	    args->sec, args->nsec, thread_syscall_return);
}


kern_return_t
semaphore_timedwait_trap_internal(
	mach_port_name_t name,
	unsigned int    sec,
	clock_res_t     nsec,
	semaphore_cont_t caller_cont)
{
	semaphore_t semaphore;
	mach_timespec_t wait_time;
	kern_return_t kr;

	wait_time.tv_sec = sec;
	wait_time.tv_nsec = nsec;
	if (BAD_MACH_TIMESPEC(&wait_time)) {
		return KERN_INVALID_VALUE;
	}

	kr = port_name_to_semaphore(name, &semaphore);
	if (kr == KERN_SUCCESS) {
		int option = SEMAPHORE_OPTION_NONE;
		uint64_t deadline = 0;

		if (sec == 0 && nsec == 0) {
			option = SEMAPHORE_TIMEOUT_NOBLOCK;
		} else {
			deadline = semaphore_deadline(sec, nsec);
		}

		kr = semaphore_wait_internal(semaphore,
		    SEMAPHORE_NULL,
		    deadline, option,
		    caller_cont);
		semaphore_dereference(semaphore);
	}
	return kr;
}

/*
 * Routine: semaphore_wait_signal
 *
 * Atomically register a wait on a semaphore and THEN signal
 * another. This is the in-kernel entry point that does not
 * block at a continuation and does not free a signal_semaphore
 * reference.
 */
kern_return_t
semaphore_wait_signal(
	semaphore_t     wait_semaphore,
	semaphore_t     signal_semaphore)
{
	if (wait_semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return semaphore_wait_internal(wait_semaphore, signal_semaphore,
	    0ULL, SEMAPHORE_OPTION_NONE, SEMAPHORE_CONT_NULL);
}
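
/*
 * A usage sketch (not part of the original file): one side of a
 * hypothetical ping-pong handshake. Posting `theirs` and blocking on
 * `ours` must be a single atomic step so the peer cannot signal back
 * before this thread is registered as a waiter. The helper name is
 * illustrative only.
 */
static __unused kern_return_t
semaphore_pingpong_turn(semaphore_t ours, semaphore_t theirs)
{
	/* registers the wait on `ours` before `theirs` is signalled */
	return semaphore_wait_signal(ours, theirs);
}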

/*
 * Trap: semaphore_wait_signal_trap
 *
 * Atomically register a wait on a semaphore and THEN signal
 * another. This is the trap version from user space.
 */
kern_return_t
semaphore_wait_signal_trap(
	struct semaphore_wait_signal_trap_args *args)
{
	return semaphore_wait_signal_trap_internal(args->wait_name,
	    args->signal_name, thread_syscall_return);
}

kern_return_t
semaphore_wait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	semaphore_cont_t caller_cont)
{
	semaphore_t wait_semaphore;
	semaphore_t signal_semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
	if (kr == KERN_SUCCESS) {
		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
		if (kr == KERN_SUCCESS) {
			kr = semaphore_wait_internal(wait_semaphore,
			    signal_semaphore,
			    0ULL, SEMAPHORE_OPTION_NONE,
			    caller_cont);
			semaphore_dereference(wait_semaphore);
		}
		semaphore_dereference(signal_semaphore);
	}
	return kr;
}


/*
 * Routine: semaphore_timedwait_signal
 *
 * Atomically register a wait on a semaphore and THEN signal
 * another. This is the in-kernel entry point that does not
 * block at a continuation.
 *
 * A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_signal(
	semaphore_t     wait_semaphore,
	semaphore_t     signal_semaphore,
	mach_timespec_t wait_time)
{
	int option = SEMAPHORE_OPTION_NONE;
	uint64_t deadline = 0;

	if (wait_semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (BAD_MACH_TIMESPEC(&wait_time)) {
		return KERN_INVALID_VALUE;
	}

	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
		option = SEMAPHORE_TIMEOUT_NOBLOCK;
	} else {
		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
	}

	return semaphore_wait_internal(wait_semaphore, signal_semaphore,
	    deadline, option, SEMAPHORE_CONT_NULL);
}

/*
 * Trap: semaphore_timedwait_signal_trap
 *
 * Atomically register a timed wait on a semaphore and THEN signal
 * another. This is the trap version from user space.
 */
kern_return_t
semaphore_timedwait_signal_trap(
	struct semaphore_timedwait_signal_trap_args *args)
{
	return semaphore_timedwait_signal_trap_internal(args->wait_name,
	    args->signal_name, args->sec, args->nsec, thread_syscall_return);
}

kern_return_t
semaphore_timedwait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	unsigned int    sec,
	clock_res_t     nsec,
	semaphore_cont_t caller_cont)
{
	semaphore_t wait_semaphore;
	semaphore_t signal_semaphore;
	mach_timespec_t wait_time;
	kern_return_t kr;

	wait_time.tv_sec = sec;
	wait_time.tv_nsec = nsec;
	if (BAD_MACH_TIMESPEC(&wait_time)) {
		return KERN_INVALID_VALUE;
	}

	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
	if (kr == KERN_SUCCESS) {
		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
		if (kr == KERN_SUCCESS) {
			int option = SEMAPHORE_OPTION_NONE;
			uint64_t deadline = 0;

			if (sec == 0 && nsec == 0) {
				option = SEMAPHORE_TIMEOUT_NOBLOCK;
			} else {
				deadline = semaphore_deadline(sec, nsec);
			}

			kr = semaphore_wait_internal(wait_semaphore,
			    signal_semaphore,
			    deadline, option,
			    caller_cont);
			semaphore_dereference(wait_semaphore);
		}
		semaphore_dereference(signal_semaphore);
	}
	return kr;
}


/*
 * Routine: semaphore_reference
 *
 * Take out a reference on a semaphore. This keeps the data structure
 * in existence (but the semaphore may be deactivated).
 */
void
semaphore_reference(
	semaphore_t     semaphore)
{
	zone_id_require(ZONE_ID_SEMAPHORE, sizeof(*semaphore), semaphore);
	os_ref_retain_raw(&semaphore->ref_count, &sema_refgrp);
}

/*
 * Routine: semaphore_dereference
 *
 * Release a reference on a semaphore. If this is the last reference,
 * the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t     semaphore)
{
	if (semaphore == NULL) {
		return;
	}

	if (os_ref_release_raw(&semaphore->ref_count, &sema_refgrp) == 0) {
		return semaphore_free(semaphore);
	}
}

void
kdp_sema_find_owner(struct waitq *waitq, __assert_only event64_t event, thread_waitinfo_t *waitinfo)
{
	semaphore_t sem = __container_of(waitq, struct semaphore, waitq);
	assert(event == SEMAPHORE_EVENT);

	zone_id_require(ZONE_ID_SEMAPHORE, sizeof(*sem), sem);

	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
	if (sem->owner) {
		waitinfo->owner = pid_from_task(sem->owner);
	}
}

/*
 * Routine: port_name_to_semaphore
 * Purpose:
 * Convert from a port name in the current space to a semaphore.
 * Produces a semaphore ref, which may be null.
 * Conditions:
 * Nothing locked.
 */
kern_return_t
port_name_to_semaphore(
	mach_port_name_t name,
	semaphore_t     *semaphorep)
{
	ipc_port_t port;
	kern_return_t kr;

	if (!MACH_PORT_VALID(name)) {
		*semaphorep = SEMAPHORE_NULL;
		return KERN_INVALID_NAME;
	}

	kr = ipc_port_translate_send(current_space(), name, &port);
	if (kr != KERN_SUCCESS) {
		*semaphorep = SEMAPHORE_NULL;
		return kr;
	}
	/* have the port locked */

	*semaphorep = convert_port_to_semaphore(port);
	if (*semaphorep == SEMAPHORE_NULL) {
		/* the port is valid, but doesn't denote a semaphore */
		kr = KERN_INVALID_CAPABILITY;
	} else {
		kr = KERN_SUCCESS;
	}
	ip_mq_unlock(port);

	return kr;
}
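
/*
 * A usage sketch (not part of the original file): the translate/use/
 * dereference pattern that the trap entry points in this file follow
 * around port_name_to_semaphore. The helper name is illustrative only.
 */
static __unused kern_return_t
semaphore_signal_by_name_example(mach_port_name_t name)
{
	semaphore_t semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(name, &semaphore);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	kr = semaphore_signal(semaphore);

	/* drop the reference produced by the translation */
	semaphore_dereference(semaphore);
	return kr;
}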

/*
 * Routine: convert_port_to_semaphore
 * Purpose:
 * Convert from a port to a semaphore.
 * Doesn't consume the port [send-right] ref;
 * produces a semaphore ref, which may be null.
 * Conditions:
 * Caller has a send-right reference to port.
 * Port may or may not be locked.
 */
semaphore_t
convert_port_to_semaphore(ipc_port_t port)
{
	semaphore_t semaphore = SEMAPHORE_NULL;

	if (IP_VALID(port)) {
		semaphore = ipc_kobject_get_stable(port, IKOT_SEMAPHORE);
		if (semaphore != SEMAPHORE_NULL) {
			semaphore_reference(semaphore);
		}
	}

	return semaphore;
}


/*
 * Routine: convert_semaphore_to_port
 * Purpose:
 * Convert a semaphore reference to a send right to a
 * semaphore port.
 *
 * Consumes the semaphore reference. If the semaphore
 * port currently has no send rights (or doesn't exist
 * yet), the reference is donated to the port to represent
 * all extant send rights collectively.
 */
ipc_port_t
convert_semaphore_to_port(semaphore_t semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return IP_NULL;
	}

	/*
	 * make a send right and donate our reference for
	 * semaphore_no_senders if this is the first send right
	 */
	if (!ipc_kobject_make_send_lazy_alloc_port(&semaphore->port,
	    semaphore, IKOT_SEMAPHORE, IPC_KOBJECT_ALLOC_NONE)) {
		semaphore_dereference(semaphore);
	}
	return semaphore->port;
}

/*
 * Routine: semaphore_no_senders
 * Purpose:
 * Called whenever the Mach port system detects no-senders
 * on the semaphore port.
 *
 * When a send-right is first created, a no-senders
 * notification is armed (and a semaphore reference is donated).
 *
 * A no-senders notification will be posted when no one else holds a
 * send-right (reference) to the semaphore's port. This notification function
 * will consume the semaphore reference donated to the extant collection of
 * send-rights.
 */
static void
semaphore_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
{
	semaphore_t semaphore = ipc_kobject_get_stable(port, IKOT_SEMAPHORE);

	assert(semaphore != SEMAPHORE_NULL);
	assert(semaphore->port == port);

	semaphore_dereference(semaphore);
}

IPC_KOBJECT_DEFINE(IKOT_SEMAPHORE,
    .iko_op_stable     = true,
    .iko_op_no_senders = semaphore_no_senders);