/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 * File: kern/sync_sema.c
 * Author: Joseph CaraDonna
 *
 * Contains RT distributed semaphore synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>
#include <kern/sync_sema.h>
#include <kern/spl.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>

#include <libkern/OSAtomic.h>

static unsigned int semaphore_event;
#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)

zone_t semaphore_zone;
unsigned int semaphore_max;

os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);

/* Forward declarations */

kern_return_t
semaphore_wait_trap_internal(
    mach_port_name_t name,
    void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_wait_signal_trap_internal(
    mach_port_name_t wait_name,
    mach_port_name_t signal_name,
    void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_trap_internal(
    mach_port_name_t name,
    unsigned int sec,
    clock_res_t nsec,
    void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_signal_trap_internal(
    mach_port_name_t wait_name,
    mach_port_name_t signal_name,
    unsigned int sec,
    clock_res_t nsec,
    void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name);

kern_return_t
semaphore_signal_internal(
    semaphore_t semaphore,
    thread_t thread,
    int options);

kern_return_t
semaphore_convert_wait_result(
    int wait_result);

void
semaphore_wait_continue(void);

static kern_return_t
semaphore_wait_internal(
    semaphore_t wait_semaphore,
    semaphore_t signal_semaphore,
    uint64_t deadline,
    int option,
    void (*caller_cont)(kern_return_t));
/*
 * Routine: semaphore_deadline
 *
 * Convert a relative (seconds, nanoseconds) timeout into an
 * absolute-time deadline measured from now.
 */
static __inline__ uint64_t
semaphore_deadline(
    unsigned int sec,
    clock_res_t nsec)
{
    uint64_t abstime;

    nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime);
    clock_absolutetime_interval_to_deadline(abstime, &abstime);

    return (abstime);
}
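
/*
 * Example (sketch): an in-kernel caller could build the same kind of
 * deadline by hand with the clock routines this helper wraps; the 1.5
 * second interval below is purely illustrative.
 *
 *     uint64_t interval, deadline;
 *
 *     nanoseconds_to_absolutetime(1 * NSEC_PER_SEC + 500 * NSEC_PER_MSEC,
 *         &interval);
 *     clock_absolutetime_interval_to_deadline(interval, &deadline);
 *     // deadline is now 1.5s from the current mach_absolute_time()
 */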

/*
 * ROUTINE: semaphore_init [private]
 *
 * Initialize the semaphore mechanisms.
 * Right now, we only need to initialize the semaphore zone.
 */
void
semaphore_init(void)
{
    semaphore_zone = zinit(sizeof(struct semaphore),
        semaphore_max * sizeof(struct semaphore),
        sizeof(struct semaphore),
        "semaphores");
    zone_change(semaphore_zone, Z_NOENCRYPT, TRUE);
}

/*
 * Routine: semaphore_create
 *
 * Creates a semaphore.
 * The new semaphore is returned to the caller as an out parameter.
 */
kern_return_t
semaphore_create(
    task_t task,
    semaphore_t *new_semaphore,
    int policy,
    int value)
{
    semaphore_t s = SEMAPHORE_NULL;
    kern_return_t kret;

    *new_semaphore = SEMAPHORE_NULL;
    if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX)
        return KERN_INVALID_ARGUMENT;

    s = (semaphore_t) zalloc(semaphore_zone);

    if (s == SEMAPHORE_NULL)
        return KERN_RESOURCE_SHORTAGE;

    kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */
    if (kret != KERN_SUCCESS) {
        zfree(semaphore_zone, s);
        return kret;
    }

    /*
     * Initialize the semaphore values.
     */
    s->port = IP_NULL;
    os_ref_init(&s->ref_count, &sema_refgrp);
    s->count = value;
    s->active = TRUE;
    s->owner = task;

    /*
     * Associate the new semaphore with the task by adding
     * the new semaphore to the task's semaphore list.
     */
    task_lock(task);
    enqueue_head(&task->semaphore_list, (queue_entry_t) s);
    task->semaphores_owned++;
    task_unlock(task);

    *new_semaphore = s;

    return KERN_SUCCESS;
}
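
/*
 * Example usage (sketch, in-kernel): create a FIFO semaphore with an
 * initial count of zero, exercise it, and destroy it. Ownership by
 * current_task() is illustrative; callers pass whichever task should
 * own the semaphore.
 *
 *     semaphore_t sem;
 *     kern_return_t kr;
 *
 *     kr = semaphore_create(current_task(), &sem, SYNC_POLICY_FIFO, 0);
 *     if (kr == KERN_SUCCESS) {
 *         semaphore_signal(sem);                    // count: 0 -> 1
 *         kr = semaphore_wait(sem);                 // count: 1 -> 0, no block
 *         semaphore_destroy(current_task(), sem);   // consumes the reference
 *     }
 */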

/*
 * Routine: semaphore_destroy_internal
 *
 * Disassociate a semaphore from its owning task, mark it inactive,
 * and set any waiting threads running with THREAD_RESTART.
 *
 * Conditions:
 * task is locked
 * semaphore is locked
 * semaphore is owned by the specified task
 * Returns:
 * with semaphore unlocked
 */
static void
semaphore_destroy_internal(
    task_t task,
    semaphore_t semaphore)
{
    int old_count;

    /* unlink semaphore from owning task */
    assert(semaphore->owner == task);
    remqueue((queue_entry_t) semaphore);
    semaphore->owner = TASK_NULL;
    task->semaphores_owned--;

    /*
     * Deactivate semaphore
     */
    assert(semaphore->active);
    semaphore->active = FALSE;

    /*
     * Wakeup blocked threads
     */
    old_count = semaphore->count;
    semaphore->count = 0;

    if (old_count < 0) {
        waitq_wakeup64_all_locked(&semaphore->waitq,
            SEMAPHORE_EVENT,
            THREAD_RESTART, NULL,
            WAITQ_ALL_PRIORITIES,
            WAITQ_UNLOCK);
        /* waitq/semaphore is unlocked */
    } else {
        semaphore_unlock(semaphore);
    }
}

/*
 * Routine: semaphore_destroy
 *
 * Destroys a semaphore and consumes the caller's reference on the
 * semaphore.
 */
kern_return_t
semaphore_destroy(
    task_t task,
    semaphore_t semaphore)
{
    spl_t spl_level;

    if (semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    if (task == TASK_NULL) {
        semaphore_dereference(semaphore);
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);
    spl_level = splsched();
    semaphore_lock(semaphore);

    if (semaphore->owner != task) {
        semaphore_unlock(semaphore);
        semaphore_dereference(semaphore);
        splx(spl_level);
        task_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    semaphore_destroy_internal(task, semaphore);
    /* semaphore unlocked */

    splx(spl_level);
    task_unlock(task);

    semaphore_dereference(semaphore);
    return KERN_SUCCESS;
}

/*
 * Routine: semaphore_destroy_all
 *
 * Destroy all the semaphores associated with a given task.
 */
#define SEMASPERSPL 20 /* max number of semaphores to destroy per spl hold */

void
semaphore_destroy_all(
    task_t task)
{
    uint32_t count;
    spl_t spl_level;

    count = 0;
    task_lock(task);
    while (!queue_empty(&task->semaphore_list)) {
        semaphore_t semaphore;

        semaphore = (semaphore_t) queue_first(&task->semaphore_list);

        if (count == 0)
            spl_level = splsched();
        semaphore_lock(semaphore);

        semaphore_destroy_internal(task, semaphore);
        /* semaphore unlocked */

        /* throttle number of semaphores per interrupt disablement */
        if (++count == SEMASPERSPL) {
            count = 0;
            splx(spl_level);
        }
    }
    if (count != 0)
        splx(spl_level);

    task_unlock(task);
}

/*
 * Routine: semaphore_signal_internal
 *
 * Signals the semaphore directly, waking either a specific
 * thread, any one waiter, or all waiters, according to the
 * arguments and options supplied.
 * Assumptions:
 * Semaphore is unlocked on entry; this routine takes and
 * releases the semaphore lock itself.
 */
kern_return_t
semaphore_signal_internal(
    semaphore_t semaphore,
    thread_t thread,
    int options)
{
    kern_return_t kr;
    spl_t spl_level;

    spl_level = splsched();
    semaphore_lock(semaphore);

    if (!semaphore->active) {
        semaphore_unlock(semaphore);
        splx(spl_level);
        return KERN_TERMINATED;
    }

    if (thread != THREAD_NULL) {
        if (semaphore->count < 0) {
            kr = waitq_wakeup64_thread_locked(
                &semaphore->waitq,
                SEMAPHORE_EVENT,
                thread,
                THREAD_AWAKENED,
                WAITQ_UNLOCK);
            /* waitq/semaphore is unlocked */
        } else {
            kr = KERN_NOT_WAITING;
            semaphore_unlock(semaphore);
        }
        splx(spl_level);
        return kr;
    }

    if (options & SEMAPHORE_SIGNAL_ALL) {
        int old_count = semaphore->count;

        kr = KERN_NOT_WAITING;
        if (old_count < 0) {
            semaphore->count = 0; /* always reset */
            kr = waitq_wakeup64_all_locked(
                &semaphore->waitq,
                SEMAPHORE_EVENT,
                THREAD_AWAKENED, NULL,
                WAITQ_ALL_PRIORITIES,
                WAITQ_UNLOCK);
            /* waitq/semaphore is unlocked */
        } else {
            if (options & SEMAPHORE_SIGNAL_PREPOST)
                semaphore->count++;
            kr = KERN_SUCCESS;
            semaphore_unlock(semaphore);
        }
        splx(spl_level);
        return kr;
    }

    if (semaphore->count < 0) {
        kr = waitq_wakeup64_one_locked(
            &semaphore->waitq,
            SEMAPHORE_EVENT,
            THREAD_AWAKENED, NULL,
            WAITQ_ALL_PRIORITIES,
            WAITQ_KEEP_LOCKED);
        if (kr == KERN_SUCCESS) {
            semaphore_unlock(semaphore);
            splx(spl_level);
            return KERN_SUCCESS;
        } else {
            semaphore->count = 0; /* all waiters gone */
        }
    }

    if (options & SEMAPHORE_SIGNAL_PREPOST) {
        semaphore->count++;
    }

    semaphore_unlock(semaphore);
    splx(spl_level);
    return KERN_NOT_WAITING;
}
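
/*
 * A minimal scenario (sketch) contrasting the option bits above when no
 * thread is waiting (count == 0) on a semaphore s:
 *
 *     semaphore_signal_internal(s, THREAD_NULL, SEMAPHORE_OPTION_NONE);
 *         // count unchanged; returns KERN_NOT_WAITING
 *     semaphore_signal_internal(s, THREAD_NULL, SEMAPHORE_SIGNAL_PREPOST);
 *         // count becomes 1; returns KERN_NOT_WAITING (callers such as
 *         // semaphore_signal() map this to KERN_SUCCESS); a later wait
 *         // returns without blocking
 *     semaphore_signal_internal(s, THREAD_NULL, SEMAPHORE_SIGNAL_ALL);
 *         // count stays 0; returns KERN_SUCCESS
 */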

/*
 * Routine: semaphore_signal_thread
 *
 * If the specified thread is blocked on the semaphore, it is
 * woken up. If a NULL thread is supplied, any one waiting
 * thread is woken up. Otherwise the caller gets KERN_NOT_WAITING
 * and the semaphore is unchanged.
 */
kern_return_t
semaphore_signal_thread(
    semaphore_t semaphore,
    thread_t thread)
{
    kern_return_t ret;

    if (semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    ret = semaphore_signal_internal(semaphore,
        thread,
        SEMAPHORE_OPTION_NONE);
    return ret;
}

/*
 * Routine: semaphore_signal_thread_trap
 *
 * Trap interface to the semaphore_signal_thread function.
 */
kern_return_t
semaphore_signal_thread_trap(
    struct semaphore_signal_thread_trap_args *args)
{
    mach_port_name_t sema_name = args->signal_name;
    mach_port_name_t thread_name = args->thread_name;
    semaphore_t semaphore;
    thread_t thread;
    kern_return_t kr;

    /*
     * MACH_PORT_NULL is not an error. It means that we want to
     * select any one thread that is already waiting, but not to
     * pre-post the semaphore.
     */
    if (thread_name != MACH_PORT_NULL) {
        thread = port_name_to_thread(thread_name);
        if (thread == THREAD_NULL)
            return KERN_INVALID_ARGUMENT;
    } else
        thread = THREAD_NULL;

    kr = port_name_to_semaphore(sema_name, &semaphore);
    if (kr == KERN_SUCCESS) {
        kr = semaphore_signal_internal(semaphore,
            thread,
            SEMAPHORE_OPTION_NONE);
        semaphore_dereference(semaphore);
    }
    if (thread != THREAD_NULL) {
        thread_deallocate(thread);
    }
    return kr;
}

/*
 * Routine: semaphore_signal
 *
 * Traditional (in-kernel client and MIG interface) semaphore
 * signal routine. Most users will access the trap version.
 *
 * This interface is not defined to return info about whether
 * this call found a thread waiting or not. The internal
 * routines (and future external routines) do. We have to
 * convert those into plain KERN_SUCCESS returns.
 */
kern_return_t
semaphore_signal(
    semaphore_t semaphore)
{
    kern_return_t kr;

    if (semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    kr = semaphore_signal_internal(semaphore,
        THREAD_NULL,
        SEMAPHORE_SIGNAL_PREPOST);
    if (kr == KERN_NOT_WAITING)
        return KERN_SUCCESS;
    return kr;
}
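
/*
 * Example (sketch): a producer thread publishing work to a consumer that
 * blocks in semaphore_wait(). work_available is assumed to be a semaphore
 * created with an initial count of zero; enqueue_work()/dequeue_work()
 * are hypothetical queue helpers, not part of this file.
 *
 *     // producer
 *     enqueue_work(item);
 *     semaphore_signal(work_available);
 *
 *     // consumer
 *     semaphore_wait(work_available);
 *     item = dequeue_work();
 */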

/*
 * Routine: semaphore_signal_trap
 *
 * Trap interface to the semaphore_signal function.
 */
kern_return_t
semaphore_signal_trap(
    struct semaphore_signal_trap_args *args)
{
    mach_port_name_t sema_name = args->signal_name;

    return (semaphore_signal_internal_trap(sema_name));
}

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name)
{
    semaphore_t semaphore;
    kern_return_t kr;

    kr = port_name_to_semaphore(sema_name, &semaphore);
    if (kr == KERN_SUCCESS) {
        kr = semaphore_signal_internal(semaphore,
            THREAD_NULL,
            SEMAPHORE_SIGNAL_PREPOST);
        semaphore_dereference(semaphore);
        if (kr == KERN_NOT_WAITING)
            kr = KERN_SUCCESS;
    }
    return kr;
}

/*
 * Routine: semaphore_signal_all
 *
 * Awakens ALL threads currently blocked on the semaphore.
 * The semaphore count returns to zero.
 */
kern_return_t
semaphore_signal_all(
    semaphore_t semaphore)
{
    kern_return_t kr;

    if (semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    kr = semaphore_signal_internal(semaphore,
        THREAD_NULL,
        SEMAPHORE_SIGNAL_ALL);
    if (kr == KERN_NOT_WAITING)
        return KERN_SUCCESS;
    return kr;
}

/*
 * Routine: semaphore_signal_all_trap
 *
 * Trap interface to the semaphore_signal_all function.
 */
kern_return_t
semaphore_signal_all_trap(
    struct semaphore_signal_all_trap_args *args)
{
    mach_port_name_t sema_name = args->signal_name;
    semaphore_t semaphore;
    kern_return_t kr;

    kr = port_name_to_semaphore(sema_name, &semaphore);
    if (kr == KERN_SUCCESS) {
        kr = semaphore_signal_internal(semaphore,
            THREAD_NULL,
            SEMAPHORE_SIGNAL_ALL);
        semaphore_dereference(semaphore);
        if (kr == KERN_NOT_WAITING)
            kr = KERN_SUCCESS;
    }
    return kr;
}

/*
 * Routine: semaphore_convert_wait_result
 *
 * Generate the return code after a semaphore wait/block. It
 * takes the wait result as an input and converts that to an
 * appropriate result.
 */
kern_return_t
semaphore_convert_wait_result(int wait_result)
{
    switch (wait_result) {
    case THREAD_AWAKENED:
        return KERN_SUCCESS;

    case THREAD_TIMED_OUT:
        return KERN_OPERATION_TIMED_OUT;

    case THREAD_INTERRUPTED:
        return KERN_ABORTED;

    case THREAD_RESTART:
        return KERN_TERMINATED;

    default:
        panic("semaphore_block\n");
        return KERN_FAILURE;
    }
}

/*
 * Routine: semaphore_wait_continue
 *
 * Common continuation routine after waiting on a semaphore.
 * It returns directly to user space.
 */
void
semaphore_wait_continue(void)
{
    thread_t self = current_thread();
    int wait_result = self->wait_result;
    void (*caller_cont)(kern_return_t) = self->sth_continuation;

    assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
    semaphore_dereference(self->sth_waitsemaphore);
    if (self->sth_signalsemaphore != SEMAPHORE_NULL)
        semaphore_dereference(self->sth_signalsemaphore);

    assert(caller_cont != (void (*)(kern_return_t))0);
    (*caller_cont)(semaphore_convert_wait_result(wait_result));
}

/*
 * Routine: semaphore_wait_internal
 *
 * Decrements the semaphore count by one. If the count is
 * negative after the decrement, the calling thread blocks
 * (possibly at a continuation and/or with a timeout).
 *
 * Assumptions:
 * A reference is held on the signal semaphore.
 */
static kern_return_t
semaphore_wait_internal(
    semaphore_t wait_semaphore,
    semaphore_t signal_semaphore,
    uint64_t deadline,
    int option,
    void (*caller_cont)(kern_return_t))
{
    int wait_result;
    spl_t spl_level;
    kern_return_t kr = KERN_ALREADY_WAITING;

    spl_level = splsched();
    semaphore_lock(wait_semaphore);

    if (!wait_semaphore->active) {
        kr = KERN_TERMINATED;
    } else if (wait_semaphore->count > 0) {
        wait_semaphore->count--;
        kr = KERN_SUCCESS;
    } else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
        kr = KERN_OPERATION_TIMED_OUT;
    } else {
        thread_t self = current_thread();

        wait_semaphore->count = -1; /* we don't keep an actual count */

        thread_set_pending_block_hint(self, kThreadWaitSemaphore);
        (void)waitq_assert_wait64_locked(
            &wait_semaphore->waitq,
            SEMAPHORE_EVENT,
            THREAD_ABORTSAFE,
            TIMEOUT_URGENCY_USER_NORMAL,
            deadline, TIMEOUT_NO_LEEWAY,
            self);
    }
    semaphore_unlock(wait_semaphore);
    splx(spl_level);

    /*
     * wait_semaphore is unlocked so we are free to go ahead and
     * signal the signal_semaphore (if one was provided).
     */
    if (signal_semaphore != SEMAPHORE_NULL) {
        kern_return_t signal_kr;

        /*
         * lock the signal semaphore reference we got and signal it.
         * This will NOT block (we cannot block after having asserted
         * our intention to wait above).
         */
        signal_kr = semaphore_signal_internal(signal_semaphore,
            THREAD_NULL,
            SEMAPHORE_SIGNAL_PREPOST);

        if (signal_kr == KERN_NOT_WAITING)
            signal_kr = KERN_SUCCESS;
        else if (signal_kr == KERN_TERMINATED) {
            /*
             * Uh oh! The semaphore we were to signal died.
             * We have to get ourselves out of the wait in
             * case we get stuck here forever (it is assumed
             * that the semaphore we were posting is gating
             * the decision by someone else to post the
             * semaphore we are waiting on). People will
             * discover the other dead semaphore soon enough.
             * If we got out of the wait cleanly (someone
             * already posted a wakeup to us) then return that
             * (most important) result. Otherwise,
             * return the KERN_TERMINATED status.
             */
            thread_t self = current_thread();

            clear_wait(self, THREAD_INTERRUPTED);
            kr = semaphore_convert_wait_result(self->wait_result);
            if (kr == KERN_ABORTED)
                kr = KERN_TERMINATED;
        }
    }

    /*
     * If we had an error, or we didn't really need to wait we can
     * return now that we have signalled the signal semaphore.
     */
    if (kr != KERN_ALREADY_WAITING)
        return kr;

    /*
     * Now, we can block. If the caller supplied a continuation
     * pointer of their own for after the block, block with the
     * appropriate semaphore continuation. This will gather the
     * semaphore results, release references on the semaphore(s),
     * and then call the caller's continuation.
     */
    if (caller_cont) {
        thread_t self = current_thread();

        self->sth_continuation = caller_cont;
        self->sth_waitsemaphore = wait_semaphore;
        self->sth_signalsemaphore = signal_semaphore;
        wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
    } else {
        wait_result = thread_block(THREAD_CONTINUE_NULL);
    }

    return (semaphore_convert_wait_result(wait_result));
}

/*
 * Routine: semaphore_wait
 *
 * Traditional (non-continuation) interface presented to
 * in-kernel clients to wait on a semaphore.
 */
kern_return_t
semaphore_wait(
    semaphore_t semaphore)
{
    if (semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    return (semaphore_wait_internal(semaphore,
        SEMAPHORE_NULL,
        0ULL, SEMAPHORE_OPTION_NONE,
        (void (*)(kern_return_t))0));
}

kern_return_t
semaphore_wait_noblock(
    semaphore_t semaphore)
{
    if (semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    return (semaphore_wait_internal(semaphore,
        SEMAPHORE_NULL,
        0ULL, SEMAPHORE_TIMEOUT_NOBLOCK,
        (void (*)(kern_return_t))0));
}

kern_return_t
semaphore_wait_deadline(
    semaphore_t semaphore,
    uint64_t deadline)
{
    if (semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    return (semaphore_wait_internal(semaphore,
        SEMAPHORE_NULL,
        deadline, SEMAPHORE_OPTION_NONE,
        (void (*)(kern_return_t))0));
}
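
/*
 * Example (sketch): wait at most 100ms for a wakeup using the deadline
 * variant; "sem" is assumed to be a valid semaphore reference.
 *
 *     uint64_t interval, deadline;
 *     kern_return_t kr;
 *
 *     nanoseconds_to_absolutetime(100 * NSEC_PER_MSEC, &interval);
 *     clock_absolutetime_interval_to_deadline(interval, &deadline);
 *
 *     kr = semaphore_wait_deadline(sem, deadline);
 *     if (kr == KERN_OPERATION_TIMED_OUT) {
 *         // no signal arrived within 100ms
 *     }
 */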
825
826/*
827 * Trap: semaphore_wait_trap
828 *
829 * Trap version of semaphore wait. Called on behalf of user-level
830 * clients.
831 */
832
833kern_return_t
834semaphore_wait_trap(
835 struct semaphore_wait_trap_args *args)
836{
837 return(semaphore_wait_trap_internal(args->wait_name, thread_syscall_return));
838}
839
840
841
842kern_return_t
843semaphore_wait_trap_internal(
844 mach_port_name_t name,
845 void (*caller_cont)(kern_return_t))
846{
847 semaphore_t semaphore;
848 kern_return_t kr;
849
850 kr = port_name_to_semaphore(name, &semaphore);
851 if (kr == KERN_SUCCESS) {
852 kr = semaphore_wait_internal(semaphore,
853 SEMAPHORE_NULL,
854 0ULL, SEMAPHORE_OPTION_NONE,
855 caller_cont);
856 semaphore_dereference(semaphore);
857 }
858 return kr;
859}

/*
 * Routine: semaphore_timedwait
 *
 * Traditional (non-continuation) interface presented to
 * in-kernel clients to wait on a semaphore with a timeout.
 *
 * A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait(
    semaphore_t semaphore,
    mach_timespec_t wait_time)
{
    int option = SEMAPHORE_OPTION_NONE;
    uint64_t deadline = 0;

    if (semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    if (BAD_MACH_TIMESPEC(&wait_time))
        return KERN_INVALID_VALUE;

    if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0)
        option = SEMAPHORE_TIMEOUT_NOBLOCK;
    else
        deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);

    return (semaphore_wait_internal(semaphore,
        SEMAPHORE_NULL,
        deadline, option,
        (void (*)(kern_return_t))0));
}
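
/*
 * Example (sketch): the same 100ms wait expressed as a mach_timespec_t,
 * plus a {0,0} timespec used as a non-blocking poll; "sem" is assumed
 * to be a valid semaphore reference.
 *
 *     mach_timespec_t ts = { 0, 100 * NSEC_PER_MSEC };
 *     kern_return_t kr = semaphore_timedwait(sem, ts);
 *
 *     mach_timespec_t poll = { 0, 0 };
 *     kr = semaphore_timedwait(sem, poll);
 *     // returns KERN_OPERATION_TIMED_OUT immediately if the count is 0
 */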

/*
 * Trap: semaphore_timedwait_trap
 *
 * Trap version of a semaphore_timedwait. The timeout parameter
 * is passed in two distinct parts and re-assembled on this side
 * of the trap interface (to accommodate calling conventions that
 * pass structures as pointers instead of inline in registers without
 * having to add a copyin).
 *
 * A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_trap(
    struct semaphore_timedwait_trap_args *args)
{
    return (semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return));
}

kern_return_t
semaphore_timedwait_trap_internal(
    mach_port_name_t name,
    unsigned int sec,
    clock_res_t nsec,
    void (*caller_cont)(kern_return_t))
{
    semaphore_t semaphore;
    mach_timespec_t wait_time;
    kern_return_t kr;

    wait_time.tv_sec = sec;
    wait_time.tv_nsec = nsec;
    if (BAD_MACH_TIMESPEC(&wait_time))
        return KERN_INVALID_VALUE;

    kr = port_name_to_semaphore(name, &semaphore);
    if (kr == KERN_SUCCESS) {
        int option = SEMAPHORE_OPTION_NONE;
        uint64_t deadline = 0;

        if (sec == 0 && nsec == 0)
            option = SEMAPHORE_TIMEOUT_NOBLOCK;
        else
            deadline = semaphore_deadline(sec, nsec);

        kr = semaphore_wait_internal(semaphore,
            SEMAPHORE_NULL,
            deadline, option,
            caller_cont);
        semaphore_dereference(semaphore);
    }
    return kr;
}

/*
 * Routine: semaphore_wait_signal
 *
 * Atomically register a wait on a semaphore and THEN signal
 * another. This is the in-kernel entry point that does not
 * block at a continuation and does not free a signal_semaphore
 * reference.
 */
kern_return_t
semaphore_wait_signal(
    semaphore_t wait_semaphore,
    semaphore_t signal_semaphore)
{
    if (wait_semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    return (semaphore_wait_internal(wait_semaphore,
        signal_semaphore,
        0ULL, SEMAPHORE_OPTION_NONE,
        (void (*)(kern_return_t))0));
}
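
/*
 * Example (sketch): a two-thread handoff. Thread A atomically registers
 * its wait on "pong" before signalling "ping", and thread B does the
 * converse, so neither side can miss the other's wakeup. Both semaphores
 * are assumed to start with a count of zero.
 *
 *     // thread A
 *     semaphore_wait_signal(pong, ping);
 *
 *     // thread B
 *     semaphore_wait_signal(ping, pong);
 */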

/*
 * Trap: semaphore_wait_signal_trap
 *
 * Atomically register a wait on a semaphore and THEN signal
 * another. This is the trap version from user space.
 */
kern_return_t
semaphore_wait_signal_trap(
    struct semaphore_wait_signal_trap_args *args)
{
    return (semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return));
}

kern_return_t
semaphore_wait_signal_trap_internal(
    mach_port_name_t wait_name,
    mach_port_name_t signal_name,
    void (*caller_cont)(kern_return_t))
{
    semaphore_t wait_semaphore;
    semaphore_t signal_semaphore;
    kern_return_t kr;

    kr = port_name_to_semaphore(signal_name, &signal_semaphore);
    if (kr == KERN_SUCCESS) {
        kr = port_name_to_semaphore(wait_name, &wait_semaphore);
        if (kr == KERN_SUCCESS) {
            kr = semaphore_wait_internal(wait_semaphore,
                signal_semaphore,
                0ULL, SEMAPHORE_OPTION_NONE,
                caller_cont);
            semaphore_dereference(wait_semaphore);
        }
        semaphore_dereference(signal_semaphore);
    }
    return kr;
}

/*
 * Routine: semaphore_timedwait_signal
 *
 * Atomically register a wait on a semaphore and THEN signal
 * another. This is the in-kernel entry point that does not
 * block at a continuation.
 *
 * A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_signal(
    semaphore_t wait_semaphore,
    semaphore_t signal_semaphore,
    mach_timespec_t wait_time)
{
    int option = SEMAPHORE_OPTION_NONE;
    uint64_t deadline = 0;

    if (wait_semaphore == SEMAPHORE_NULL)
        return KERN_INVALID_ARGUMENT;

    if (BAD_MACH_TIMESPEC(&wait_time))
        return KERN_INVALID_VALUE;

    if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0)
        option = SEMAPHORE_TIMEOUT_NOBLOCK;
    else
        deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);

    return (semaphore_wait_internal(wait_semaphore,
        signal_semaphore,
        deadline, option,
        (void (*)(kern_return_t))0));
}

/*
 * Trap: semaphore_timedwait_signal_trap
 *
 * Atomically register a timed wait on a semaphore and THEN signal
 * another. This is the trap version from user space.
 */
kern_return_t
semaphore_timedwait_signal_trap(
    struct semaphore_timedwait_signal_trap_args *args)
{
    return (semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return));
}

kern_return_t
semaphore_timedwait_signal_trap_internal(
    mach_port_name_t wait_name,
    mach_port_name_t signal_name,
    unsigned int sec,
    clock_res_t nsec,
    void (*caller_cont)(kern_return_t))
{
    semaphore_t wait_semaphore;
    semaphore_t signal_semaphore;
    mach_timespec_t wait_time;
    kern_return_t kr;

    wait_time.tv_sec = sec;
    wait_time.tv_nsec = nsec;
    if (BAD_MACH_TIMESPEC(&wait_time))
        return KERN_INVALID_VALUE;

    kr = port_name_to_semaphore(signal_name, &signal_semaphore);
    if (kr == KERN_SUCCESS) {
        kr = port_name_to_semaphore(wait_name, &wait_semaphore);
        if (kr == KERN_SUCCESS) {
            int option = SEMAPHORE_OPTION_NONE;
            uint64_t deadline = 0;

            if (sec == 0 && nsec == 0)
                option = SEMAPHORE_TIMEOUT_NOBLOCK;
            else
                deadline = semaphore_deadline(sec, nsec);

            kr = semaphore_wait_internal(wait_semaphore,
                signal_semaphore,
                deadline, option,
                caller_cont);
            semaphore_dereference(wait_semaphore);
        }
        semaphore_dereference(signal_semaphore);
    }
    return kr;
}

/*
 * Routine: semaphore_reference
 *
 * Take out a reference on a semaphore. This keeps the data structure
 * in existence (but the semaphore may be deactivated).
 */
void
semaphore_reference(
    semaphore_t semaphore)
{
    os_ref_retain(&semaphore->ref_count);
}

/*
 * Routine: semaphore_dereference
 *
 * Release a reference on a semaphore. If this is the last reference,
 * the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
    semaphore_t semaphore)
{
    uint32_t collisions;
    spl_t spl_level;

    if (semaphore == NULL)
        return;

    if (os_ref_release(&semaphore->ref_count) > 0) {
        return;
    }

    /*
     * Last ref, clean up the port [if any]
     * associated with the semaphore, destroy
     * it (if still active) and then free
     * the semaphore.
     */
    ipc_port_t port = semaphore->port;

    if (IP_VALID(port)) {
        assert(!port->ip_srights);
        ipc_port_dealloc_kernel(port);
    }

    /*
     * Lock the semaphore to lock in the owner task reference.
     * Then continue to try to lock the task (inverse order).
     */
    spl_level = splsched();
    semaphore_lock(semaphore);
    for (collisions = 0; semaphore->active; collisions++) {
        task_t task = semaphore->owner;

        assert(task != TASK_NULL);

        if (task_lock_try(task)) {
            semaphore_destroy_internal(task, semaphore);
            /* semaphore unlocked */
            splx(spl_level);
            task_unlock(task);
            goto out;
        }

        /* failed to get out-of-order locks */
        semaphore_unlock(semaphore);
        splx(spl_level);
        mutex_pause(collisions);
        spl_level = splsched();
        semaphore_lock(semaphore);
    }
    semaphore_unlock(semaphore);
    splx(spl_level);

out:
    zfree(semaphore_zone, semaphore);
}

#define WAITQ_TO_SEMA(wq) ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq)))

void
kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
    semaphore_t sem = WAITQ_TO_SEMA(waitq);
    assert(event == SEMAPHORE_EVENT);
    assert(kdp_is_in_zone(sem, "semaphores"));

    waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
    if (sem->owner)
        waitinfo->owner = pid_from_task(sem->owner);
}