/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_eventlink.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>
#include <mach/mach_eventlink_server.h>

#include <libkern/OSAtomic.h>

static KALLOC_TYPE_DEFINE(ipc_eventlink_zone,
    struct ipc_eventlink_base, KT_DEFAULT);

os_refgrp_decl(static, ipc_eventlink_refgrp, "eventlink", NULL);

#if DEVELOPMENT || DEBUG
static queue_head_t ipc_eventlink_list = QUEUE_HEAD_INITIALIZER(ipc_eventlink_list);
static LCK_GRP_DECLARE(ipc_eventlink_dev_lock_grp, "ipc_eventlink_dev_lock");
static LCK_SPIN_DECLARE(global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp);

#define global_ipc_eventlink_lock() \
    lck_spin_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_lock_try() \
    lck_spin_try_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_unlock() \
    lck_spin_unlock(&global_ipc_eventlink_lock)

#endif /* DEVELOPMENT || DEBUG */

/* Forward declarations */
static void
ipc_eventlink_no_senders(
    ipc_port_t port,
    mach_port_mscount_t mscount);

static struct ipc_eventlink_base *
ipc_eventlink_alloc(void);

static void
ipc_eventlink_initialize(
    struct ipc_eventlink_base *ipc_eventlink_base);

static kern_return_t
ipc_eventlink_destroy_internal(
    struct ipc_eventlink *ipc_eventlink);

static kern_return_t
ipc_eventlink_signal(
    struct ipc_eventlink *ipc_eventlink);

static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
    mach_port_name_t wait_port,
    mach_port_name_t signal_port,
    uint64_t count,
    mach_eventlink_signal_wait_option_t el_option,
    kern_clock_id_t clock_id,
    uint64_t deadline);

static kern_return_t
ipc_eventlink_signal_wait_internal(
    struct ipc_eventlink *wait_eventlink,
    struct ipc_eventlink *signal_eventlink,
    uint64_t deadline,
    uint64_t *count,
    ipc_eventlink_option_t eventlink_option);

static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result);

static kern_return_t
ipc_eventlink_signal_internal_locked(
    struct ipc_eventlink *signal_eventlink,
    ipc_eventlink_option_t eventlink_option);

static kern_return_t
convert_port_to_eventlink_locked(
    ipc_port_t port,
    struct ipc_eventlink **ipc_eventlink_ptr);

static kern_return_t
port_name_to_eventlink(
    mach_port_name_t name,
    struct ipc_eventlink **ipc_eventlink_ptr);

IPC_KOBJECT_DEFINE(IKOT_EVENTLINK,
    .iko_op_no_senders = ipc_eventlink_no_senders);

/*
 * Name: ipc_eventlink_alloc
 *
 * Description: Allocates an ipc_eventlink struct and initializes it.
 *
 * Args: None.
 *
 * Returns:
 * ipc_eventlink_base on Success.
 */
static struct ipc_eventlink_base *
ipc_eventlink_alloc(void)
{
    struct ipc_eventlink_base *ipc_eventlink_base = IPC_EVENTLINK_BASE_NULL;
    ipc_eventlink_base = zalloc(ipc_eventlink_zone);

    ipc_eventlink_initialize(ipc_eventlink_base);

#if DEVELOPMENT || DEBUG
    /* Add ipc_eventlink to global list */
    global_ipc_eventlink_lock();
    queue_enter(&ipc_eventlink_list, ipc_eventlink_base,
        struct ipc_eventlink_base *, elb_global_elm);
    global_ipc_eventlink_unlock();
#endif
    return ipc_eventlink_base;
}

/*
 * Name: ipc_eventlink_initialize
 *
 * Description: Initializes ipc eventlink struct.
 *
 * Args: ipc eventlink base.
 *
 * Returns:
 * None.
 */
static void
ipc_eventlink_initialize(
    struct ipc_eventlink_base *ipc_eventlink_base)
{
    /* Initialize the ref count to 2, one reference for each eventlink port */
    os_ref_init_count(&ipc_eventlink_base->elb_ref_count, &ipc_eventlink_refgrp, 2);
    ipc_eventlink_base->elb_type = IPC_EVENTLINK_TYPE_NO_COPYIN;

    for (int i = 0; i < 2; i++) {
        struct ipc_eventlink *ipc_eventlink = &(ipc_eventlink_base->elb_eventlink[i]);

        ipc_eventlink->el_port = ipc_kobject_alloc_port((ipc_kobject_t)ipc_eventlink,
            IKOT_EVENTLINK, IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
        /* ipc_kobject_alloc_port never fails */
        ipc_eventlink->el_thread = THREAD_NULL;
        ipc_eventlink->el_sync_counter = 0;
        ipc_eventlink->el_wait_counter = UINT64_MAX;
        ipc_eventlink->el_base = ipc_eventlink_base;
    }

    /* Must be done last */
    waitq_init(&ipc_eventlink_base->elb_waitq, WQT_QUEUE, SYNC_POLICY_FIFO);
}
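
/*
 * Layout note: a single ipc_eventlink_base embeds both sides of the link
 * (elb_eventlink[0] and elb_eventlink[1]) plus one shared waitq, and each
 * side owns its own kobject port; this is why the ref count above starts
 * at 2, one reference per port.
 */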

/*
 * Name: mach_eventlink_create
 *
 * Description: Allocates an eventlink pair and returns a send right to
 * each side's port.
 *
 * Args:
 * task: task creating the eventlink; must be the current task
 * elc_option: creation option; only MELC_OPTION_NO_COPYIN is supported
 * eventlink_port_pair: out array receiving the two eventlink ports
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_create(
    task_t task,
    mach_eventlink_create_option_t elc_option,
    eventlink_port_pair_t eventlink_port_pair)
{
    int i;
    struct ipc_eventlink_base *ipc_eventlink_base;

    if (task == TASK_NULL || task != current_task() ||
        elc_option != MELC_OPTION_NO_COPYIN) {
        return KERN_INVALID_ARGUMENT;
    }

    ipc_eventlink_base = ipc_eventlink_alloc();

    for (i = 0; i < 2; i++) {
        eventlink_port_pair[i] = ipc_eventlink_base->elb_eventlink[i].el_port;
    }

    return KERN_SUCCESS;
}
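
/*
 * Illustrative user-space sketch (not kernel code; assumes the
 * MIG-generated mach_eventlink_create() wrapper and its user-visible
 * constants): create the pair and hand one side to each cooperating
 * thread.
 *
 *	mach_port_t ports[2];
 *	kern_return_t kr;
 *
 *	kr = mach_eventlink_create(mach_task_self(), MELC_OPTION_NO_COPYIN, ports);
 *	if (kr == KERN_SUCCESS) {
 *		// ports[0] and ports[1] each hold a send right to one side
 *		// of the eventlink.
 *	}
 */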

/*
 * Name: mach_eventlink_destroy
 *
 * Description: Destroy an ipc_eventlink and wake up all waiting threads.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_destroy(
    struct ipc_eventlink *ipc_eventlink)
{
    ipc_eventlink_destroy_internal(ipc_eventlink);

    /* mach_eventlink_destroy should succeed even for a terminated eventlink */
    return KERN_SUCCESS;
}

/*
 * Name: ipc_eventlink_destroy_internal
 *
 * Description: Destroy an ipc_eventlink and wake up all waiting threads.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_destroy_internal(
    struct ipc_eventlink *ipc_eventlink)
{
    spl_t s;
    struct ipc_eventlink_base *ipc_eventlink_base;
    thread_t associated_thread[2] = {};
    ipc_port_t ipc_eventlink_port = IPC_PORT_NULL;
    ipc_port_t ipc_eventlink_port_remote = IPC_PORT_NULL;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_TERMINATED;
    }

    s = splsched();
    ipc_eventlink_lock(ipc_eventlink);

    ipc_eventlink_base = ipc_eventlink->el_base;

    /* Check if the eventlink is active */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_TERMINATED;
    }

    for (int i = 0; i < 2; i++) {
        struct ipc_eventlink *temp_ipc_eventlink = &ipc_eventlink_base->elb_eventlink[i];

        /* Wake up threads sleeping on the eventlink */
        if (temp_ipc_eventlink->el_thread) {
            associated_thread[i] = temp_ipc_eventlink->el_thread;
            temp_ipc_eventlink->el_thread = THREAD_NULL;

            ipc_eventlink_signal_internal_locked(temp_ipc_eventlink,
                IPC_EVENTLINK_FORCE_WAKEUP);
        }

        /* Only destroy the port on which destroy was called */
        if (temp_ipc_eventlink == ipc_eventlink) {
            ipc_eventlink_port = temp_ipc_eventlink->el_port;
            assert(ipc_eventlink_port != IPC_PORT_NULL);
        } else {
            /* Do not destroy the remote port, else eventlink_destroy will fail */
            ipc_eventlink_port_remote = temp_ipc_eventlink->el_port;
            assert(ipc_eventlink_port_remote != IPC_PORT_NULL);
            /*
             * Take a reference on the remote port, since it could go
             * away after the eventlink lock is dropped.
             */
            ip_validate(ipc_eventlink_port_remote);
            ip_reference(ipc_eventlink_port_remote);
        }
        assert(temp_ipc_eventlink->el_port != IPC_PORT_NULL);
        temp_ipc_eventlink->el_port = IPC_PORT_NULL;
    }

    /* Mark the eventlink as inactive */
    waitq_invalidate(&ipc_eventlink_base->elb_waitq);

    ipc_eventlink_unlock(ipc_eventlink);
    splx(s);

    /* Destroy the local eventlink port */
    ipc_kobject_dealloc_port(ipc_eventlink_port, 0, IKOT_EVENTLINK);
    /* Drops port reference */

    /* Clear the remote eventlink port without destroying it */
    (void)ipc_kobject_disable(ipc_eventlink_port_remote, IKOT_EVENTLINK);
    ip_release(ipc_eventlink_port_remote);

    for (int i = 0; i < 2; i++) {
        if (associated_thread[i] != THREAD_NULL &&
            associated_thread[i] != THREAD_ASSOCIATE_WILD) {
            thread_deallocate(associated_thread[i]);
        }

        /* Drop the eventlink reference given to the port */
        ipc_eventlink_deallocate(ipc_eventlink);
    }
    return KERN_SUCCESS;
}

/*
 * Name: mach_eventlink_associate
 *
 * Description: Associate a thread to the eventlink.
 *
 * Args:
 * eventlink: eventlink
 * thread: thread to be associated
 * copyin_addr_wait: copyin addr for wait
 * copyin_mask_wait: copyin mask for wait
 * copyin_addr_signal: copyin addr for signal
 * copyin_mask_signal: copyin mask for signal
 * ela_option: option for eventlink associate
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_associate(
    struct ipc_eventlink *ipc_eventlink,
    thread_t thread,
    mach_vm_address_t copyin_addr_wait,
    uint64_t copyin_mask_wait,
    mach_vm_address_t copyin_addr_signal,
    uint64_t copyin_mask_signal,
    mach_eventlink_associate_option_t ela_option)
{
    spl_t s;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_TERMINATED;
    }

    if (copyin_addr_wait != 0 || copyin_mask_wait != 0 ||
        copyin_addr_signal != 0 || copyin_mask_signal != 0) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((thread == NULL && ela_option == MELA_OPTION_NONE) ||
        (thread != NULL && ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    ipc_eventlink_lock(ipc_eventlink);

    /* Check if eventlink is terminated */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_TERMINATED;
    }

    if (ipc_eventlink->el_thread != NULL) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_NAME_EXISTS;
    }

    if (ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT) {
        ipc_eventlink->el_thread = THREAD_ASSOCIATE_WILD;
    } else {
        thread_reference(thread);
        ipc_eventlink->el_thread = thread;
    }

    ipc_eventlink_unlock(ipc_eventlink);
    splx(s);
    return KERN_SUCCESS;
}
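
/*
 * Illustrative user-space sketch (same assumptions as the creation
 * example): bind one side of the pair to the thread that will wait on it.
 * Since only no-copyin eventlinks are supported, the copyin addresses and
 * masks must all be zero:
 *
 *	kr = mach_eventlink_associate(ports[0], thread_port,
 *	    0, 0, 0, 0, MELA_OPTION_NONE);
 */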

/*
 * Name: mach_eventlink_disassociate
 *
 * Description: Disassociate a thread from the eventlink.
 * Wake up the associated thread if it is blocked on the eventlink.
 *
 * Args:
 * eventlink: eventlink
 * eld_option: option for eventlink disassociate
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_disassociate(
    struct ipc_eventlink *ipc_eventlink,
    mach_eventlink_disassociate_option_t eld_option)
{
    spl_t s;
    thread_t thread;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_TERMINATED;
    }

    if (eld_option != MELD_OPTION_NONE) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    ipc_eventlink_lock(ipc_eventlink);

    /* Check if eventlink is terminated */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_TERMINATED;
    }

    if (ipc_eventlink->el_thread == NULL) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_INVALID_ARGUMENT;
    }

    thread = ipc_eventlink->el_thread;
    ipc_eventlink->el_thread = NULL;

    /* Wake up the thread if blocked */
    ipc_eventlink_signal_internal_locked(ipc_eventlink,
        IPC_EVENTLINK_FORCE_WAKEUP);

    ipc_eventlink_unlock(ipc_eventlink);
    splx(s);

    if (thread != THREAD_ASSOCIATE_WILD) {
        thread_deallocate(thread);
    }
    return KERN_SUCCESS;
}
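
/*
 * Illustrative user-space sketch (same assumptions as above). Any thread
 * blocked on this side is force-woken with THREAD_RESTART, which it
 * observes as KERN_TERMINATED:
 *
 *	kr = mach_eventlink_disassociate(ports[0], MELD_OPTION_NONE);
 */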

/*
 * Name: mach_eventlink_signal_trap
 *
 * Description: Increment the sync count of the eventlink and
 * wake up the waiting thread if the sync counter is greater
 * than the wait counter.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * uint64_t: Contains count and error codes.
 */
uint64_t
mach_eventlink_signal_trap(
    mach_port_name_t port,
    uint64_t signal_count __unused)
{
    struct ipc_eventlink *ipc_eventlink;
    kern_return_t kr;
    uint64_t retval = 0;

    kr = port_name_to_eventlink(port, &ipc_eventlink);
    if (kr == KERN_SUCCESS) {
        /* Signal the remote side of the eventlink */
        kr = ipc_eventlink_signal(eventlink_remote_side(ipc_eventlink));

        /* Deallocate ref returned by port_name_to_eventlink */
        ipc_eventlink_deallocate(ipc_eventlink);
    }

    retval = encode_eventlink_count_and_error(0, kr);
    return retval;
}
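
/*
 * Note on the trap return value: encode_eventlink_count_and_error() packs
 * both the current count and the kern_return_t into a single uint64_t, so
 * user space must split the two fields with the matching decode macros
 * before inspecting either one.
 */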

/*
 * Name: ipc_eventlink_signal
 *
 * Description: Increment the sync count of the eventlink and
 * wake up the waiting thread if the sync counter is greater
 * than the wait counter.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_signal(
    struct ipc_eventlink *ipc_eventlink)
{
    kern_return_t kr;
    spl_t s;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    ipc_eventlink_lock(ipc_eventlink);

    /* Check if eventlink is terminated */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_TERMINATED;
    }

    kr = ipc_eventlink_signal_internal_locked(ipc_eventlink,
        IPC_EVENTLINK_NONE);

    ipc_eventlink_unlock(ipc_eventlink);
    splx(s);

    if (kr == KERN_NOT_WAITING) {
        kr = KERN_SUCCESS;
    }

    return kr;
}

/*
 * Name: mach_eventlink_wait_until_trap
 *
 * Description: Wait until the local signal count exceeds the
 * specified count or the deadline passes.
 *
 * Args:
 * eventlink_port: eventlink port to wait on
 * wait_count: signal count to wait on
 * option: eventlink option
 * clock_id: clock id
 * deadline: deadline in mach_absolute_time
 *
 * Returns:
 * uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_wait_until_trap(
    mach_port_name_t eventlink_port,
    uint64_t wait_count,
    mach_eventlink_signal_wait_option_t option,
    kern_clock_id_t clock_id,
    uint64_t deadline)
{
    return ipc_eventlink_signal_wait_until_trap_internal(
        eventlink_port,
        MACH_PORT_NULL,
        wait_count,
        option,
        clock_id,
        deadline);
}
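
/*
 * Trap-level usage sketch (illustrative; real callers normally go through
 * the libsyscall wrapper): block until the local side has been signaled
 * past last_count, then decode the new count and error from the packed
 * return value:
 *
 *	uint64_t retval = mach_eventlink_wait_until_trap(port_name,
 *	    last_count, MELSW_OPTION_NONE, KERN_CLOCK_MACH_ABSOLUTE_TIME,
 *	    deadline);
 */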

/*
 * Name: mach_eventlink_signal_wait_until_trap
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until the local signal count exceeds the
 * specified count or the deadline passes.
 *
 * Args:
 * eventlink_port: eventlink port to signal and wait on
 * wait_count: signal count to wait on
 * option: eventlink option
 * clock_id: clock id
 * deadline: deadline in mach_absolute_time
 *
 * Returns:
 * uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_signal_wait_until_trap(
    mach_port_name_t eventlink_port,
    uint64_t wait_count,
    uint64_t signal_count __unused,
    mach_eventlink_signal_wait_option_t option,
    kern_clock_id_t clock_id,
    uint64_t deadline)
{
    return ipc_eventlink_signal_wait_until_trap_internal(
        eventlink_port,
        eventlink_port,
        wait_count,
        option,
        clock_id,
        deadline);
}
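
/*
 * Trap-level ping-pong sketch (illustrative, same caveats as above): each
 * side signals its peer and waits for its own count to advance past the
 * last value it observed:
 *
 *	uint64_t count = 0;
 *	for (;;) {
 *		uint64_t retval = mach_eventlink_signal_wait_until_trap(
 *		    port_name, count, 0, MELSW_OPTION_NONE,
 *		    KERN_CLOCK_MACH_ABSOLUTE_TIME, deadline);
 *		// decode the new count and error from retval, then do work
 *	}
 */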

/*
 * Name: ipc_eventlink_signal_wait_until_trap_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until the local signal count exceeds the
 * specified count or the deadline passes.
 *
 * Args:
 * wait_port: eventlink port for wait
 * signal_port: eventlink port for signal
 * count: signal count to wait on
 * el_option: eventlink option
 * clock_id: clock id
 * deadline: deadline in mach_absolute_time
 *
 * Returns:
 * uint64_t: contains signal count and error codes
 */
static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
    mach_port_name_t wait_port,
    mach_port_name_t signal_port,
    uint64_t count,
    mach_eventlink_signal_wait_option_t el_option,
    kern_clock_id_t clock_id,
    uint64_t deadline)
{
    struct ipc_eventlink *wait_ipc_eventlink = IPC_EVENTLINK_NULL;
    struct ipc_eventlink *signal_ipc_eventlink = IPC_EVENTLINK_NULL;
    kern_return_t kr;
    ipc_eventlink_option_t ipc_eventlink_option = IPC_EVENTLINK_NONE;

    if (clock_id != KERN_CLOCK_MACH_ABSOLUTE_TIME) {
        return encode_eventlink_count_and_error(count, KERN_INVALID_ARGUMENT);
    }

    kr = port_name_to_eventlink(wait_port, &wait_ipc_eventlink);
    if (kr == KERN_SUCCESS) {
        assert(wait_ipc_eventlink != IPC_EVENTLINK_NULL);

        /* Get the remote side of the eventlink for signal */
        if (signal_port != MACH_PORT_NULL) {
            signal_ipc_eventlink = eventlink_remote_side(wait_ipc_eventlink);
        }

        if (el_option & MELSW_OPTION_NO_WAIT) {
            ipc_eventlink_option |= IPC_EVENTLINK_NO_WAIT;
        }

        kr = ipc_eventlink_signal_wait_internal(wait_ipc_eventlink,
            signal_ipc_eventlink, deadline,
            &count, ipc_eventlink_option);

        /* Release ref returned by port_name_to_eventlink */
        ipc_eventlink_deallocate(wait_ipc_eventlink);
    }
    return encode_eventlink_count_and_error(count, kr);
}

/*
 * Name: ipc_eventlink_signal_wait_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until the local signal count exceeds the
 * specified count or the deadline passes.
 *
 * Args:
 * wait_eventlink: eventlink for wait
 * signal_eventlink: eventlink for signal
 * deadline: deadline in mach_absolute_time
 * count: in/out signal count to wait on
 * eventlink_option: eventlink option
 *
 * Returns:
 * KERN_SUCCESS on Success.
 * The signal count is returned implicitly in the count arg.
 */
static kern_return_t
ipc_eventlink_signal_wait_internal(
    struct ipc_eventlink *wait_eventlink,
    struct ipc_eventlink *signal_eventlink,
    uint64_t deadline,
    uint64_t *count,
    ipc_eventlink_option_t eventlink_option)
{
    spl_t s;
    kern_return_t kr = KERN_ALREADY_WAITING;
    thread_t self = current_thread();
    struct ipc_eventlink_base *ipc_eventlink_base = wait_eventlink->el_base;
    thread_t handoff_thread = THREAD_NULL;
    thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;
    uint64_t old_signal_count;
    wait_result_t wr;

    s = splsched();
    ipc_eventlink_lock(wait_eventlink);

    /* Check if eventlink is terminated */
    if (!ipc_eventlink_active(wait_eventlink)) {
        kr = KERN_TERMINATED;
        goto unlock;
    }

    /* Check if the waiting thread is associated with the eventlink */
    if (wait_eventlink->el_thread != THREAD_ASSOCIATE_WILD &&
        wait_eventlink->el_thread != self) {
        kr = KERN_INVALID_ARGUMENT;
        goto unlock;
    }

    /* In the associate-on-wait case, check if another thread is already waiting */
    if (wait_eventlink->el_thread == THREAD_ASSOCIATE_WILD &&
        wait_eventlink->el_wait_counter != UINT64_MAX) {
        kr = KERN_INVALID_ARGUMENT;
        goto unlock;
    }

    /* Check if the signal count already exceeds the count provided */
    if (*count < wait_eventlink->el_sync_counter) {
        *count = wait_eventlink->el_sync_counter;
        kr = KERN_SUCCESS;
    } else if (eventlink_option & IPC_EVENTLINK_NO_WAIT) {
        /* The no-wait option was passed; return timed out instead of blocking */
        *count = wait_eventlink->el_sync_counter;
        kr = KERN_OPERATION_TIMED_OUT;
    } else {
        /* Update the wait counter and add the thread to the waitq */
        wait_eventlink->el_wait_counter = *count;
        old_signal_count = wait_eventlink->el_sync_counter;

        thread_set_pending_block_hint(self, kThreadWaitEventlink);
        (void)waitq_assert_wait64_locked(
            &ipc_eventlink_base->elb_waitq,
            CAST_EVENT64_T(wait_eventlink),
            THREAD_ABORTSAFE,
            TIMEOUT_URGENCY_USER_NORMAL,
            deadline, TIMEOUT_NO_LEEWAY,
            self);

        eventlink_option |= IPC_EVENTLINK_HANDOFF;
    }

    /* Check if we need to signal the other side of the eventlink */
    if (signal_eventlink != IPC_EVENTLINK_NULL) {
        kern_return_t signal_kr;
        signal_kr = ipc_eventlink_signal_internal_locked(signal_eventlink,
            eventlink_option);

        if (signal_kr == KERN_NOT_WAITING) {
            assert(self->handoff_thread == THREAD_NULL);
        }
    }

    if (kr != KERN_ALREADY_WAITING) {
        goto unlock;
    }

    if (self->handoff_thread) {
        handoff_thread = self->handoff_thread;
        self->handoff_thread = THREAD_NULL;
        handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
    }

    ipc_eventlink_unlock(wait_eventlink);
    splx(s);

    wr = thread_handoff_deallocate(handoff_thread, handoff_option);
    kr = ipc_eventlink_convert_wait_result(wr);

    assert(self->handoff_thread == THREAD_NULL);

    /* Increment the count value if eventlink_signal was called */
    if (kr == KERN_SUCCESS) {
        *count += 1;
    } else {
        *count = old_signal_count;
    }

    return kr;

unlock:
    ipc_eventlink_unlock(wait_eventlink);
    splx(s);
    assert(self->handoff_thread == THREAD_NULL);

    return kr;
}
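
/*
 * Counter protocol, by way of a worked example: a waiter passing
 * *count == 5 returns immediately if el_sync_counter is already 6 or
 * more. Otherwise it records 5 in el_wait_counter and blocks. A later
 * signal bumps el_sync_counter to 6, sees 6 > 5, resets el_wait_counter
 * to UINT64_MAX and wakes the waiter, which then reports the new count
 * of 6.
 */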

/*
 * Name: ipc_eventlink_convert_wait_result
 *
 * Description: Convert a wait result to a return value
 * for the wait trap.
 *
 * Args:
 * wait_result: result from thread handoff
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result)
{
    switch (wait_result) {
    case THREAD_AWAKENED:
        return KERN_SUCCESS;

    case THREAD_TIMED_OUT:
        return KERN_OPERATION_TIMED_OUT;

    case THREAD_INTERRUPTED:
        return KERN_ABORTED;

    case THREAD_RESTART:
        return KERN_TERMINATED;

    default:
        panic("ipc_eventlink_wait_block");
        return KERN_FAILURE;
    }
}

/*
 * Name: ipc_eventlink_signal_internal_locked
 *
 * Description: Increment the sync count of the eventlink and
 * wake up the waiting thread if the sync counter is greater
 * than the wait counter.
 *
 * Args:
 * signal_eventlink: eventlink to signal
 * eventlink_option: options
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_signal_internal_locked(
    struct ipc_eventlink *signal_eventlink,
    ipc_eventlink_option_t eventlink_option)
{
    kern_return_t kr = KERN_NOT_WAITING;
    struct ipc_eventlink_base *ipc_eventlink_base = signal_eventlink->el_base;
    waitq_wakeup_flags_t flags = WAITQ_KEEP_LOCKED;

    if (eventlink_option & IPC_EVENTLINK_FORCE_WAKEUP) {
        /* Adjust the wait counter */
        signal_eventlink->el_wait_counter = UINT64_MAX;

        kr = waitq_wakeup64_all_locked(
            &ipc_eventlink_base->elb_waitq,
            CAST_EVENT64_T(signal_eventlink),
            THREAD_RESTART, flags);
        return kr;
    }

    /* Increment the eventlink sync count */
    signal_eventlink->el_sync_counter++;

    /* Check if a thread needs to be woken up */
    if (signal_eventlink->el_sync_counter > signal_eventlink->el_wait_counter) {
        if (eventlink_option & IPC_EVENTLINK_HANDOFF) {
            flags |= WAITQ_HANDOFF;
        }

        /* Adjust the wait counter */
        signal_eventlink->el_wait_counter = UINT64_MAX;

        kr = waitq_wakeup64_one_locked(
            &ipc_eventlink_base->elb_waitq,
            CAST_EVENT64_T(signal_eventlink),
            THREAD_AWAKENED, flags);
    }

    return kr;
}
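
/*
 * Force-wakeup path, for reference: destroy and disassociate pass
 * IPC_EVENTLINK_FORCE_WAKEUP, which skips the counter check and wakes all
 * waiters with THREAD_RESTART; ipc_eventlink_convert_wait_result() then
 * surfaces that to the waiting thread as KERN_TERMINATED.
 */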

/*
 * Name: ipc_eventlink_reference
 *
 * Description: Increment the ref on the ipc eventlink struct.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns: None
 */
void
ipc_eventlink_reference(
    struct ipc_eventlink *ipc_eventlink)
{
    os_ref_retain(&ipc_eventlink->el_base->elb_ref_count);
}

/*
 * Name: ipc_eventlink_deallocate
 *
 * Description: Decrement the ref on the ipc eventlink struct.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns: None
 */
void
ipc_eventlink_deallocate(
    struct ipc_eventlink *ipc_eventlink)
{
    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return;
    }

    struct ipc_eventlink_base *ipc_eventlink_base = ipc_eventlink->el_base;

    if (os_ref_release(&ipc_eventlink_base->elb_ref_count) > 0) {
        return;
    }

    waitq_deinit(&ipc_eventlink_base->elb_waitq);

    assert(!ipc_eventlink_active(ipc_eventlink));

#if DEVELOPMENT || DEBUG
    /* Remove ipc_eventlink from the global list */
    global_ipc_eventlink_lock();
    queue_remove(&ipc_eventlink_list, ipc_eventlink_base,
        struct ipc_eventlink_base *, elb_global_elm);
    global_ipc_eventlink_unlock();
#endif
    zfree(ipc_eventlink_zone, ipc_eventlink_base);
}

/*
 * Name: convert_port_to_eventlink
 *
 * Description: Convert from a port to an ipc eventlink.
 * Produces an ipc eventlink ref, which may be null.
 *
 * Args:
 * port: eventlink port
 *
 * Returns:
 * ipc_eventlink on Success.
 */
struct ipc_eventlink *
convert_port_to_eventlink(
    mach_port_t port)
{
    struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;

    if (IP_VALID(port)) {
        ip_mq_lock(port);
        convert_port_to_eventlink_locked(port, &ipc_eventlink);
        ip_mq_unlock(port);
    }

    return ipc_eventlink;
}

/*
 * Name: convert_port_to_eventlink_locked
 *
 * Description: Convert from a locked eventlink port to an ipc eventlink.
 * Produces an ipc eventlink ref, which may be null.
 *
 * Args:
 * port: locked eventlink port
 * ipc_eventlink_ptr: pointer to return the ipc_eventlink.
 *
 * Returns:
 * KERN_SUCCESS on Success.
 * KERN_TERMINATED on inactive eventlink.
 */
static kern_return_t
convert_port_to_eventlink_locked(
    ipc_port_t port,
    struct ipc_eventlink **ipc_eventlink_ptr)
{
    kern_return_t kr = KERN_INVALID_CAPABILITY;
    struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;

    if (ip_active(port) && ip_kotype(port) == IKOT_EVENTLINK) {
        ipc_eventlink = ipc_kobject_get_raw(port, IKOT_EVENTLINK);
        if (ipc_eventlink) {
            ipc_eventlink_reference(ipc_eventlink);
            kr = KERN_SUCCESS;
        } else {
            kr = KERN_TERMINATED;
        }
    }

    *ipc_eventlink_ptr = ipc_eventlink;
    return kr;
}

/*
 * Name: port_name_to_eventlink
 *
 * Description: Convert from a port name in the current
 * space to an ipc eventlink. Produces an ipc eventlink ref,
 * which may be null.
 *
 * Args:
 * name: eventlink port name
 * ipc_eventlink_ptr: ptr to pass the eventlink struct
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
port_name_to_eventlink(
    mach_port_name_t name,
    struct ipc_eventlink **ipc_eventlink_ptr)
{
    ipc_port_t kern_port;
    kern_return_t kr;

    if (!MACH_PORT_VALID(name)) {
        *ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
        return KERN_INVALID_NAME;
    }

    kr = ipc_port_translate_send(current_space(), name, &kern_port);
    if (kr != KERN_SUCCESS) {
        *ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
        return kr;
    }
    /* We have the port locked */
    assert(IP_VALID(kern_port));

    kr = convert_port_to_eventlink_locked(kern_port, ipc_eventlink_ptr);
    ip_mq_unlock(kern_port);

    return kr;
}

/*
 * Name: ipc_eventlink_no_senders
 *
 * Description: Destroy an ipc_eventlink and wake up all waiting threads.
 *
 * Returns:
 * None.
 */
static void
ipc_eventlink_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
{
    kern_return_t kr;
    struct ipc_eventlink *ipc_eventlink;

    if (!ip_active(port)) {
        return;
    }

    /* Get an ipc_eventlink reference */
    ip_mq_lock(port);

    /* Make sure the port is still active */
    if (!ip_active(port)) {
        ip_mq_unlock(port);
        return;
    }

    convert_port_to_eventlink_locked(port, &ipc_eventlink);
    ip_mq_unlock(port);

    kr = ipc_eventlink_destroy_internal(ipc_eventlink);
    if (kr == KERN_TERMINATED) {
        /* The eventlink is already inactive, destroy the port */
        ipc_kobject_dealloc_port(port, mscount, IKOT_EVENTLINK);
    }

    /* Drop the reference returned by convert_port_to_eventlink_locked */
    ipc_eventlink_deallocate(ipc_eventlink);
}

/*
 * Recover the enclosing ipc_eventlink_base from its embedded waitq
 * (the usual container-of idiom).
 */
#define WAITQ_TO_EVENTLINK(wq) ((struct ipc_eventlink_base *) ((uintptr_t)(wq) - offsetof(struct ipc_eventlink_base, elb_waitq)))

/*
 * Name: kdp_eventlink_find_owner
 *
 * Description: Find who will signal the waiting thread.
 *
 * Args:
 * waitq: eventlink waitq
 * event: eventlink wait event
 * waitinfo: waitinfo struct
 *
 * Returns:
 * None.
 */
void
kdp_eventlink_find_owner(
    struct waitq *waitq,
    event64_t event,
    thread_waitinfo_t *waitinfo)
{
    assert(waitinfo->wait_type == kThreadWaitEventlink);
    waitinfo->owner = 0;
    waitinfo->context = 0;

    if (waitq_held(waitq)) {
        return;
    }

    struct ipc_eventlink_base *ipc_eventlink_base = WAITQ_TO_EVENTLINK(waitq);

    if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[0])) {
        /* Use the other end of the eventlink for the signal thread */
        if (ipc_eventlink_base->elb_eventlink[1].el_thread != THREAD_ASSOCIATE_WILD) {
            waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[1].el_thread);
        } else {
            waitinfo->owner = 0;
        }
    } else if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[1])) {
        /* Use the other end of the eventlink for the signal thread */
        if (ipc_eventlink_base->elb_eventlink[0].el_thread != THREAD_ASSOCIATE_WILD) {
            waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[0].el_thread);
        } else {
            waitinfo->owner = 0;
        }
    }
}