/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 */
/*
 */
/*
 *	File:	ipc/ipc_object.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Functions to manipulate IPC objects.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/message.h>

#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <kern/ipc_kobject.h>
#include <kern/zalloc_internal.h> // zone_id_for_element

#include <ipc/ipc_types.h>
#include <ipc/ipc_importance.h>
#include <ipc/port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>

#include <security/mac_mach_internal.h>

static struct mpsc_daemon_queue ipc_object_deallocate_queue;
SECURITY_READ_ONLY_LATE(zone_t) ipc_object_zones[IOT_NUMBER];

/*
 * In order to do lockfree lookups in the IPC space, we combine two schemes:
 *
 * - the ipc table pointer is protected with hazard pointers to allow
 *   dereferencing it while only holding a ref on a task or space;
 *
 * - we use ipc_object_lock_allow_invalid() in order to lock objects and
 *   validate that they are the droids we're looking for.
 *
 * The second half requires that a virtual address which ever held a port
 * either holds a port, or nothing, forever. To get this property, we just
 * piggy-back on the zone sequestering security feature, which gives us
 * exactly that.
 *
 * However, sequestering really only "works" on a sufficiently large address
 * space, especially for a resource that can be made by userspace at will,
 * so we can't do lockless lookups on ILP32.
 *
 * Note: this scheme is incompatible with kasan quarantines
 * (because kasan stores backtraces inside freed elements,
 * which can let the waitq lock appear "valid" by accident when
 * elements are freed).
 */
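
/*
 * Purely illustrative sketch of the resulting lookup pattern (not a real
 * call site in this file; everything other than ipc_object_lock_allow_invalid()
 * and io_unlock() below is a stand-in):
 *
 *	io = entry->ie_object;			// snapshot, possibly stale
 *	if (ipc_object_lock_allow_invalid(io)) {
 *		// `io` is a live port or port-set; revalidate that the
 *		// entry still names it before trusting the lookup.
 *		io_unlock(io);
 *	}
 */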
#define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM | ZC_SEQUESTER)

ZONE_INIT(&ipc_object_zones[IOT_PORT],
    "ipc ports", sizeof(struct ipc_port),
    IPC_OBJECT_ZC_BASE | ZC_CACHING, ZONE_ID_IPC_PORT, NULL);

ZONE_INIT(&ipc_object_zones[IOT_PORT_SET],
    "ipc port sets", sizeof(struct ipc_pset),
    IPC_OBJECT_ZC_BASE, ZONE_ID_IPC_PORT_SET, NULL);

__attribute__((noinline))
static void
ipc_object_free(unsigned int otype, ipc_object_t object, bool last_ref)
{
	if (last_ref) {
		if (otype == IOT_PORT) {
			ipc_port_finalize(ip_object_to_port(object));
		} else {
			ipc_pset_finalize(ips_object_to_pset(object));
		}
	}
	zfree(ipc_object_zones[otype], object);
}

__attribute__((noinline))
static void
ipc_object_free_safe(ipc_object_t object)
{
	struct waitq *wq = io_waitq(object);

	assert(!waitq_is_valid(wq));
	assert(os_atomic_load(&wq->waitq_defer.mpqc_next, relaxed) == NULL);
	mpsc_daemon_enqueue(&ipc_object_deallocate_queue,
	    &wq->waitq_defer, MPSC_QUEUE_NONE);
}

static void
ipc_object_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct waitq *wq = __container_of(e, struct waitq, waitq_defer);
	ipc_object_t io = io_from_waitq(wq);

	assert(dq == &ipc_object_deallocate_queue);

	os_atomic_store(&wq->waitq_defer.mpqc_next, NULL, relaxed);
	ipc_object_free(io_otype(io), io, true);
}

void
ipc_object_deallocate_register_queue(void)
{
	thread_deallocate_daemon_register_queue(&ipc_object_deallocate_queue,
	    ipc_object_deallocate_queue_invoke);
}

/*
 *	Routine:	ipc_object_reference
 *	Purpose:
 *		Take a reference to an object.
 */

void
ipc_object_reference(
	ipc_object_t io)
{
	static_assert(sizeof(os_ref_atomic_t) == sizeof(io->io_references));
	os_ref_retain_raw((os_ref_atomic_t *)&io->io_references, NULL);
}

/*
 *	Routine:	ipc_object_release
 *	Purpose:
 *		Release a reference to an object.
 */

void
ipc_object_release(
	ipc_object_t io)
{
#if DEBUG
	assert(get_preemption_level() == 0);
#endif

	if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
		/* Free the object */
		ipc_object_free(io_otype(io), io, true);
	}
}

/*
 *	Routine:	ipc_object_release_safe
 *	Purpose:
 *		Release a reference to an object.
 *		Unlike ipc_object_release(), this may be called with
 *		preemption disabled; in that case the final free is
 *		deferred to the deallocate daemon.
 */

void
ipc_object_release_safe(
	ipc_object_t io)
{
	if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
		if (get_preemption_level() == 0) {
			ipc_object_free(io_otype(io), io, true);
		} else {
			ipc_object_free_safe(io);
		}
	}
}

/*
 *	Routine:	ipc_object_release_live
 *	Purpose:
 *		Release a reference to an object that isn't the last one.
 */

void
ipc_object_release_live(
	ipc_object_t io)
{
	os_ref_release_live_raw((os_ref_atomic_t *)&io->io_references, NULL);
}
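
/*
 * Summary of the three release variants above (restating the code, not new
 * policy): ipc_object_release() expects preemption to be enabled because the
 * final free may block; ipc_object_release_safe() may be called with
 * preemption disabled and then defers the final free to the deallocate
 * daemon; ipc_object_release_live() is only for references known not to be
 * the last one and never frees the object.
 */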

/*
 *	Routine:	ipc_object_translate
 *	Purpose:
 *		Look up an object in a space.
 *	Conditions:
 *		Nothing locked before. If successful, the object
 *		is returned active and locked. The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Object returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	The name doesn't denote a right.
 *		KERN_INVALID_RIGHT	Name doesn't denote the correct right.
 */
kern_return_t
ipc_object_translate(
	ipc_space_t space,
	mach_port_name_t name,
	mach_port_right_t right,
	ipc_object_t *objectp)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	kern_return_t kr;

	if (!MACH_PORT_RIGHT_VALID_TRANSLATE(right)) {
		return KERN_INVALID_RIGHT;
	}

	kr = ipc_right_lookup_read(space, name, &bits, &object);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* object is locked and active */

	if ((bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
		io_unlock(object);
		return KERN_INVALID_RIGHT;
	}

	*objectp = object;
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_object_translate_two
 *	Purpose:
 *		Look up two objects in a space.
 *	Conditions:
 *		Nothing locked before. If successful, the objects
 *		are returned locked. The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Objects returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	A name doesn't denote a right.
 *		KERN_INVALID_RIGHT	A name doesn't denote the correct right.
 */

kern_return_t
ipc_object_translate_two(
	ipc_space_t space,
	mach_port_name_t name1,
	mach_port_right_t right1,
	ipc_object_t *objectp1,
	mach_port_name_t name2,
	mach_port_right_t right2,
	ipc_object_t *objectp2)
{
	ipc_entry_t entry1;
	ipc_entry_t entry2;
	ipc_object_t object1, object2;
	kern_return_t kr;
	boolean_t doguard = TRUE;

	kr = ipc_right_lookup_two_read(space, name1, &entry1, name2, &entry2);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is read-locked and active */

	if ((entry1->ie_bits & MACH_PORT_TYPE(right1)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right1 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry1->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	if ((entry2->ie_bits & MACH_PORT_TYPE(right2)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right2 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry2->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	object1 = entry1->ie_object;
	assert(object1 != IO_NULL);
	io_lock(object1);
	if (!io_active(object1)) {
		io_unlock(object1);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	object2 = entry2->ie_object;
	assert(object2 != IO_NULL);
	io_lock(object2);
	if (!io_active(object2)) {
		io_unlock(object1);
		io_unlock(object2);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	*objectp1 = object1;
	*objectp2 = object2;

	is_read_unlock(space);
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_object_alloc_dead
 *	Purpose:
 *		Allocate a dead-name entry.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		The dead name is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 */

kern_return_t
ipc_object_alloc_dead(
	ipc_space_t space,
	mach_port_name_t *namep)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc(space, IO_NULL, namep, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked */

	/* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */

	entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
	ipc_entry_modified(space, *namep, entry);
	is_write_unlock(space);
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_object_alloc
 *	Purpose:
 *		Allocate an object.
 *	Conditions:
 *		Nothing locked.
 *		The space is write locked on successful return.
 *		The caller doesn't get a reference for the object.
 *	Returns:
 *		KERN_SUCCESS		The object is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 */

kern_return_t
ipc_object_alloc(
	ipc_space_t space,
	ipc_object_type_t otype,
	mach_port_type_t type,
	mach_port_urefs_t urefs,
	mach_port_name_t *namep,
	ipc_object_t *objectp)
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(otype));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	*namep = CAST_MACH_PORT_TO_NAME(object);
	kr = ipc_entry_alloc(space, object, namep, &entry);
	if (kr != KERN_SUCCESS) {
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	entry->ie_bits |= type | urefs;
	ipc_entry_modified(space, *namep, entry);

	*objectp = object;
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_object_alloc_name
 *	Purpose:
 *		Allocate an object, with a specific name.
 *	Conditions:
 *		Nothing locked. If successful, the object is returned locked.
 *		The caller doesn't get a reference for the object.
 *
 *		finish_init() must call an ipc_*_init function
 *		that will return the object locked (using IPC_PORT_INIT_LOCKED,
 *		or SYNC_POLICY_INIT_LOCKED, or equivalent).
 *
 *	Returns:
 *		KERN_SUCCESS		The object is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 */

kern_return_t
ipc_object_alloc_name(
	ipc_space_t space,
	ipc_object_type_t otype,
	mach_port_type_t type,
	mach_port_urefs_t urefs,
	mach_port_name_t name,
	ipc_object_t *objectp,
	void (^finish_init)(ipc_object_t))
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(otype));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	if (ipc_right_inuse(entry)) {
		is_write_unlock(space);
		ipc_object_free(otype, object, false);
		return KERN_NAME_EXISTS;
	}

	entry->ie_bits |= type | urefs;
	entry->ie_object = object;

	finish_init(object);
	/* object is locked */
	io_lock_held(object);

	ipc_entry_modified(space, name, entry);
	is_write_unlock(space);

	*objectp = object;
	return KERN_SUCCESS;
}
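
/*
 * Hypothetical caller sketch, for illustration only: the finish_init block
 * runs the type-specific initialization and must leave the object locked
 * (the literal arguments below are made up):
 *
 *	kr = ipc_object_alloc_name(space, IOT_PORT, MACH_PORT_TYPE_RECEIVE, 0,
 *	    name, &object, ^(ipc_object_t o) {
 *		// type-specific init that returns `o` locked, e.g. an
 *		// IPC_PORT_INIT_LOCKED / SYNC_POLICY_INIT_LOCKED style init
 *	});
 */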

/*	Routine:	ipc_object_validate
 *	Purpose:
 *		Validates an ipc port or port set as belonging to the correct
 *		zone.
 */

void
ipc_object_validate(
	ipc_object_t object,
	ipc_object_type_t type)
{
	if (type != IOT_PORT_SET) {
		ip_validate(object);
	} else {
		ips_validate(object);
	}
}

void
ipc_object_validate_aligned(
	ipc_object_t object,
	ipc_object_type_t type)
{
	if (type != IOT_PORT_SET) {
		ip_validate_aligned(object);
	} else {
		ips_validate_aligned(object);
	}
}

/*
 *	Routine:	ipc_object_copyin_type
 *	Purpose:
 *		Convert a send type name to a received type name.
 */

mach_msg_type_name_t
ipc_object_copyin_type(
	mach_msg_type_name_t msgt_name)
{
	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE:
		return MACH_MSG_TYPE_PORT_RECEIVE;

	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		return MACH_MSG_TYPE_PORT_SEND_ONCE;

	case MACH_MSG_TYPE_MOVE_SEND:
	case MACH_MSG_TYPE_MAKE_SEND:
	case MACH_MSG_TYPE_COPY_SEND:
		return MACH_MSG_TYPE_PORT_SEND;

	case MACH_MSG_TYPE_DISPOSE_RECEIVE:
	case MACH_MSG_TYPE_DISPOSE_SEND:
	case MACH_MSG_TYPE_DISPOSE_SEND_ONCE:
	/* fall thru */
	default:
		return MACH_MSG_TYPE_PORT_NONE;
	}
}
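
/*
 * For example, MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND and
 * MACH_MSG_TYPE_MOVE_SEND all copy in as MACH_MSG_TYPE_PORT_SEND, while the
 * DISPOSE_* pseudo-dispositions fall through to MACH_MSG_TYPE_PORT_NONE.
 */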

/*
 *	Routine:	ipc_object_copyin
 *	Purpose:
 *		Copyin a capability from a space.
 *		If successful, the caller gets a ref
 *		for the resulting object, unless it is IO_DEAD.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
 */

kern_return_t
ipc_object_copyin(
	ipc_space_t space,
	mach_port_name_t name,
	mach_msg_type_name_t msgt_name,
	ipc_object_t *objectp,
	mach_port_context_t context,
	mach_msg_guard_flags_t *guard_flags,
	ipc_object_copyin_flags_t copyin_flags)
{
	ipc_entry_t entry;
	ipc_port_t soright;
	ipc_port_t release_port;
	kern_return_t kr;
	int assertcnt = 0;

	ipc_object_copyin_flags_t copyin_mask = IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND
	    | IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE;
	copyin_mask = (copyin_flags & copyin_mask) | IPC_OBJECT_COPYIN_FLAGS_DEADOK;

	/*
	 * We allow moving the immovable receive right of a service port when
	 * the caller is launchd.
	 */
	task_t task = current_task_early();
#ifdef MACH_BSD
	if (task && proc_isinitproc(get_bsdtask_info(task))) {
		copyin_mask |= IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE;
	}
#endif

	/*
	 * Could first try a read lock when doing
	 * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
	 * and MACH_MSG_TYPE_MAKE_SEND_ONCE.
	 */

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	release_port = IP_NULL;
	kr = ipc_right_copyin(space, name, entry,
	    msgt_name, copyin_mask,
	    objectp, &soright,
	    &release_port,
	    &assertcnt,
	    context,
	    guard_flags);
	is_write_unlock(space);

	if (moved_provisional_reply_port(msgt_name, soright)) {
		send_prp_telemetry(-1);
	}

#if IMPORTANCE_INHERITANCE
	if (0 < assertcnt && ipc_importance_task_is_any_receiver_type(current_task()->task_imp_base)) {
		ipc_importance_task_drop_internal_assertion(current_task()->task_imp_base, assertcnt);
	}
#endif /* IMPORTANCE_INHERITANCE */

	if (release_port != IP_NULL) {
		ip_release(release_port);
	}

	if ((kr == KERN_SUCCESS) && (soright != IP_NULL)) {
		ipc_notify_port_deleted(soright, name);
	}

	return kr;
}

/*
 *	Routine:	ipc_object_copyin_from_kernel
 *	Purpose:
 *		Copyin a naked capability from the kernel.
 *
 *		MACH_MSG_TYPE_MOVE_RECEIVE
 *			The receiver must be ipc_space_kernel
 *			or the receive right must already be in limbo.
 *			Consumes the naked receive right.
 *		MACH_MSG_TYPE_COPY_SEND
 *			A naked send right must be supplied.
 *			The port gains a reference, and a send right
 *			if the port is still active.
 *		MACH_MSG_TYPE_MAKE_SEND
 *			The receiver must be ipc_space_kernel.
 *			The port gains a reference and a send right.
 *		MACH_MSG_TYPE_MOVE_SEND
 *			Consumes a naked send right.
 *		MACH_MSG_TYPE_MAKE_SEND_ONCE
 *			The port gains a reference and a send-once right.
 *			The receiver may also be the caller of the device
 *			subsystem, so no assertion is made.
 *		MACH_MSG_TYPE_MOVE_SEND_ONCE
 *			Consumes a naked send-once right.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_copyin_from_kernel(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	assert(IO_VALID(object));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		require_ip_active(port);
		if (ip_in_a_space(port)) {
			assert(ip_in_space(port, ipc_space_kernel));
			assert(port->ip_immovable_receive == 0);

			/* relevant part of ipc_port_clear_receiver */
			port->ip_mscount = 0;

			/* port transitions to IN-LIMBO state */
			port->ip_receiver_name = MACH_PORT_NULL;
			port->ip_destination = IP_NULL;
		}
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(port->ip_srights > 0);
		}
		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
			assert((ip_in_space(port, ipc_space_kernel)) ||
			    (port->ip_receiver->is_node_id != HOST_LOCAL_NODE));
			port->ip_mscount++;
		}

		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		/* move naked send right into the message */
		assert(ip_object_to_port(object)->ip_srights);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
		}
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* move naked send-once right into the message */
		assert(ip_object_to_port(object)->ip_sorights);
		break;
	}

	default:
		panic("ipc_object_copyin_from_kernel: strange rights");
	}
}

/*
 *	Routine:	ipc_object_destroy
 *	Purpose:
 *		Destroys a naked capability.
 *		Consumes a ref for the object.
 *
 *		A receive right should be in limbo or in transit.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_destroy(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	ipc_port_t port = ip_object_to_port(object);

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		ip_mq_lock(port);
		ipc_notify_send_once_and_unlock(port);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE:
		ipc_port_release_receive(port);
		break;

	default:
		panic("ipc_object_destroy: strange rights");
	}
}

/*
 *	Routine:	ipc_object_destroy_dest
 *	Purpose:
 *		Destroys a naked capability for the destination of
 *		a message. Consumes a ref for the object.
 *
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_destroy_dest(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	ipc_port_t port = ip_object_to_port(object);

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		ip_mq_lock(port);
		ipc_notify_send_once_and_unlock(port);
		break;

	default:
		panic("ipc_object_destroy_dest: strange rights");
	}
}

/*
 *	Routine:	ipc_object_insert_send_right
 *	Purpose:
 *		Insert a send right into an object already in the space.
 *		The specified name must already point to a valid object.
 *
 *		Note: This really is a combined copyin()/copyout(),
 *		that avoids most of the overhead of being implemented that way.
 *
 *		This is the fastpath for mach_port_insert_right.
 *
 *	Conditions:
 *		Nothing locked.
 *
 *		msgt_name must be MACH_MSG_TYPE_MAKE_SEND or
 *		MACH_MSG_TYPE_COPY_SEND.
 *
 *	Returns:
 *		KERN_SUCCESS		Copied out object, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_CAPABILITY	The object is dead.
 *		KERN_RIGHT_EXISTS	Space has rights under another name.
 */
kern_return_t
ipc_object_insert_send_right(
	ipc_space_t space,
	mach_port_name_t name,
	mach_msg_type_name_t msgt_name)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;

	assert(msgt_name == MACH_MSG_TYPE_MAKE_SEND ||
	    msgt_name == MACH_MSG_TYPE_COPY_SEND);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	bits = entry->ie_bits;
	object = entry->ie_object;

	if (!IO_VALID(object)) {
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}
	if ((bits & MACH_PORT_TYPE_PORT_RIGHTS) == 0) {
		is_write_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = ip_object_to_port(object);

	ip_mq_lock(port);
	if (!ip_active(port)) {
		kr = KERN_INVALID_CAPABILITY;
	} else if (msgt_name == MACH_MSG_TYPE_MAKE_SEND) {
		if (bits & MACH_PORT_TYPE_RECEIVE) {
			port->ip_mscount++;
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				ip_srights_inc(port);
				bits |= MACH_PORT_TYPE_SEND;
			}
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits += 1; /* increment urefs */
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	} else { // MACH_MSG_TYPE_COPY_SEND
		if (bits & MACH_PORT_TYPE_SEND) {
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits + 1; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	}

	ip_mq_unlock(port);
	is_write_unlock(space);

	return kr;
}

/*
 *	Routine:	ipc_object_copyout
 *	Purpose:
 *		Copyout a capability, placing it into a space.
 *		Always consumes a ref for the object.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Copied out object, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_CAPABILITY	The object is dead.
 *		KERN_NO_SPACE		No room in space for another right.
 *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
 *			and overflow wasn't specified.
 */

kern_return_t
ipc_object_copyout(
	ipc_space_t space,
	ipc_object_t object,
	mach_msg_type_name_t msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t *context,
	mach_msg_guard_flags_t *guard_flags,
	mach_port_name_t *namep)
{
	struct knote *kn = current_thread()->ith_knote;
	mach_port_name_t name;
	ipc_port_t port = ip_object_to_port(object);
	ipc_entry_t entry;
	kern_return_t kr;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	if (ITH_KNOTE_VALID(kn, msgt_name)) {
		filt_machport_turnstile_prepare_lazily(kn, msgt_name, port);
	}

	is_write_lock(space);

	for (;;) {
		ipc_port_t port_subst = IP_NULL;

		if (!is_active(space)) {
			is_write_unlock(space);
			kr = KERN_INVALID_TASK;
			goto out;
		}

		kr = ipc_entries_hold(space, 1);
		if (kr != KERN_SUCCESS) {
			/* unlocks/locks space, so must start again */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				goto out;
			}
			continue;
		}

		ip_mq_lock_check_aligned(port);
		if (!ip_active(port)) {
			ip_mq_unlock(port);
			is_write_unlock(space);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* Don't actually copyout rights we aren't allowed to */
		if (!ip_label_check(space, port, msgt_name, &flags, &port_subst)) {
			ip_mq_unlock(port);
			is_write_unlock(space);
			assert(port_subst == IP_NULL);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* is the kolabel requesting a substitution */
		if (port_subst != IP_NULL) {
			/*
			 * port is unlocked, its right consumed
			 * space is unlocked
			 */
			assert(msgt_name == MACH_MSG_TYPE_PORT_SEND);
			port = port_subst;
			if (!IP_VALID(port)) {
				object = IO_DEAD;
				kr = KERN_INVALID_CAPABILITY;
				goto out;
			}

			object = ip_to_object(port);
			is_write_lock(space);
			continue;
		}

		break;
	}

	/* space is write-locked and active, object is locked and active */

	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &name, &entry)) {
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else {
		ipc_entry_claim(space, object, &name, &entry);
	}

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, flags, context, guard_flags, object);

	/* object is unlocked */
	is_write_unlock(space);

out:
	if (kr == KERN_SUCCESS) {
		*namep = name;
	} else if (IO_VALID(object)) {
		ipc_object_destroy(object, msgt_name);
	}

	return kr;
}

/*
 *	Routine:	ipc_object_copyout_name
 *	Purpose:
 *		Copyout a capability, placing it into a space.
 *		The specified name is used for the capability.
 *		If successful, consumes a ref for the object.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Copied out object, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_CAPABILITY	The object is dead.
 *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
 *			and overflow wasn't specified.
 *		KERN_RIGHT_EXISTS	Space has rights under another name.
 *		KERN_NAME_EXISTS	Name is already used.
 *		KERN_INVALID_VALUE	Supplied port name is invalid.
 */

kern_return_t
ipc_object_copyout_name(
	ipc_space_t space,
	ipc_object_t object,
	mach_msg_type_name_t msgt_name,
	mach_port_name_t name)
{
	ipc_port_t port = ip_object_to_port(object);
	mach_port_name_t oname;
	ipc_entry_t oentry;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	int assertcnt = 0;
	ipc_importance_task_t task_imp = IIT_NULL;
#endif /* IMPORTANCE_INHERITANCE */

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	ip_mq_lock_check_aligned(port);

	/*
	 * Don't actually copyout rights we aren't allowed to.
	 *
	 * In particular, kolabel-ed objects do not allow callers
	 * to pick the name they end up with.
	 */
	if (!ip_active(port) || ip_is_kolabeled(port)) {
		ip_mq_unlock(port);
		if (!ipc_right_inuse(entry)) {
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		}
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	/* space is write-locked and active, object is locked and active */

	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &oname, &oentry)) {
		if (name != oname) {
			ip_mq_unlock(port);
			if (!ipc_right_inuse(entry)) {
				ipc_entry_dealloc(space, IO_NULL, name, entry);
			}
			is_write_unlock(space);
			return KERN_RIGHT_EXISTS;
		}

		assert(entry == oentry);
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else if (ipc_right_inuse(entry)) {
		ip_mq_unlock(port);
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	} else {
		assert(entry->ie_object == IO_NULL);

		entry->ie_object = object;
	}

#if IMPORTANCE_INHERITANCE
	/*
	 * We are slamming a receive right into the space, without
	 * first having been enqueued on a port destined there. So,
	 * we have to arrange to boost the task appropriately if this
	 * port has assertions (and the task wants them).
	 */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		if (space->is_task != TASK_NULL) {
			task_imp = space->is_task->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(task_imp)) {
				assertcnt = port->ip_impcount;
				ipc_importance_task_reference(task_imp);
			} else {
				task_imp = IIT_NULL;
			}
		}

		/* take port out of limbo */
		port->ip_tempowner = 0;
	}

#endif /* IMPORTANCE_INHERITANCE */

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, object);

	/* object is unlocked */
	is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
	/*
	 * Add the assertions to the task that we captured before
	 */
	if (task_imp != IIT_NULL) {
		ipc_importance_task_hold_internal_assertion(task_imp, assertcnt);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */

	return kr;
}

/*
 *	Routine:	ipc_object_copyout_dest
 *	Purpose:
 *		Translates/consumes the destination right of a message.
 *		This is unlike normal copyout because the right is consumed
 *		in a funny way instead of being given to the receiving space.
 *		The receiver gets his name for the port, if he has receive
 *		rights, otherwise MACH_PORT_NULL.
 *	Conditions:
 *		The object is locked and active. Nothing else locked.
 *		The object is unlocked and loses a reference.
 */

void
ipc_object_copyout_dest(
	ipc_space_t space,
	ipc_object_t object,
	mach_msg_type_name_t msgt_name,
	mach_port_name_t *namep)
{
	mach_port_name_t name;

	assert(IO_VALID(object));
	assert(io_active(object));

	/*
	 * If the space is the receiver/owner of the object,
	 * then we quietly consume the right and return
	 * the space's name for the object. Otherwise
	 * we destroy the right and return MACH_PORT_NULL.
	 */

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_notify_nsenders_t nsrequest = { };

		if (ip_in_space(port, space)) {
			name = ip_get_receiver_name(port);
		} else {
			name = MACH_PORT_NULL;
		}
		ip_srights_dec(port);
		if (port->ip_srights == 0) {
			nsrequest = ipc_notify_no_senders_prepare(port);
		}
		ipc_port_clear_sync_rcv_thread_boost_locked(port);
		/* port unlocked */

		ipc_notify_no_senders_emit(nsrequest);

		ip_release(port);
		break;
	}

	case MACH_MSG_TYPE_PORT_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		assert(port->ip_sorights > 0);

		if (ip_in_space(port, space)) {
			/* quietly consume the send-once right */
			ip_sorights_dec(port);
			name = ip_get_receiver_name(port);
			ipc_port_clear_sync_rcv_thread_boost_locked(port);
			/* port unlocked */
			ip_release(port);
		} else {
			/*
			 * A very bizarre case. The message
			 * was received, but before this copyout
			 * happened the space lost receive rights.
			 * We can't quietly consume the soright
			 * out from underneath some other task,
			 * so generate a send-once notification.
			 */

			ipc_notify_send_once_and_unlock(port);
			name = MACH_PORT_NULL;
		}

		break;
	}

	default:
		panic("ipc_object_copyout_dest: strange rights");
		name = MACH_PORT_DEAD;
	}

	*namep = name;
}

static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_port, ip_waitq));
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_pset, ips_wqset));
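
/*
 * The static asserts above pin down the layout contract behind io_waitq()
 * and io_from_waitq(): both ipc_port and ipc_pset must embed their waitq at
 * the offset struct ipc_object_waitq expects, so the lock routines below can
 * treat either type uniformly through the common waitq.
 */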

/*
 *	Routine:	ipc_object_lock
 *	Purpose:
 *		Validate, then acquire a lock on an ipc object.
 */
void
ipc_object_lock(ipc_object_t io, ipc_object_type_t type)
{
	ipc_object_validate(io, type);
	waitq_lock(io_waitq(io));
}

void
ipc_object_lock_check_aligned(ipc_object_t io, ipc_object_type_t type)
{
	ipc_object_validate_aligned(io, type);
	waitq_lock(io_waitq(io));
}

__abortlike
static void
ipc_object_validate_preflight_panic(ipc_object_t io)
{
	panic("ipc object %p is neither a port nor a port-set", io);
}

/*
 *	Routine:	ipc_object_lock_allow_invalid
 *	Purpose:
 *		Speculatively try to lock an object in an undefined state.
 *
 *		This relies on the fact that IPC object memory is allocated
 *		from sequestered zones, so at a given address, one can find:
 *		1. a valid object,
 *		2. a freed or invalid (uninitialized) object,
 *		3. unmapped memory.
 *
 *		(2) is possible because the zone is made with ZC_ZFREE_CLEARMEM
 *		which ensures freed elements are always zeroed.
 *
 *		(3) is a direct courtesy of waitq_lock_allow_invalid().
 *
 *		In order to disambiguate (1) from (2), we use the "waitq valid"
 *		bit which is part of the lock. When that bit is absent,
 *		waitq_lock() will function as expected, but
 *		waitq_lock_allow_invalid() will not.
 *
 *		Objects are then initialized and destroyed carefully so that
 *		this "valid bit" is only set when the object invariants are
 *		respected.
 *
 *	Returns:
 *		true:  the lock was acquired
 *		false: the object was freed or not initialized.
 */
bool
ipc_object_lock_allow_invalid(ipc_object_t orig_io)
{
	struct waitq *orig_wq = io_waitq(orig_io);
	struct waitq *wq = pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY);

	switch (zone_id_for_element(wq, sizeof(*wq))) {
	case ZONE_ID_IPC_PORT:
	case ZONE_ID_IPC_PORT_SET:
		break;
	default:
#if CONFIG_PROB_GZALLOC
		if (orig_wq != wq) {
			/*
			 * The element was PGZ protected, and the translation
			 * returned another type than port or port-set, or
			 * ZONE_ID_INVALID (wq is NULL).
			 *
			 * We have to allow this skew, and assume the slot
			 * has held a now freed port/port-set.
			 */
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		ipc_object_validate_preflight_panic(orig_io);
	}

	if (__probable(waitq_lock_allow_invalid(wq))) {
		ipc_object_t io = io_from_waitq(wq);

		ipc_object_validate(io, io_otype(io));
#if CONFIG_PROB_GZALLOC
		if (__improbable(wq != orig_wq &&
		    wq != pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY))) {
			/*
			 * This object is no longer held in the slot;
			 * whatever this object is, it's not the droid
			 * we're looking for. Pretend we failed the lock.
			 */
			waitq_unlock(wq);
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		return true;
	}
	return false;
}

/*
 *	Routine:	ipc_object_lock_try
 *	Purpose:
 *		Validate, then try to acquire a lock on an object;
 *		fail if there is an existing busy lock.
 */
bool
ipc_object_lock_try(ipc_object_t io, ipc_object_type_t type)
{
	ipc_object_validate(io, type);
	return waitq_lock_try(io_waitq(io));
}

/*
 *	Routine:	ipc_object_unlock
 *	Purpose:
 *		Unlocks the given object.
 */
void
ipc_object_unlock(ipc_object_t io)
{
	waitq_unlock(io_waitq(io));
}
1450