1/*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005 SPARTA, Inc.
62 */
63/*
64 */
65/*
66 * File: kern/ipc_kobject.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions for letting a port represent a kernel object.
71 */
72
73#include <mach/mig.h>
74#include <mach/port.h>
75#include <mach/kern_return.h>
76#include <mach/message.h>
77#include <mach/mig_errors.h>
78#include <mach/mach_notify.h>
79#include <mach/ndr.h>
80#include <mach/vm_param.h>
81
82#include <mach/mach_vm_server.h>
83#include <mach/mach_port_server.h>
84#include <mach/mach_host_server.h>
85#include <mach/host_priv_server.h>
86#include <mach/clock_server.h>
87#include <mach/memory_entry_server.h>
88#include <mach/processor_server.h>
89#include <mach/processor_set_server.h>
90#include <mach/task_server.h>
91#include <mach/mach_voucher_server.h>
92#ifdef VM32_SUPPORT
93#include <mach/vm32_map_server.h>
94#endif
95#include <mach/thread_act_server.h>
96#include <mach/restartable_server.h>
97
98#include <mach/exc_server.h>
99#include <mach/mach_exc_server.h>
100#include <mach/mach_eventlink_server.h>
101
102#include <device/device_types.h>
103#include <device/device_server.h>
104
105#if CONFIG_USER_NOTIFICATION
106#include <UserNotification/UNDReplyServer.h>
107#endif
108
109#if CONFIG_ARCADE
110#include <mach/arcade_register_server.h>
111#endif
112
113#if CONFIG_AUDIT
114#include <kern/audit_sessionport.h>
115#endif
116
117#include <kern/counter.h>
118#include <kern/ipc_tt.h>
119#include <kern/ipc_mig.h>
120#include <kern/ipc_misc.h>
121#include <kern/ipc_kobject.h>
122#include <kern/host_notify.h>
123#include <kern/misc_protos.h>
124
125#if CONFIG_ARCADE
126#include <kern/arcade.h>
127#endif /* CONFIG_ARCADE */
128
129#include <ipc/ipc_kmsg.h>
130#include <ipc/ipc_port.h>
131#include <ipc/ipc_voucher.h>
132#include <kern/sync_sema.h>
133#include <kern/work_interval.h>
134#include <kern/task_ident.h>
135
136#if HYPERVISOR
137#include <kern/hv_support.h>
138#endif
139
140#if CONFIG_CSR
141#include <sys/csr.h>
142#endif
143
144#include <vm/vm_protos.h>
145
146#include <security/mac_mach_internal.h>
147
148extern char *proc_name_address(void *p);
149struct proc;
150extern int proc_pid(struct proc *p);
151
152typedef struct {
153 mach_msg_id_t num;
154 int kobjidx;
155 mig_kern_routine_t kroutine; /* Kernel server routine */
156 unsigned int kreply_size; /* Size of kernel reply msg */
157 unsigned int kreply_desc_cnt; /* Number of descs in kernel reply msg */
158} mig_hash_t;
159
160static void ipc_kobject_subst_once_no_senders(ipc_port_t, mach_msg_type_number_t);
161
162IPC_KOBJECT_DEFINE(IKOT_MEMORY_OBJECT); /* vestigial, no real instance */
163IPC_KOBJECT_DEFINE(IKOT_PORT_SUBST_ONCE,
164 .iko_op_no_senders = ipc_kobject_subst_once_no_senders);
165
166#define MAX_MIG_ENTRIES 1031
167#define MIG_HASH(x) (x)
168
169#define KOBJ_IDX_NOT_SET (-1)
170
171static SECURITY_READ_ONLY_LATE(mig_hash_t) mig_buckets[MAX_MIG_ENTRIES];
172static SECURITY_READ_ONLY_LATE(int) mig_table_max_displ;
173SECURITY_READ_ONLY_LATE(int) mach_kobj_count; /* count of total number of kobjects */
174
175ZONE_DEFINE_TYPE(ipc_kobject_label_zone, "ipc kobject labels",
176 struct ipc_kobject_label, ZC_ZFREE_CLEARMEM);
177
__startup_const
static const struct mig_kern_subsystem *mig_e[] = {
180 (const struct mig_kern_subsystem *)&mach_vm_subsystem,
181 (const struct mig_kern_subsystem *)&mach_port_subsystem,
182 (const struct mig_kern_subsystem *)&mach_host_subsystem,
183 (const struct mig_kern_subsystem *)&host_priv_subsystem,
184 (const struct mig_kern_subsystem *)&clock_subsystem,
185 (const struct mig_kern_subsystem *)&processor_subsystem,
186 (const struct mig_kern_subsystem *)&processor_set_subsystem,
187 (const struct mig_kern_subsystem *)&is_iokit_subsystem,
188 (const struct mig_kern_subsystem *)&task_subsystem,
189 (const struct mig_kern_subsystem *)&thread_act_subsystem,
190#ifdef VM32_SUPPORT
191 (const struct mig_kern_subsystem *)&vm32_map_subsystem,
192#endif
193#if CONFIG_USER_NOTIFICATION
194 (const struct mig_kern_subsystem *)&UNDReply_subsystem,
195#endif
196 (const struct mig_kern_subsystem *)&mach_voucher_subsystem,
197 (const struct mig_kern_subsystem *)&memory_entry_subsystem,
198 (const struct mig_kern_subsystem *)&task_restartable_subsystem,
199 (const struct mig_kern_subsystem *)&catch_exc_subsystem,
200 (const struct mig_kern_subsystem *)&catch_mach_exc_subsystem,
201#if CONFIG_ARCADE
202 (const struct mig_kern_subsystem *)&arcade_register_subsystem,
203#endif
204 (const struct mig_kern_subsystem *)&mach_eventlink_subsystem,
205};
206
207static struct ipc_kobject_ops __security_const_late
208 ipc_kobject_ops_array[IKOT_MAX_TYPE];
209
210__startup_func
211void
212ipc_kobject_register_startup(ipc_kobject_ops_t ops)
213{
214 if (ipc_kobject_ops_array[ops->iko_op_type].iko_op_type) {
215 panic("trying to register kobject(%d) twice", ops->iko_op_type);
216 }
217 ipc_kobject_ops_array[ops->iko_op_type] = *ops;
218}
219
220static ipc_kobject_ops_t
221ipc_kobject_ops_get(ipc_kobject_type_t ikot)
222{
223 if (ikot < IKOT_NONE || ikot >= IKOT_MAX_TYPE) {
224 panic("invalid kobject type %d", ikot);
225 }
226 return &ipc_kobject_ops_array[ikot];
227}
228
229__startup_func
230static void
231mig_init(void)
232{
233 unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_kern_subsystem *);
234 int howmany;
235 mach_msg_id_t j, pos, nentry, range;
236
237 for (i = 0; i < n; i++) {
238 range = mig_e[i]->end - mig_e[i]->start;
239 if (!mig_e[i]->start || range < 0) {
240 panic("the msgh_ids in mig_e[] aren't valid!");
241 }
242
243 if (mig_e[i]->maxsize > KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE) {
244 panic("mig subsystem %d (%p) replies are too large (%d > %d)",
245 mig_e[i]->start, mig_e[i], mig_e[i]->maxsize,
246 KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE);
247 }
248
249 for (j = 0; j < range; j++) {
250 if (mig_e[i]->kroutine[j].kstub_routine) {
251 /* Only put real entries in the table */
252 nentry = j + mig_e[i]->start;
253 for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1;
254 mig_buckets[pos].num;
255 pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) {
256 if (mig_buckets[pos].num == nentry) {
                    printf("message id = %d\n", nentry);
258 panic("multiple entries with the same msgh_id");
259 }
260 if (howmany == MAX_MIG_ENTRIES) {
261 panic("the mig dispatch table is too small");
262 }
263 }
264
265 mig_buckets[pos].num = nentry;
266 mig_buckets[pos].kroutine = mig_e[i]->kroutine[j].kstub_routine;
267 if (mig_e[i]->kroutine[j].max_reply_msg) {
268 mig_buckets[pos].kreply_size = mig_e[i]->kroutine[j].max_reply_msg;
269 mig_buckets[pos].kreply_desc_cnt = mig_e[i]->kroutine[j].reply_descr_count;
270 } else {
271 /*
                     * Allocating a larger-than-needed kmsg creates a hole for
273 * inlined kmsgs (IKM_TYPE_ALL_INLINED) during copyout.
274 * Disallow that.
275 */
276 panic("kroutine must have precise size %d %d", mig_e[i]->start, j);
277 }
278
279 mig_buckets[pos].kobjidx = KOBJ_IDX_NOT_SET;
280
281 if (mig_table_max_displ < howmany) {
282 mig_table_max_displ = howmany;
283 }
284 mach_kobj_count++;
285 }
286 }
287 }
288
289 /* 77417305: pad to allow for MIG routines removals/cleanups */
290 mach_kobj_count += 32;
291
    printf("mig_table_max_displ = %d mach_kobj_count = %d\n",
293 mig_table_max_displ, mach_kobj_count);
294}
295STARTUP(MACH_IPC, STARTUP_RANK_FIRST, mig_init);
296
/*
 * Do a hash table lookup for the given msgh_id. Returns
 * NULL if not found.
 */
301static mig_hash_t *
302find_mig_hash_entry(int msgh_id)
303{
304 unsigned int i = (unsigned int)MIG_HASH(msgh_id);
305 int max_iter = mig_table_max_displ;
306 mig_hash_t *ptr;
307
308 do {
309 ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
310 } while (msgh_id != ptr->num && ptr->num && --max_iter);
311
312 if (!ptr->kroutine || msgh_id != ptr->num) {
313 ptr = (mig_hash_t *)0;
314 }
315
316 return ptr;
317}
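
/*
 * Illustrative note (not from the original source): because MIG_HASH() is
 * the identity function, a msgh_id lands in bucket (msgh_id % MAX_MIG_ENTRIES).
 * For two hypothetical ids 1000 and 2031, both map to bucket 1000
 * (2031 % 1031 == 1000); mig_init() places the second one in bucket 1001 by
 * linear probing, and find_mig_hash_entry() re-walks at most
 * mig_table_max_displ buckets before declaring the id unknown.
 */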
318
319/*
320 * Routine: ipc_kobject_reply_status
321 *
322 * Returns the error/success status from a given kobject call reply message.
323 *
324 * Contract for KernelServer MIG routines is as follows:
325 *
326 * (1) If reply header has complex bit set, kernel server implementation routine
327 * must have implicitly returned KERN_SUCCESS.
328 *
329 * (2) Otherwise we can always read RetCode from after the header. This is not
330 * obvious to see, and is discussed below by case.
331 *
332 * MIG can return three types of replies from KernelServer routines.
333 *
334 * (A) Complex Reply (i.e. with Descriptors)
335 *
336 * E.g.: thread_get_exception_ports()
337 *
338 * If complex bit is set, we can deduce the call is successful since the bit
339 * is set at the very end.
340 * If complex bit is not set, we must have returned from MIG_RETURN_ERROR.
341 * MIG writes RetCode to immediately after the header, and we know this is
342 * safe to do for all kmsg layouts. (See discussion in ipc_kmsg_server_internal()).
343 *
344 * (B) Simple Reply with Out Params
345 *
346 * E.g.: thread_get_states()
347 *
348 * If the call failed, we return from MIG_RETURN_ERROR, which writes RetCode
349 * to immediately after the header.
350 * If the call succeeded, MIG writes RetCode as KERN_SUCCESS to USER DATA
351 * buffer. *BUT* since the region after header is always initialized with
352 * KERN_SUCCESS, reading from there gives us the same result. We rely on
353 * this behavior to not make a special case.
354 *
355 * (C) Simple Reply without Out Params
356 *
357 * E.g.: thread_set_states()
358 *
359 * For this type of MIG routines we always allocate a mig_reply_error_t
360 * as reply kmsg, which fits inline in kmsg. RetCode can be found after
361 * header, and can be KERN_SUCCESS or otherwise a failure code.
362 */
363static kern_return_t
364ipc_kobject_reply_status(ipc_kmsg_t reply)
365{
    mach_msg_header_t *hdr = ikm_header(reply);
367
368 if (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
369 return KERN_SUCCESS;
370 }
371
372 return ((mig_reply_error_t *)hdr)->RetCode;
373}
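
/*
 * Usage sketch (illustrative; assumes the standard mig_reply_error_t layout
 * of { mach_msg_header_t Head; NDR_record_t NDR; kern_return_t RetCode; }):
 *
 *     kern_return_t status = ipc_kobject_reply_status(reply);
 *     if (status == MIG_NO_REPLY) {
 *         // the server routine kept the reply right; it will answer later
 *     } else if (status != KERN_SUCCESS) {
 *         // reply degenerates to a simple mig_reply_error_t carrying `status`
 *     }
 */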
374
375static void
376ipc_kobject_set_reply_error_status(
377 ipc_kmsg_t reply,
378 kern_return_t kr)
379{
    mig_reply_error_t *error = (mig_reply_error_t *)ikm_header(reply);
381
382 assert(!(error->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX));
383 error->RetCode = kr;
384}
385
386/*
387 * Routine: ipc_kobject_set_kobjidx
388 * Purpose:
389 * Set the index for the kobject filter
390 * mask for a given message ID.
391 */
392kern_return_t
393ipc_kobject_set_kobjidx(
394 int msgh_id,
395 int index)
396{
397 mig_hash_t *ptr = find_mig_hash_entry(msgh_id);
398
399 if (ptr == (mig_hash_t *)0) {
400 return KERN_INVALID_ARGUMENT;
401 }
402
403 assert(index < mach_kobj_count);
404 ptr->kobjidx = index;
405
406 return KERN_SUCCESS;
407}
408
409static void
410ipc_kobject_init_reply(
411 ipc_kmsg_t reply,
412 const ipc_kmsg_t request,
413 kern_return_t kr)
414{
    mach_msg_header_t *req_hdr = ikm_header(request);
    mach_msg_header_t *reply_hdr = ikm_header(reply);
417
418#define InP ((mach_msg_header_t *) req_hdr)
419#define OutP ((mig_reply_error_t *) reply_hdr)
420
421 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
422 OutP->Head.msgh_bits =
423 MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
424 OutP->Head.msgh_remote_port = InP->msgh_local_port;
425 OutP->Head.msgh_local_port = MACH_PORT_NULL;
426 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
427 OutP->Head.msgh_id = InP->msgh_id + 100;
428
429 OutP->NDR = NDR_record;
430 OutP->RetCode = kr;
431
432#undef InP
433#undef OutP
434}
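
/*
 * Worked example (illustrative): for a hypothetical request with msgh_id 3800
 * that carried a send-once reply right, the routine above builds a bare
 * mig_reply_error_t whose msgh_id is 3900 (MIG's request-id + 100 convention),
 * whose destination is the request's reply port, and whose RetCode is the
 * supplied kr.
 */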
435
436static void
437ipc_kobject_init_new_reply(
438 ipc_kmsg_t new_reply,
439 const ipc_kmsg_t old_reply,
440 kern_return_t kr)
441{
    mach_msg_header_t *new_hdr = ikm_header(new_reply);
    mach_msg_header_t *old_hdr = ikm_header(old_reply);
444
445#define InP ((mig_reply_error_t *) old_hdr)
446#define OutP ((mig_reply_error_t *) new_hdr)
447
448 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
449 OutP->Head.msgh_bits = InP->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
450 OutP->Head.msgh_remote_port = InP->Head.msgh_remote_port;
451 OutP->Head.msgh_local_port = MACH_PORT_NULL;
452 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
453 OutP->Head.msgh_id = InP->Head.msgh_id;
454
455 OutP->NDR = InP->NDR;
456 OutP->RetCode = kr;
457
458#undef InP
459#undef OutP
460}
461
462static ipc_kmsg_t
463ipc_kobject_alloc_mig_error(void)
464{
    return ipc_kmsg_alloc(sizeof(mig_reply_error_t),
        0, 0, IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_SAVED | IPC_KMSG_ALLOC_ZERO |
        IPC_KMSG_ALLOC_NOFAIL);
468}
469
470/*
471 * Routine: ipc_kobject_server_internal
472 * Purpose:
473 * Handle a message sent to the kernel.
474 * Generates a reply message.
475 * Version for Untyped IPC.
476 * Conditions:
477 * Nothing locked.
478 */
479static kern_return_t
480ipc_kobject_server_internal(
481 __unused ipc_port_t port,
482 ipc_kmsg_t request,
483 ipc_kmsg_t *replyp)
484{
485 int request_msgh_id;
486 ipc_kmsg_t reply = IKM_NULL;
487 mach_msg_size_t reply_size, reply_desc_cnt;
488 mig_hash_t *ptr;
489 mach_msg_header_t *req_hdr, *reply_hdr;
490 void *req_data, *reply_data;
491 mach_msg_max_trailer_t *req_trailer;
492
493 thread_ro_t tro = current_thread_ro();
494 task_t curtask = tro->tro_task;
495 struct proc *curproc = tro->tro_proc;
496
    req_hdr = ikm_header(request);
    req_data = ikm_udata_from_header(request);
    req_trailer = ipc_kmsg_get_trailer(request, FALSE);
500 request_msgh_id = req_hdr->msgh_id;
501
502 /* Find corresponding mig_hash entry, if any */
    ptr = find_mig_hash_entry(request_msgh_id);
504
505 /* Get the reply_size. */
506 if (ptr == (mig_hash_t *)0) {
507 reply_size = sizeof(mig_reply_error_t);
508 reply_desc_cnt = 0;
509 } else {
510 reply_size = ptr->kreply_size;
511 reply_desc_cnt = ptr->kreply_desc_cnt;
512 }
513
514 assert(reply_size >= sizeof(mig_reply_error_t));
515
    /*
     * MIG should really ensure no data leakage -
     * but until it does, pessimistically zero the
     * whole reply buffer.
     */
    reply = ipc_kmsg_alloc(reply_size, 0, reply_desc_cnt, IPC_KMSG_ALLOC_KERNEL |
        IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);
523 /* reply can be non-linear */
524
525 if (ptr == (mig_hash_t *)0) {
526#if DEVELOPMENT || DEBUG
527 printf("ipc_kobject_server: bogus kernel message, id=%d\n",
528 req_hdr->msgh_id);
529#endif /* DEVELOPMENT || DEBUG */
530 _MIG_MSGID_INVALID(req_hdr->msgh_id);
531
532 ipc_kobject_init_reply(reply, request, MIG_BAD_ID);
533
534 *replyp = reply;
535 return KERN_SUCCESS;
536 }
537
538 /*
539 * We found the routine to call. Call it to perform the kernel function.
540 */
541 assert(ptr != (mig_hash_t *)0);
542
    reply_hdr = ikm_header(reply);
    /* reply is allocated by kernel. non-zero desc count means complex msg */
    reply_data = ikm_udata(reply, reply_desc_cnt, (reply_desc_cnt > 0));
546
547 /*
548 * Reply can be of layout IKM_TYPE_ALL_INLINED, IKM_TYPE_UDATA_OOL,
549 * or IKM_TYPE_ALL_OOL, each of which guarantees kernel/user data segregation.
550 *
551 * Here is the trick: In each case, there _must_ be enough space in
552 * the kdata (header) buffer in `reply` to hold a mig_reply_error_t.
553 */
554 assert(reply->ikm_type != IKM_TYPE_KDATA_OOL);
555 assert((vm_offset_t)reply_hdr + sizeof(mig_reply_error_t) <= ikm_kdata_end(reply));
556
557 /*
558 * Discussion by case:
559 *
560 * (1) IKM_TYPE_ALL_INLINED
561 * - IKM_SAVED_MSG_SIZE is large enough for mig_reply_error_t
562 * (2) IKM_TYPE_UDATA_OOL
563 * - Same as (1).
564 * (3) IKM_TYPE_ALL_OOL
565 * - This layout is only possible if kdata (header + descs) doesn't fit
566 * in IKM_SAVED_MSG_SIZE. So we must have at least one descriptor
567 * following the header, which is enough to fit mig_reply_error_t.
568 */
569 static_assert(sizeof(mig_reply_error_t) < IKM_SAVED_MSG_SIZE);
570 static_assert(sizeof(mig_reply_error_t) < sizeof(mach_msg_base_t) +
571 1 * sizeof(mach_msg_descriptor_t));
572
573 /*
574 * Therefore, we can temporarily treat `reply` as a *simple* message that
575 * contains NDR Record + RetCode immediately after the header (which overlaps
576 * with descriptors, if the reply msg is supposed to be complex).
577 *
578 * In doing so we save having a separate allocation specifically for errors.
579 */
580 ipc_kobject_init_reply(reply, request, KERN_SUCCESS);
581
582 /* Check if the kobject call should be filtered */
583#if CONFIG_MACF
584 int idx = ptr->kobjidx;
    uint8_t *filter_mask = task_get_mach_kobj_filter_mask(curtask);
586
587 /* Check kobject mig filter mask, if exists. */
588 if (filter_mask != NULL &&
589 idx != KOBJ_IDX_NOT_SET &&
590 !bitstr_test(filter_mask, idx) &&
591 mac_task_kobj_msg_evaluate != NULL) {
592 /* Not in filter mask, evaluate policy. */
593 kern_return_t kr = mac_task_kobj_msg_evaluate(curproc,
594 request_msgh_id, idx);
595 if (kr != KERN_SUCCESS) {
596 ipc_kobject_set_reply_error_status(reply, kr);
597 goto skip_kobjcall;
598 }
599 }
600#endif /* CONFIG_MACF */
601
602 __BeforeKobjectServerTrace(idx);
603 /* See contract in header doc for ipc_kobject_reply_status() */
604 (*ptr->kroutine)(req_hdr, req_data, req_trailer, reply_hdr, reply_data);
605 __AfterKobjectServerTrace(idx);
606
607#if CONFIG_MACF
608skip_kobjcall:
609#endif
610 counter_inc(&kernel_task->messages_received);
611
612 kern_return_t reply_status = ipc_kobject_reply_status(reply);
613
614 if (reply_status == MIG_NO_REPLY) {
615 /*
616 * The server function will send a reply message
617 * using the reply port right, which it has saved.
618 */
        ipc_kmsg_free(reply);
620 reply = IKM_NULL;
621 } else if (reply_status != KERN_SUCCESS && reply_size > sizeof(mig_reply_error_t)) {
622 assert(ikm_header(reply)->msgh_size == sizeof(mig_reply_error_t));
623 /*
624 * MIG returned an error, and the original kmsg we allocated for reply
625 * is oversized. Deallocate it and allocate a smaller, proper kmsg
         * that fits mig_reply_error_t snugly.
627 *
628 * We must do so because we used the trick mentioned above which (depending
629 * on the kmsg layout) may cause payload in mig_reply_error_t to overlap
630 * with kdata buffer meant for descriptors.
631 *
632 * This will mess with ikm_kdata_size() calculation down the line so
633 * reallocate a new buffer immediately here.
634 */
635 ipc_kmsg_t new_reply = ipc_kobject_alloc_mig_error();
        ipc_kobject_init_new_reply(new_reply, reply, reply_status);
637
638 /* MIG contract: If status is not KERN_SUCCESS, reply must be simple. */
639 assert(!(ikm_header(reply)->msgh_bits & MACH_MSGH_BITS_COMPLEX));
640 assert(ikm_header(reply)->msgh_local_port == MACH_PORT_NULL);
641 assert(ikm_header(reply)->msgh_voucher_port == MACH_PORT_NULL);
642 /* So we can simply free the original reply message. */
        ipc_kmsg_free(reply);
644 reply = new_reply;
645 }
646
647 *replyp = reply;
648 return KERN_SUCCESS;
649}
650
651
652/*
653 * Routine: ipc_kobject_server
654 * Purpose:
655 * Handle a message sent to the kernel.
656 * Generates a reply message.
657 * Version for Untyped IPC.
658 *
 * Ownership of the incoming rights (from the request)
 * is transferred on success (whether a reply is made or not).
661 *
662 * Conditions:
663 * Nothing locked.
664 */
665ipc_kmsg_t
666ipc_kobject_server(
667 ipc_port_t port,
668 ipc_kmsg_t request,
669 mach_msg_option_t option __unused)
670{
    mach_msg_header_t *req_hdr = ikm_header(request);
672#if DEVELOPMENT || DEBUG
673 const int request_msgh_id = req_hdr->msgh_id;
674#endif
675 ipc_port_t request_voucher_port;
676 ipc_kmsg_t reply = IKM_NULL;
677 mach_msg_header_t *reply_hdr;
678 kern_return_t kr;
679
680 ipc_kmsg_trace_send(request, option);
681
682 if (ip_kotype(port) == IKOT_UEXT_OBJECT) {
        kr = uext_server(port, request, &reply);
    } else {
        kr = ipc_kobject_server_internal(port, request, &reply);
686 assert(kr == KERN_SUCCESS);
687 }
688
689 if (kr != KERN_SUCCESS) {
690 assert(kr != MACH_SEND_TIMED_OUT &&
691 kr != MACH_SEND_INTERRUPTED &&
692 kr != MACH_SEND_INVALID_DEST);
693 assert(reply == IKM_NULL);
694
695 /* convert the server error into a MIG error */
696 reply = ipc_kobject_alloc_mig_error();
697 ipc_kobject_init_reply(reply, request, kr);
698 }
699
700 counter_inc(&kernel_task->messages_sent);
701 /*
702 * Destroy destination. The following code differs from
703 * ipc_object_destroy in that we release the send-once
704 * right instead of generating a send-once notification
705 * (which would bring us here again, creating a loop).
706 * It also differs in that we only expect send or
707 * send-once rights, never receive rights.
708 */
709 switch (MACH_MSGH_BITS_REMOTE(req_hdr->msgh_bits)) {
710 case MACH_MSG_TYPE_PORT_SEND:
        ipc_port_release_send(req_hdr->msgh_remote_port);
712 break;
713
714 case MACH_MSG_TYPE_PORT_SEND_ONCE:
        ipc_port_release_sonce(req_hdr->msgh_remote_port);
716 break;
717
718 default:
719 panic("ipc_kobject_server: strange destination rights");
720 }
721
722 /*
723 * Destroy voucher. The kernel MIG servers never take ownership
724 * of vouchers sent in messages. Swallow any such rights here.
725 */
    request_voucher_port = ipc_kmsg_get_voucher_port(request);
    if (IP_VALID(request_voucher_port)) {
        assert(MACH_MSG_TYPE_PORT_SEND ==
            MACH_MSGH_BITS_VOUCHER(req_hdr->msgh_bits));
        ipc_port_release_send(request_voucher_port);
        ipc_kmsg_clear_voucher_port(request);
732 }
733
734 if (reply == IKM_NULL ||
735 ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
736 /*
737 * The server function is responsible for the contents
738 * of the message. The reply port right is moved
739 * to the reply message, and we have deallocated
740 * the destination port right, so we just need
741 * to free the kmsg.
742 */
        ipc_kmsg_free(request);
744 } else {
745 /*
746 * The message contents of the request are intact.
747 * Remote port has been released above. Do not destroy
748 * the reply port right either, which is needed in the reply message.
749 */
        ipc_kmsg_destroy(request, IPC_KMSG_DESTROY_SKIP_LOCAL | IPC_KMSG_DESTROY_SKIP_REMOTE);
751 }
752
753 if (reply != IKM_NULL) {
        reply_hdr = ikm_header(reply);
755 ipc_port_t reply_port = reply_hdr->msgh_remote_port;
756
757 if (!IP_VALID(reply_port)) {
758 /*
759 * Can't queue the reply message if the destination
760 * (the reply port) isn't valid.
761 */
            ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
            reply = IKM_NULL;
        } else if (ip_in_space_noauth(reply_port, ipc_space_kernel)) {
765 /* do not lock reply port, use raw pointer comparison */
766
767 /*
768 * Don't send replies to kobject kernel ports.
769 */
770#if DEVELOPMENT || DEBUG
771 printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
772 __func__, ip_kotype(reply_port), request_msgh_id);
773#endif /* DEVELOPMENT || DEBUG */
            ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
775 reply = IKM_NULL;
776 }
777 }
778
779 return reply;
780}
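
/*
 * Flow sketch (illustrative, not part of the original source): the kernel
 * send path roughly does
 *
 *     reply = ipc_kobject_server(dest_port, request, option);
 *     if (reply != IKM_NULL) {
 *         // enqueue `reply` on its destination (the sender's reply port)
 *     }
 *
 * i.e. the request kmsg is always consumed here, and the caller only has to
 * deliver whatever reply kmsg comes back.
 */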
781
782static __header_always_inline void
783ipc_kobject_set_raw(
784 ipc_port_t port,
785 ipc_kobject_t kobject,
786 ipc_kobject_type_t type)
787{
788 uintptr_t *store = &port->ip_kobject;
789
790#if __has_feature(ptrauth_calls)
791 type |= port->ip_immovable_receive << 14;
792 type |= port->ip_immovable_send << 15;
793 type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
794 kobject = ptrauth_sign_unauthenticated(kobject,
795 ptrauth_key_process_independent_data,
796 ptrauth_blend_discriminator(store, type));
797#else
798 (void)type;
799#endif // __has_feature(ptrauth_calls)
800
801 *store = (uintptr_t)kobject;
802}
803
804static inline void
805ipc_kobject_set_internal(
806 ipc_port_t port,
807 ipc_kobject_t kobject,
808 ipc_kobject_type_t type)
809{
810 assert(type != IKOT_NONE);
    io_bits_or(ip_to_object(port), type);
812 ipc_kobject_set_raw(port, kobject, type);
813}
814
815/*
816 * Routine: ipc_kobject_get_raw
817 * Purpose:
818 * Returns the kobject pointer of a specified port.
819 *
820 * This returns the current value of the kobject pointer,
821 * without any validation (the caller is expected to do
822 * the validation it needs).
823 *
824 * Conditions:
825 * The port is a kobject of the proper type.
826 */
827__header_always_inline ipc_kobject_t
828ipc_kobject_get_raw(
829 ipc_port_t port,
830 ipc_kobject_type_t type)
831{
832 uintptr_t *store = &port->ip_kobject;
833 ipc_kobject_t kobject = (ipc_kobject_t)*store;
834
835#if __has_feature(ptrauth_calls)
836 type |= port->ip_immovable_receive << 14;
837 type |= port->ip_immovable_send << 15;
838 type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
839 kobject = ptrauth_auth_data(kobject,
840 ptrauth_key_process_independent_data,
841 ptrauth_blend_discriminator(store, type));
842#else
843 (void)type;
844#endif // __has_feature(ptrauth_calls)
845
846 return kobject;
847}
848
849__abortlike
850static void
851ipc_kobject_require_panic(
852 ipc_port_t port,
853 ipc_kobject_t kobject,
854 ipc_kobject_type_t kotype)
855{
856 if (ip_kotype(port) != kotype) {
857 panic("port %p: invalid kobject type, got %d wanted %d",
858 port, ip_kotype(port), kotype);
859 }
860 panic("port %p: invalid kobject, got %p wanted %p",
861 port, ipc_kobject_get_raw(port, kotype), kobject);
862}
863
864__header_always_inline void
865ipc_kobject_require(
866 ipc_port_t port,
867 ipc_kobject_t kobject,
868 ipc_kobject_type_t kotype)
869{
870 ipc_kobject_t cur;
871
872 if (__improbable(ip_kotype(port) != kotype)) {
873 ipc_kobject_require_panic(port, kobject, kotype);
874 }
    cur = ipc_kobject_get_raw(port, kotype);
876 if (cur && cur != kobject) {
877 ipc_kobject_require_panic(port, kobject, kotype);
878 }
879}
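
/*
 * Usage sketch (illustrative; `obj` and IKOT_EXAMPLE are assumed names):
 * a caller that already knows which object a port must be bound to asserts
 * the binding before minting new rights, mirroring the helpers further down:
 *
 *     ip_mq_lock(port);
 *     if (ip_active(port)) {
 *         ipc_kobject_require(port, obj, IKOT_EXAMPLE);
 *         ipc_port_make_send_any_locked(port);
 *     }
 *     ip_mq_unlock(port);
 */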
880
881/*
882 * Routine: ipc_kobject_get_locked
883 * Purpose:
884 * Returns the kobject pointer of a specified port,
885 * for an expected type.
886 *
887 * Returns IKO_NULL if the port isn't active.
888 *
889 * This function may be used when:
890 * - the port lock is held
891 * - the kobject association stays while there
892 * are any outstanding rights.
893 *
894 * Conditions:
895 * The port is a kobject of the proper type.
896 */
897ipc_kobject_t
898ipc_kobject_get_locked(
899 ipc_port_t port,
900 ipc_kobject_type_t type)
901{
902 ipc_kobject_t kobject = IKO_NULL;
903
904 if (ip_active(port) && type == ip_kotype(port)) {
905 kobject = ipc_kobject_get_raw(port, type);
906 }
907
908 return kobject;
909}
910
911/*
912 * Routine: ipc_kobject_get_stable
913 * Purpose:
914 * Returns the kobject pointer of a specified port,
915 * for an expected type, for types where the port/kobject
916 * association is permanent.
917 *
918 * Returns IKO_NULL if the port isn't active.
919 *
920 * Conditions:
921 * The port is a kobject of the proper type.
922 */
923ipc_kobject_t
924ipc_kobject_get_stable(
925 ipc_port_t port,
926 ipc_kobject_type_t type)
927{
928 assert(ipc_kobject_ops_get(type)->iko_op_stable);
929 return ipc_kobject_get_locked(port, type);
930}
931
932/*
933 * Routine: ipc_kobject_init_port
934 * Purpose:
935 * Initialize a kobject port with the given types and options.
936 *
937 * This function never fails.
938 */
939static inline void
940ipc_kobject_init_port(
941 ipc_port_t port,
942 ipc_kobject_t kobject,
943 ipc_kobject_type_t type,
944 ipc_kobject_alloc_options_t options)
945{
946 if (options & IPC_KOBJECT_ALLOC_MAKE_SEND) {
947 ipc_port_make_send_any_locked(port);
948 }
949 if (options & IPC_KOBJECT_ALLOC_NSREQUEST) {
950 port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
951 ip_reference(port);
952 }
953 if (options & IPC_KOBJECT_ALLOC_NO_GRANT) {
954 port->ip_no_grant = 1;
955 }
956 if (options & IPC_KOBJECT_ALLOC_IMMOVABLE_SEND) {
957 port->ip_immovable_send = 1;
958 }
959 if (options & IPC_KOBJECT_ALLOC_PINNED) {
960 port->ip_pinned = 1;
961 }
962
963 ipc_kobject_set_internal(port, kobject, type);
964}
965
966/*
967 * Routine: ipc_kobject_alloc_port
968 * Purpose:
969 * Allocate a kobject port in the kernel space of the specified type.
970 *
971 * This function never fails.
972 *
973 * Conditions:
974 * No locks held (memory is allocated)
975 */
976ipc_port_t
977ipc_kobject_alloc_port(
978 ipc_kobject_t kobject,
979 ipc_kobject_type_t type,
980 ipc_kobject_alloc_options_t options)
981{
982 ipc_port_t port;
    port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_ENFORCE_RIGID_REPLY_PORT_SEMANTICS);
984
985 if (port == IP_NULL) {
986 panic("ipc_kobject_alloc_port(): failed to allocate port");
987 }
988
989 ipc_kobject_init_port(port, kobject, type, options);
990 return port;
991}
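
/*
 * Usage sketch (illustrative only; IKOT_EXAMPLE, example_t and
 * example_no_senders() are hypothetical): a subsystem registers its kobject
 * type once, then allocates ports bound to its objects:
 *
 *     IPC_KOBJECT_DEFINE(IKOT_EXAMPLE,
 *         .iko_op_no_senders = example_no_senders);
 *
 *     static ipc_port_t
 *     example_alloc_port(example_t ex)
 *     {
 *         return ipc_kobject_alloc_port((ipc_kobject_t)ex, IKOT_EXAMPLE,
 *             IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
 *     }
 */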
992
993/*
994 * Routine: ipc_kobject_alloc_labeled_port
995 * Purpose:
996 * Allocate a kobject port and associated mandatory access label
997 * in the kernel space of the specified type.
998 *
999 * This function never fails.
1000 *
1001 * Conditions:
1002 * No locks held (memory is allocated)
1003 */
1004
1005ipc_port_t
1006ipc_kobject_alloc_labeled_port(
1007 ipc_kobject_t kobject,
1008 ipc_kobject_type_t type,
1009 ipc_label_t label,
1010 ipc_kobject_alloc_options_t options)
1011{
1012 ipc_port_t port;
1013
1014 port = ipc_kobject_alloc_port(kobject, type, options);
1015
1016 ipc_port_set_label(port, label);
1017
1018 return port;
1019}
1020
1021static void
1022ipc_kobject_subst_once_no_senders(
1023 ipc_port_t port,
1024 mach_port_mscount_t mscount)
1025{
1026 ipc_port_t ko_port;
1027
    ko_port = ipc_kobject_dealloc_port(port, mscount, IKOT_PORT_SUBST_ONCE);
1029
1030 if (ko_port) {
1031 /*
1032 * Clean up the right if the wrapper wasn't hollowed out
1033 * by ipc_kobject_alloc_subst_once().
1034 */
        ipc_port_release_send(ko_port);
1036 }
1037}
1038
1039/*
1040 * Routine: ipc_kobject_alloc_subst_once
1041 * Purpose:
1042 * Make a port that will be substituted by the kolabel
1043 * rules once, preventing the next substitution (of its target)
1044 * to happen if any.
1045 *
1046 * Returns:
1047 * A port with a send right, that will substitute to its "kobject".
1048 *
1049 * Conditions:
1050 * No locks held (memory is allocated).
1051 *
1052 * `target` holds a send-right donated to this function,
1053 * consumed in ipc_kobject_subst_once_no_senders().
1054 */
1055ipc_port_t
1056ipc_kobject_alloc_subst_once(
1057 ipc_port_t target)
1058{
1059 if (!IP_VALID(target)) {
1060 return target;
1061 }
    return ipc_kobject_alloc_labeled_port(target,
        IKOT_PORT_SUBST_ONCE, IPC_LABEL_SUBST_ONCE,
        IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
1065}
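
/*
 * Usage sketch (illustrative): a caller holding a send right it wants handed
 * out exactly once, shielded from further label substitution, wraps it:
 *
 *     send = ipc_kobject_alloc_subst_once(target);   // consumes `target`
 *
 * If the wrapper loses its last sender before being copied out,
 * ipc_kobject_subst_once_no_senders() above releases the wrapped right.
 */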
1066
1067/*
1068 * Routine: ipc_kobject_make_send_lazy_alloc_port
1069 * Purpose:
 * Make a send right for a kobject port.
1071 *
1072 * A location owning this port is passed in port_store.
1073 * If no port exists, a port is made lazily.
1074 *
1075 * A send right is made for the port, and if this is the first one
1076 * (possibly not for the first time), then the no-more-senders
1077 * notification is rearmed.
1078 *
1079 * When a notification is armed, the kobject must donate
1080 * one of its references to the port. It is expected
1081 * the no-more-senders notification will consume this reference.
1082 *
1083 * Returns:
1084 * TRUE if a notification was armed
1085 * FALSE else
1086 *
1087 * Conditions:
1088 * Nothing is locked, memory can be allocated.
1089 * The caller must be able to donate a kobject reference to the port.
1090 */
1091bool
1092ipc_kobject_make_send_lazy_alloc_port(
1093 ipc_port_t *port_store,
1094 ipc_kobject_t kobject,
1095 ipc_kobject_type_t type,
1096 ipc_kobject_alloc_options_t alloc_opts)
1097{
1098 ipc_port_t port, previous;
1099 kern_return_t kr;
1100
1101 alloc_opts |= IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST;
1102 port = os_atomic_load(port_store, dependency);
1103
1104 if (!IP_VALID(port)) {
        port = ipc_kobject_alloc_port(kobject, type, alloc_opts);
1106
1107 if (os_atomic_cmpxchgv(port_store,
1108 IP_NULL, port, &previous, release)) {
1109 return TRUE;
1110 }
1111
1112 /*
1113 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
1114 * ipc_kobject_dealloc_port will handle
1115 * IPC_KOBJECT_ALLOC_NSREQUEST.
1116 */
1117 port->ip_mscount = 0;
1118 port->ip_srights = 0;
1119 ip_release_live(port);
        ipc_kobject_dealloc_port(port, 0, type);
1121
1122 port = previous;
1123 }
1124
    kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
1126 assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
1127
1128 return kr == KERN_SUCCESS;
1129}
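
/*
 * Usage sketch (illustrative; the static `example_port` store, `example_obj`
 * and IKOT_EXAMPLE are hypothetical): callers keep a single global port and
 * let this routine create it race-free on first use:
 *
 *     static ipc_port_t example_port = IP_NULL;
 *
 *     if (ipc_kobject_make_send_lazy_alloc_port(&example_port,
 *         (ipc_kobject_t)example_obj, IKOT_EXAMPLE, IPC_KOBJECT_ALLOC_NONE)) {
 *         // notification armed: the kobject donated a reference to the port
 *     }
 */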
1130
1131/*
1132 * Routine: ipc_kobject_make_send_lazy_alloc_labeled_port
1133 * Purpose:
 * Make a send right for a kobject port.
1135 *
1136 * A location owning this port is passed in port_store.
1137 * If no port exists, a port is made lazily.
1138 *
1139 * A send right is made for the port, and if this is the first one
1140 * (possibly not for the first time), then the no-more-senders
1141 * notification is rearmed.
1142 *
1143 * When a notification is armed, the kobject must donate
1144 * one of its references to the port. It is expected
1145 * the no-more-senders notification will consume this reference.
1146 *
1147 * Returns:
1148 * TRUE if a notification was armed
1149 * FALSE else
1150 *
1151 * Conditions:
1152 * Nothing is locked, memory can be allocated.
1153 * The caller must be able to donate a kobject reference to the port.
1154 */
1155boolean_t
1156ipc_kobject_make_send_lazy_alloc_labeled_port(
1157 ipc_port_t *port_store,
1158 ipc_kobject_t kobject,
1159 ipc_kobject_type_t type,
1160 ipc_label_t label)
1161{
1162 ipc_port_t port, previous;
1163 kern_return_t kr;
1164
1165 port = os_atomic_load(port_store, dependency);
1166
1167 if (!IP_VALID(port)) {
        port = ipc_kobject_alloc_labeled_port(kobject, type, label,
            IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
1170 if (os_atomic_cmpxchgv(port_store, IP_NULL, port, &previous, release)) {
1171 return TRUE;
1172 }
1173
1174 /*
1175 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
1176 * ipc_kobject_dealloc_port will handle
1177 * IPC_KOBJECT_ALLOC_NSREQUEST.
1178 */
1179 port->ip_mscount = 0;
1180 port->ip_srights = 0;
1181 ip_release_live(port);
        ipc_kobject_dealloc_port(port, 0, type);
1183
1184 port = previous;
1185 assert(ip_is_kolabeled(port));
1186 }
1187
    kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
1189 assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
1190
1191 return kr == KERN_SUCCESS;
1192}
1193
1194/*
1195 * Routine: ipc_kobject_nsrequest_locked
1196 * Purpose:
1197 * Arm the no-senders notification for the given kobject
1198 * if it doesn't have one armed yet.
1199 *
1200 * Conditions:
1201 * Port is locked and active.
1202 *
1203 * Returns:
1204 * KERN_SUCCESS: the notification was armed
1205 * KERN_ALREADY_WAITING: the notification was already armed
1206 * KERN_FAILURE: the notification would fire immediately
1207 */
1208static inline kern_return_t
1209ipc_kobject_nsrequest_locked(
1210 ipc_port_t port,
1211 mach_port_mscount_t sync)
1212{
1213 if (port->ip_nsrequest == IP_KOBJECT_NSREQUEST_ARMED) {
1214 return KERN_ALREADY_WAITING;
1215 }
1216
1217 if (port->ip_srights == 0 && sync <= port->ip_mscount) {
1218 return KERN_FAILURE;
1219 }
1220
1221 port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
1222 ip_reference(port);
1223 return KERN_SUCCESS;
1224}
1225
1226
1227/*
1228 * Routine: ipc_kobject_nsrequest
1229 * Purpose:
1230 * Arm the no-senders notification for the given kobject
1231 * if it doesn't have one armed yet.
1232 *
1233 * Returns:
1234 * KERN_SUCCESS: the notification was armed
1235 * KERN_ALREADY_WAITING: the notification was already armed
1236 * KERN_FAILURE: the notification would fire immediately
1237 * KERN_INVALID_RIGHT: the port is dead
1238 */
1239kern_return_t
1240ipc_kobject_nsrequest(
1241 ipc_port_t port,
1242 mach_port_mscount_t sync,
1243 mach_port_mscount_t *mscount)
1244{
1245 kern_return_t kr = KERN_INVALID_RIGHT;
1246
1247 if (IP_VALID(port)) {
1248 ip_mq_lock(port);
1249
1250 if (mscount) {
1251 *mscount = port->ip_mscount;
1252 }
1253 if (ip_active(port)) {
1254 kr = ipc_kobject_nsrequest_locked(port, sync);
1255 }
1256
1257 ip_mq_unlock(port);
1258 } else if (mscount) {
1259 *mscount = 0;
1260 }
1261
1262 return kr;
1263}
1264
1265ipc_port_t
1266ipc_kobject_copy_send(
1267 ipc_port_t port,
1268 ipc_kobject_t kobject,
1269 ipc_kobject_type_t kotype)
1270{
1271 ipc_port_t sright = port;
1272
1273 if (IP_VALID(port)) {
1274 ip_mq_lock(port);
1275 if (ip_active(port)) {
1276 ipc_kobject_require(port, kobject, kotype);
1277 ipc_port_copy_send_any_locked(port);
1278 } else {
1279 sright = IP_DEAD;
1280 }
1281 ip_mq_unlock(port);
1282 }
1283
1284 return sright;
1285}
1286
1287ipc_port_t
1288ipc_kobject_make_send(
1289 ipc_port_t port,
1290 ipc_kobject_t kobject,
1291 ipc_kobject_type_t kotype)
1292{
1293 ipc_port_t sright = port;
1294
1295 if (IP_VALID(port)) {
1296 ip_mq_lock(port);
1297 if (ip_active(port)) {
1298 ipc_kobject_require(port, kobject, kotype);
1299 ipc_port_make_send_any_locked(port);
1300 } else {
1301 sright = IP_DEAD;
1302 }
1303 ip_mq_unlock(port);
1304 }
1305
1306 return sright;
1307}
1308
1309kern_return_t
1310ipc_kobject_make_send_nsrequest(
1311 ipc_port_t port,
1312 ipc_kobject_t kobject,
1313 ipc_kobject_type_t kotype)
1314{
1315 kern_return_t kr = KERN_INVALID_RIGHT;
1316
1317 if (IP_VALID(port)) {
1318 ip_mq_lock(port);
1319 if (ip_active(port)) {
1320 ipc_kobject_require(port, kobject, kotype);
1321 ipc_port_make_send_any_locked(port);
            kr = ipc_kobject_nsrequest_locked(port, 0);
1323 assert(kr != KERN_FAILURE);
1324 }
1325 ip_mq_unlock(port);
1326 }
1327
1328 return kr;
1329}
1330
1331kern_return_t
1332ipc_kobject_make_send_nsrequest_locked(
1333 ipc_port_t port,
1334 ipc_kobject_t kobject,
1335 ipc_kobject_type_t kotype)
1336{
1337 kern_return_t kr = KERN_INVALID_RIGHT;
1338
1339 if (ip_active(port)) {
1340 ipc_kobject_require(port, kobject, kotype);
1341 ipc_port_make_send_any_locked(port);
        kr = ipc_kobject_nsrequest_locked(port, 0);
1343 assert(kr != KERN_FAILURE);
1344 }
1345
1346 return kr;
1347}
1348
1349static inline ipc_kobject_t
1350ipc_kobject_disable_internal(
1351 ipc_port_t port,
1352 ipc_kobject_type_t type)
1353{
1354 ipc_kobject_t kobject = ipc_kobject_get_raw(port, type);
1355
1356 ipc_kobject_set_raw(port, IKO_NULL, type);
1357 if (ip_is_kolabeled(port)) {
1358 port->ip_kolabel->ikol_alt_port = IP_NULL;
1359 }
1360
1361 return kobject;
1362}
1363
1364/*
1365 * Routine: ipc_kobject_dealloc_port_and_unlock
1366 * Purpose:
1367 * Destroys a port allocated with any of the ipc_kobject_alloc*
1368 * functions.
1369 *
1370 * This will atomically:
1371 * - make the port inactive,
1372 * - optionally check the make send count
1373 * - disable (nil-out) the kobject pointer for kobjects without
1374 * a destroy callback.
1375 *
1376 * The port will retain its kobject-ness and kobject type.
1377 *
1378 *
1379 * Returns:
1380 * The kobject pointer that was set prior to this call
1381 * (possibly NULL if the kobject was already disabled).
1382 *
1383 * Conditions:
1384 * The port is active and locked.
1385 * On return the port is inactive and unlocked.
1386 */
1387__abortlike
1388static void
1389__ipc_kobject_dealloc_bad_type_panic(ipc_port_t port, ipc_kobject_type_t type)
1390{
1391 panic("port %p of type %d, expecting %d", port, ip_kotype(port), type);
1392}
1393
1394__abortlike
1395static void
1396__ipc_kobject_dealloc_bad_mscount_panic(
1397 ipc_port_t port,
1398 mach_port_mscount_t mscount,
1399 ipc_kobject_type_t type)
1400{
1401 panic("unexpected make-send count: %p[%d], %d, %d",
1402 port, type, port->ip_mscount, mscount);
1403}
1404
1405__abortlike
1406static void
1407__ipc_kobject_dealloc_bad_srights_panic(
1408 ipc_port_t port,
1409 ipc_kobject_type_t type)
1410{
1411 panic("unexpected send right count: %p[%d], %d",
1412 port, type, port->ip_srights);
1413}
1414
1415ipc_kobject_t
1416ipc_kobject_dealloc_port_and_unlock(
1417 ipc_port_t port,
1418 mach_port_mscount_t mscount,
1419 ipc_kobject_type_t type)
1420{
1421 ipc_kobject_t kobject = IKO_NULL;
    ipc_kobject_ops_t ops = ipc_kobject_ops_get(type);
1423
1424 require_ip_active(port);
1425
1426 if (ip_kotype(port) != type) {
1427 __ipc_kobject_dealloc_bad_type_panic(port, type);
1428 }
1429
1430 if (mscount && port->ip_mscount != mscount) {
1431 __ipc_kobject_dealloc_bad_mscount_panic(port, mscount, type);
1432 }
1433 if ((mscount || ops->iko_op_stable) && port->ip_srights != 0) {
1434 __ipc_kobject_dealloc_bad_srights_panic(port, type);
1435 }
1436
1437 if (!ops->iko_op_destroy) {
1438 kobject = ipc_kobject_disable_internal(port, type);
1439 }
1440
    ipc_port_dealloc_special_and_unlock(port, ipc_space_kernel);
1442
1443 return kobject;
1444}
1445
1446/*
1447 * Routine: ipc_kobject_dealloc_port
1448 * Purpose:
1449 * Destroys a port allocated with any of the ipc_kobject_alloc*
1450 * functions.
1451 *
1452 * This will atomically:
1453 * - make the port inactive,
1454 * - optionally check the make send count
1455 * - disable (nil-out) the kobject pointer for kobjects without
1456 * a destroy callback.
1457 *
1458 * The port will retain its kobject-ness and kobject type.
1459 *
1460 *
1461 * Returns:
1462 * The kobject pointer that was set prior to this call
1463 * (possibly NULL if the kobject was already disabled).
1464 *
1465 * Conditions:
1466 * Nothing is locked.
1467 * The port is active.
1468 * On return the port is inactive.
1469 */
1470ipc_kobject_t
1471ipc_kobject_dealloc_port(
1472 ipc_port_t port,
1473 mach_port_mscount_t mscount,
1474 ipc_kobject_type_t type)
1475{
1476 ip_mq_lock(port);
1477 return ipc_kobject_dealloc_port_and_unlock(port, mscount, type);
1478}
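
/*
 * Usage sketch (illustrative; example_t and IKOT_EXAMPLE are assumed names):
 * teardown typically mirrors the IKOT_PORT_SUBST_ONCE no-senders handler
 * above: destroy the port, optionally checking the expected make-send count,
 * and recover the kobject pointer that was bound to it:
 *
 *     example_t ex = (example_t)ipc_kobject_dealloc_port(port, mscount,
 *         IKOT_EXAMPLE);
 *     if (ex != NULL) {
 *         // drop the object reference the port was holding
 *     }
 */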
1479
1480/*
1481 * Routine: ipc_kobject_enable
1482 * Purpose:
1483 * Make a port represent a kernel object of the given type.
1484 * The caller is responsible for handling refs for the
1485 * kernel object, if necessary.
1486 * Conditions:
1487 * Nothing locked.
1488 * The port must be active.
1489 */
1490void
1491ipc_kobject_enable(
1492 ipc_port_t port,
1493 ipc_kobject_t kobject,
1494 ipc_kobject_type_t type)
1495{
1496 assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1497
1498 ip_mq_lock(port);
1499 require_ip_active(port);
1500
1501 if (type != ip_kotype(port)) {
1502 panic("%s: unexpected kotype of port %p: want %d, got %d",
1503 __func__, port, type, ip_kotype(port));
1504 }
1505
1506 ipc_kobject_set_raw(port, kobject, type);
1507
1508 ip_mq_unlock(port);
1509}
1510
1511/*
1512 * Routine: ipc_kobject_disable_locked
1513 * Purpose:
1514 * Clear the kobject pointer for a port.
1515 * Conditions:
1516 * The port is locked.
1517 * Returns the current kobject pointer.
1518 */
1519ipc_kobject_t
1520ipc_kobject_disable_locked(
1521 ipc_port_t port,
1522 ipc_kobject_type_t type)
1523{
1524 if (ip_active(port)) {
1525 assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1526 }
1527
1528 if (ip_kotype(port) != type) {
1529 panic("port %p of type %d, expecting %d",
1530 port, ip_kotype(port), type);
1531 }
1532
1533 return ipc_kobject_disable_internal(port, type);
1534}
1535
1536/*
1537 * Routine: ipc_kobject_disable
1538 * Purpose:
1539 * Clear the kobject pointer for a port.
1540 * Conditions:
1541 * Nothing locked.
1542 * Returns the current kobject pointer.
1543 */
1544ipc_kobject_t
1545ipc_kobject_disable(
1546 ipc_port_t port,
1547 ipc_kobject_type_t type)
1548{
1549 ipc_kobject_t kobject;
1550
1551 ip_mq_lock(port);
1552 kobject = ipc_kobject_disable_locked(port, type);
1553 ip_mq_unlock(port);
1554
1555 return kobject;
1556}
1557
1558/*
1559 * Routine: ipc_kobject_upgrade_mktimer_locked
1560 * Purpose:
1561 * Upgrades a port to mktimer kobject status
1562 *
1563 * This pattern is rather bad as it leads to various
1564 * confusions that need to be special cased with kobject-ness
1565 * of ports. No new port with dual kobject/message-queue
1566 * semantics should be made ever.
1567 *
1568 * Conditions:
1569 * Port is locked
1570 */
1571void
1572ipc_kobject_upgrade_mktimer_locked(
1573 ipc_port_t port,
1574 ipc_kobject_t kobject)
1575{
    ipc_kobject_set_internal(port, kobject, IKOT_TIMER);
1577}
1578
1579/*
1580 * Routine: ipc_kobject_notify_no_senders
1581 * Purpose:
1582 * Handles a no-senders notification
1583 * sent to a kobject.
1584 *
1585 * A port reference is consumed.
1586 *
1587 * Conditions:
1588 * Nothing locked.
1589 */
1590void
1591ipc_kobject_notify_no_senders(
1592 ipc_port_t port,
1593 mach_port_mscount_t mscount)
1594{
1595 ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1596
1597 assert(ops->iko_op_no_senders);
1598 ops->iko_op_no_senders(port, mscount);
1599
1600 /* consume the ref ipc_notify_no_senders_prepare left */
1601 ip_release(port);
1602}
1603
1604/*
 * Routine: ipc_kobject_notify_send_once_and_unlock
 * Purpose:
 * Handles a send-once notification
1608 * sent to a kobject.
1609 *
1610 * A send-once port reference is consumed.
1611 *
1612 * Conditions:
1613 * Port is locked.
1614 */
1615void
1616ipc_kobject_notify_send_once_and_unlock(
1617 ipc_port_t port)
1618{
1619 /*
1620 * drop the send once right while we hold the port lock.
1621 * we will keep a port reference while we run the possible
1622 * callouts to kobjects.
1623 *
     * This is a simplified version of ipc_port_release_sonce()
1625 * since kobjects can't be special reply ports.
1626 */
1627 assert(!port->ip_specialreply);
1628
1629 ip_sorights_dec(port);
1630 ip_mq_unlock(port);
1631
1632 /*
     * because there are very few consumers,
1634 * the code here isn't generic as it's really not worth it.
1635 */
1636 switch (ip_kotype(port)) {
1637 case IKOT_TASK_RESUME:
1638 task_suspension_send_once(port);
1639 break;
1640 default:
1641 break;
1642 }
1643
1644 ip_release(port);
1645}
1646
1647
1648/*
1649 * Routine: ipc_kobject_destroy
1650 * Purpose:
1651 * Release any kernel object resources associated
1652 * with the port, which is being destroyed.
1653 *
1654 * This path to free object resources should only be
1655 * needed when resources are associated with a user's port.
1656 * In the normal case, when the kernel is the receiver,
1657 * the code calling ipc_kobject_dealloc_port() should clean
1658 * up the object resources.
1659 *
1660 * Cleans up any kobject label that might be present.
1661 * Conditions:
1662 * The port is not locked, but it is dead.
1663 */
1664void
1665ipc_kobject_destroy(
1666 ipc_port_t port)
1667{
1668 ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1669
1670 if (ops->iko_op_permanent) {
        panic("trying to destroy a permanent port %p", port);
1672 }
1673 if (ops->iko_op_destroy) {
1674 ops->iko_op_destroy(port);
1675 }
1676
1677 if (ip_is_kolabeled(port)) {
1678 ipc_kobject_label_t labelp = port->ip_kolabel;
1679
1680 assert(labelp != NULL);
1681 assert(labelp->ikol_alt_port == IP_NULL);
1682 assert(ip_is_kobject(port));
1683 port->ip_kolabel = NULL;
1684 io_bits_andnot(ip_to_object(port), IO_BITS_KOLABEL);
1685 zfree(ipc_kobject_label_zone, labelp);
1686 }
1687}
1688
1689/*
1690 * Routine: ipc_kobject_label_substitute_task
1691 * Purpose:
1692 * Substitute a task control port for its immovable
1693 * equivalent when the receiver is that task.
1694 * Conditions:
1695 * Space is write locked and active.
1696 * Port is locked and active.
1697 * Returns:
1698 * - IP_NULL port if no substitution is to be done
1699 * - a valid port if a substitution needs to happen
1700 */
1701static ipc_port_t
1702ipc_kobject_label_substitute_task(
1703 ipc_space_t space,
1704 ipc_kobject_label_t kolabel,
1705 ipc_port_t port)
1706{
1707 ipc_port_t subst = IP_NULL;
    task_t task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
1709
1710 if (task != TASK_NULL && task == space->is_task) {
1711 if ((subst = kolabel->ikol_alt_port)) {
1712 return subst;
1713 }
1714 }
1715
1716 return IP_NULL;
1717}
1718
1719/*
1720 * Routine: ipc_kobject_label_substitute_task_read
1721 * Purpose:
1722 * Substitute a task read port for its immovable
1723 * control equivalent when the receiver is that task.
1724 * Conditions:
1725 * Space is write locked and active.
1726 * Port is locked and active.
1727 * Returns:
1728 * - IP_NULL port if no substitution is to be done
1729 * - a valid port if a substitution needs to happen
1730 */
1731static ipc_port_t
1732ipc_kobject_label_substitute_task_read(
1733 ipc_space_t space,
1734 ipc_kobject_label_t kolabel,
1735 ipc_port_t port)
1736{
1737 ipc_port_t subst = IP_NULL;
    task_t task = ipc_kobject_get_raw(port, IKOT_TASK_READ);
1739
1740 if (task != TASK_NULL && task == space->is_task) {
1741 if ((subst = kolabel->ikol_alt_port)) {
1742 return subst;
1743 }
1744 }
1745
1746 return IP_NULL;
1747}
1748
1749/*
1750 * Routine: ipc_kobject_label_substitute_thread
1751 * Purpose:
1752 * Substitute a thread control port for its immovable
1753 * equivalent when it belongs to the receiver task.
1754 * Conditions:
1755 * Space is write locked and active.
1756 * Port is locked and active.
1757 * Returns:
1758 * - IP_NULL port if no substitution is to be done
1759 * - a valid port if a substitution needs to happen
1760 */
1761static ipc_port_t
1762ipc_kobject_label_substitute_thread(
1763 ipc_space_t space,
1764 ipc_kobject_label_t kolabel,
1765 ipc_port_t port)
1766{
1767 ipc_port_t subst = IP_NULL;
    thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
1769
1770 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1771 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1772 return subst;
1773 }
1774 }
1775
1776 return IP_NULL;
1777}
1778
1779/*
1780 * Routine: ipc_kobject_label_substitute_thread_read
1781 * Purpose:
1782 * Substitute a thread read port for its immovable
1783 * control equivalent when it belongs to the receiver task.
1784 * Conditions:
1785 * Space is write locked and active.
1786 * Port is locked and active.
1787 * Returns:
1788 * - IP_NULL port if no substitution is to be done
1789 * - a valid port if a substitution needs to happen
1790 */
1791static ipc_port_t
1792ipc_kobject_label_substitute_thread_read(
1793 ipc_space_t space,
1794 ipc_kobject_label_t kolabel,
1795 ipc_port_t port)
1796{
1797 ipc_port_t subst = IP_NULL;
    thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_READ);
1799
1800 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1801 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1802 return subst;
1803 }
1804 }
1805
1806 return IP_NULL;
1807}
1808
1809/*
1810 * Routine: ipc_kobject_label_check
1811 * Purpose:
1812 * Check to see if the space is allowed to possess
1813 * a right for the given port. In order to qualify,
1814 * the space label must contain all the privileges
1815 * listed in the port/kobject label.
1816 *
1817 * Conditions:
1818 * Space is write locked and active.
1819 * Port is locked and active.
1820 *
1821 * Returns:
1822 * Whether the copyout is authorized.
1823 *
1824 * If a port substitution is requested, the space is unlocked,
1825 * the port is unlocked and its "right" consumed.
1826 *
1827 * As of now, substituted ports only happen for send rights.
1828 */
1829bool
1830ipc_kobject_label_check(
1831 ipc_space_t space,
1832 ipc_port_t port,
1833 mach_msg_type_name_t msgt_name,
1834 ipc_object_copyout_flags_t *flags,
1835 ipc_port_t *subst_portp)
1836{
1837 ipc_kobject_label_t kolabel;
1838 ipc_label_t label;
1839
1840 assert(is_active(space));
1841 assert(ip_active(port));
1842
1843 *subst_portp = IP_NULL;
1844
    /* Unlabeled ports/kobjects are always allowed */
1846 if (!ip_is_kolabeled(port)) {
1847 return true;
1848 }
1849
1850 /* Never OK to copyout the receive right for a labeled kobject */
1851 if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
1852 panic("ipc_kobject_label_check: attempted receive right "
1853 "copyout for labeled kobject");
1854 }
1855
1856 kolabel = port->ip_kolabel;
1857 label = kolabel->ikol_label;
1858
1859 if ((*flags & IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK) == 0 &&
1860 (label & IPC_LABEL_SUBST_MASK)) {
1861 ipc_port_t subst = IP_NULL;
1862
1863 if (msgt_name != MACH_MSG_TYPE_PORT_SEND) {
1864 return false;
1865 }
1866
1867 if ((label & IPC_LABEL_SUBST_MASK) == IPC_LABEL_SUBST_ONCE) {
1868 /*
1869 * The next check will _not_ substitute.
1870 * hollow out our one-time wrapper,
1871 * and steal its send right.
1872 */
1873 *flags |= IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK;
            subst = ipc_kobject_disable_locked(port,
                IKOT_PORT_SUBST_ONCE);
1876 is_write_unlock(space);
1877 ipc_port_release_send_and_unlock(port);
1878 if (subst == IP_NULL) {
1879 panic("subst-once port %p was consumed twice", port);
1880 }
1881 *subst_portp = subst;
1882 return true;
1883 }
1884
1885 switch (label & IPC_LABEL_SUBST_MASK) {
1886 case IPC_LABEL_SUBST_TASK:
1887 subst = ipc_kobject_label_substitute_task(space,
1888 kolabel, port);
1889 break;
1890 case IPC_LABEL_SUBST_TASK_READ:
1891 subst = ipc_kobject_label_substitute_task_read(space,
1892 kolabel, port);
1893 break;
1894 case IPC_LABEL_SUBST_THREAD:
1895 subst = ipc_kobject_label_substitute_thread(space,
1896 kolabel, port);
1897 break;
1898 case IPC_LABEL_SUBST_THREAD_READ:
1899 subst = ipc_kobject_label_substitute_thread_read(space,
1900 kolabel, port);
1901 break;
1902 default:
1903 panic("unexpected label: %llx", label);
1904 }
1905
1906 if (subst != IP_NULL) {
1907 ip_reference(subst);
1908 is_write_unlock(space);
1909
1910 /*
1911 * We do not hold a proper send right on `subst`,
1912 * only a reference.
1913 *
1914 * Because of how thread/task termination works,
1915 * there is no guarantee copy_send() would work,
1916 * so we need to make_send().
1917 *
1918 * We can do that because ports tagged with
1919 * IPC_LABEL_SUBST_{THREAD,TASK} do not use
1920 * the no-senders notification.
1921 */
1922
1923 ipc_port_release_send_and_unlock(port);
1924 /* no check: dPAC integrity */
            port = ipc_port_make_send_any(subst);
1926 ip_release(subst);
1927 *subst_portp = port;
1928 return true;
1929 }
1930 }
1931
1932 return (label & space->is_label & IPC_LABEL_SPACE_MASK) ==
1933 (label & IPC_LABEL_SPACE_MASK);
1934}
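
/*
 * Worked example (illustrative): a task control port carrying the
 * IPC_LABEL_SUBST_TASK label, copied out as a send right into the space of
 * the very task it represents, takes the IPC_LABEL_SUBST_TASK arm above:
 * ipc_kobject_label_substitute_task() returns the port stored in
 * ikol_alt_port, the original right is released, and a freshly made send
 * right on that substitute is handed back through *subst_portp.
 */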
1935