1/*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62/*
63 */
64
65/*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71#include <ipc/port.h>
72#include <mach/mach_types.h>
73#include <mach/boolean.h>
74#include <mach/kern_return.h>
75#include <mach/mach_param.h>
76#include <mach/task_special_ports.h>
77#include <mach/thread_special_ports.h>
78#include <mach/thread_status.h>
79#include <mach/exception_types.h>
80#include <mach/memory_object_types.h>
81#include <mach/mach_traps.h>
82#include <mach/task_server.h>
83#include <mach/thread_act_server.h>
84#include <mach/mach_host_server.h>
85#include <mach/host_priv_server.h>
86#include <mach/vm_map_server.h>
87
88#include <kern/exc_guard.h>
89#include <kern/kern_types.h>
90#include <kern/host.h>
91#include <kern/ipc_kobject.h>
92#include <kern/ipc_tt.h>
93#include <kern/kalloc.h>
94#include <kern/thread.h>
95#include <kern/ux_handler.h>
96#include <kern/misc_protos.h>
97#include <kdp/kdp_dyld.h>
98
99#include <vm/vm_map.h>
100#include <vm/vm_pageout.h>
101#include <vm/vm_protos.h>
102#include <libkern/coreanalytics/coreanalytics.h>
103
104#include <security/mac_mach_internal.h>
105
106#if CONFIG_CSR
107#include <sys/csr.h>
108#endif
109
110#include <sys/code_signing.h> /* for developer mode state */
111
#if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
/* bootarg -- presumably relaxes task-port policy for platform binaries on dev kernels; TODO confirm usage site */
extern int cs_relax_platform_task_ports;
#endif

/* BSD/IOKit helpers declared ad hoc here rather than through a shared header */
extern boolean_t IOCurrentTaskHasEntitlement(const char *);
extern boolean_t proc_is_simulated(const proc_t);
extern struct proc* current_proc(void);

/* bootarg to create lightweight corpse for thread identity lockdown */
TUNABLE(bool, thid_should_crash, "thid_should_crash", true);

/* entitlement that gates setting exception ports */
#define SET_EXCEPTION_ENTITLEMENT "com.apple.private.set-exception-port"

/*
 * CoreAnalytics telemetry for exception-port set operations.
 * NOTE(review): field semantics (caller proc, target proc, exception mask,
 * task/thread level) inferred from field names -- confirm against emitters.
 */
CA_EVENT(set_exception,
    CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc,
    CA_STATIC_STRING(CA_PROCNAME_LEN), thread_proc,
    CA_INT, mask,
    CA_STATIC_STRING(6), level);

/* which kind of special reply port a bind/unbind operation targets */
__options_decl(ipc_reply_port_type_t, uint32_t, {
	IRPT_NONE = 0x00,
	IRPT_USER = 0x01,
	IRPT_KERNEL = 0x02,
});

/* forward declarations */
static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
extern kern_return_t task_conversion_eval(task_t caller, task_t victim, int flavor);
static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
148
/*
 * Routine: ipc_task_init
 * Purpose:
 * Initialize a task's IPC state: create the task's IPC space and
 * its control/name kernel ports, then either default-initialize
 * the special/exception/registered ports (first task) or inherit
 * them from the parent.
 *
 * If non-null, some state will be inherited from the parent.
 * The parent must be appropriately initialized.
 * Conditions:
 * Nothing locked.
 */

void
ipc_task_init(
	task_t task,
	task_t parent)
{
	ipc_space_t space;
	ipc_port_t kport;       /* control-flavor kobject port */
	ipc_port_t nport;       /* name-flavor kobject port */
	ipc_port_t pport;       /* "self" port; equals kport here, may diverge later */
	kern_return_t kr;
	int i;


	kr = ipc_space_create(IPC_LABEL_NONE, spacep: &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/*
	 * Ports are allocated with no kobject attached (IKO_NULL);
	 * they are wired to the task later in ipc_task_enable().
	 */
	kport = ipc_kobject_alloc_port(IKO_NULL, type: IKOT_TASK_CONTROL,
	    options: IPC_KOBJECT_ALLOC_NONE);
	pport = kport;

	nport = ipc_kobject_alloc_port(IKO_NULL, type: IKOT_TASK_NAME,
	    options: IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_self = pport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not
		 * work with a naked send right held in kernel.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(port: kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* slot 0 never gets a label associated; keep its pointer NULL */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(action: &task->exc_actions[i],
		    label: mac_exc_create_label(action: &task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(s: &task->exc_actions[0], n: sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* first task: start from a clean slate, host port from host_priv */
		ipc_port_t port = IP_NULL;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* copy inherited state out of the parent under its itk lock */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send_any(port: parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    exception_port_copy_send(port: parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent: parent->exc_actions + i,
			    child: task->exc_actions + i);
#endif
		}

		task->itk_host = host_port_copy_send(port: parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send_mqueue(port: parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send_mqueue(port: parent->itk_task_access);

		itk_unlock(parent);
	}
}
283
/*
 * Routine: ipc_task_set_immovable_pinned
 * Purpose:
 * Make a task's control port immovable and/or pinned
 * according to its control port options. If control port
 * is immovable, allocate an immovable control port for the
 * task and optionally pin it.
 * Conditions:
 * Task's control port is movable and not pinned.
 */
void
ipc_task_set_immovable_pinned(
	task_t task)
{
	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	ipc_port_t new_pport;

	/* pport is the same as kport at ipc_task_init() time */
	assert(task->itk_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]);
	assert(task->itk_self == task->itk_settable_self);
	assert(!task_is_a_corpse(task));

	/* only tasks opt in immovable control port can have pinned control port */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* allocated with no kobject attached; enabled only at the end */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, type: IKOT_TASK_CONTROL, options);

		assert(kport != IP_NULL);
		/* relabel kport so it records new_pport as its alternate port */
		ipc_port_set_label(port: kport, IPC_LABEL_SUBST_TASK);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		itk_lock(task);
		task->itk_self = new_pport;
		itk_unlock(task);

		/* enable the pinned port */
		ipc_kobject_enable(port: new_pport, kobject: task, type: IKOT_TASK_CONTROL);
	}
}
328
329/*
330 * Routine: ipc_task_enable
331 * Purpose:
332 * Enable a task for IPC access.
333 * Conditions:
334 * Nothing locked.
335 */
336void
337ipc_task_enable(
338 task_t task)
339{
340 ipc_port_t kport;
341 ipc_port_t nport;
342 ipc_port_t iport;
343 ipc_port_t rdport;
344 ipc_port_t pport;
345
346 itk_lock(task);
347 if (!task->active) {
348 /*
349 * task has been terminated before we can enable IPC access.
350 * The check is to make sure we don't accidentally re-enable
351 * the task ports _after_ they've been disabled during
352 * task_terminate_internal(), in which case we will hit the
353 * !task->ipc_active assertion in ipc_task_terminate().
354 *
355 * Technically we should grab task lock when checking task
356 * active bit, but since task termination unsets task->active
357 * _before_ calling ipc_task_disable(), we can always see the
358 * truth with just itk_lock() and bail if disable has been called.
359 */
360 itk_unlock(task);
361 return;
362 }
363
364 assert(!task->ipc_active || task_is_a_corpse(task));
365 task->ipc_active = true;
366
367 kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
368 if (kport != IP_NULL) {
369 ipc_kobject_enable(port: kport, kobject: task, type: IKOT_TASK_CONTROL);
370 }
371 nport = task->itk_task_ports[TASK_FLAVOR_NAME];
372 if (nport != IP_NULL) {
373 ipc_kobject_enable(port: nport, kobject: task, type: IKOT_TASK_NAME);
374 }
375 iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
376 if (iport != IP_NULL) {
377 ipc_kobject_enable(port: iport, kobject: task, type: IKOT_TASK_INSPECT);
378 }
379 rdport = task->itk_task_ports[TASK_FLAVOR_READ];
380 if (rdport != IP_NULL) {
381 ipc_kobject_enable(port: rdport, kobject: task, type: IKOT_TASK_READ);
382 }
383 pport = task->itk_self;
384 if (pport != kport && pport != IP_NULL) {
385 assert(task_is_immovable(task));
386 ipc_kobject_enable(port: pport, kobject: task, type: IKOT_TASK_CONTROL);
387 }
388
389 itk_unlock(task);
390}
391
/*
 * Routine: ipc_task_disable
 * Purpose:
 * Disable IPC access to a task: detach the task from all of
 * its port flavors without destroying the ports themselves
 * (that happens later in ipc_task_terminate()).
 * Conditions:
 * Nothing locked.
 */

void
ipc_task_disable(
	task_t task)
{
	ipc_port_t kport;       /* control flavor */
	ipc_port_t nport;       /* name flavor */
	ipc_port_t iport;       /* inspect flavor */
	ipc_port_t rdport;      /* read flavor */
	ipc_port_t rport;       /* resume port */
	ipc_port_t pport;       /* immovable "self" port (may equal kport) */

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(port: kport, type: IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(port: nport, type: IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(port: iport, type: IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(port: rdport, type: IKOT_TASK_READ);
	}
	pport = task->itk_self;
	if (pport != IP_NULL) {
		/* see port_name_is_pinned_itk_self() */
		pport->ip_receiver_name = MACH_PORT_SPECIAL_DEFAULT;
		if (pport != kport) {
			assert(task_is_immovable(task));
			assert(pport->ip_immovable_send);
			ipc_kobject_disable(port: pport, type: IKOT_TASK_CONTROL);
		}
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(port: rport, type: IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
470
/*
 * Routine: ipc_task_terminate
 * Purpose:
 * Clean up and destroy a task's IPC state: detach all ports
 * under the itk lock, then -- with the lock dropped -- release
 * every naked send right and deallocate the kobject ports.
 * Conditions:
 * Nothing locked. The task must be suspended.
 * (Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
	task_t task)
{
	ipc_port_t kport;       /* control flavor */
	ipc_port_t nport;       /* name flavor */
	ipc_port_t iport;       /* inspect flavor */
	ipc_port_t rdport;      /* read flavor */
	ipc_port_t rport;       /* resume port */
	ipc_port_t pport;       /* immovable "self" port (may equal kport) */
	ipc_port_t sself;       /* settable self send right */
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	sself = task->itk_settable_self;
	pport = IP_NULL;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	/* take ownership of the dyld notifier array; freed below, unlocked */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	pport = task->itk_self;
	task->itk_self = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	if (IP_VALID(sself)) {
		ipc_port_release_send(port: sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(port: notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(port: task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(action: task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(port: task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(port: task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(port: task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(port: task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(port: task->itk_registered[i]);
		}
	}

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(port: rdport, mscount: 0, type: IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(port: kport, mscount: 0, type: IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	/* destroy other kernel ports */
	ipc_kobject_dealloc_port(port: nport, mscount: 0, type: IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(port: iport, mscount: 0, type: IKOT_TASK_INSPECT);
	}
	if (pport != IP_NULL && pport != kport) {
		ipc_kobject_dealloc_port(port: pport, mscount: 0, type: IKOT_TASK_CONTROL);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(port: rport, mscount: 0, type: IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
609
/*
 * Routine: ipc_task_reset
 * Purpose:
 * Reset a task's IPC state to protect it when
 * it enters an elevated security context. The
 * task name port can remain the same - since it
 * represents no specific privilege.
 * Conditions:
 * Nothing locked. The task must be suspended.
 * (Or the current thread must be in the task.)
 */

void
ipc_task_reset(
	task_t task)
{
	ipc_port_t old_kport, old_pport, new_kport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	ipc_port_t *notifiers_ptr = NULL;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label(NULL);
#endif

	/* allocate the replacement control port before taking the itk lock */
	new_kport = ipc_kobject_alloc_port(kobject: (ipc_kobject_t)task,
	    type: IKOT_TASK_CONTROL, options: IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_task_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old task
	 * port has not been enabled, and is left movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * task port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	itk_lock(task);

	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];

	old_pport = task->itk_self;

	if (old_pport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_kobject_dealloc_port(port: new_kport, mscount: 0, type: IKOT_TASK_CONTROL);
		if (new_pport != new_kport) {
			assert(task_is_immovable(task));
			ipc_kobject_dealloc_port(port: new_pport, mscount: 0, type: IKOT_TASK_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(label: unset_label);
#endif
		return;
	}

	old_sself = task->itk_settable_self;
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
	task->itk_self = new_pport;

	if (task_is_a_corpse(task)) {
		/* No extra send right for corpse; needed to arm no-senders notification */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(port: new_kport);
	}

	/* clears ikol_alt_port */
	ipc_kobject_disable(port: old_kport, type: IKOT_TASK_CONTROL);

	/* Reset the read and inspect flavors of task port */
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_disable(port: old_pport, type: IKOT_TASK_CONTROL);
	}

	/* strip non-privileged exception ports; keep pending corpse notify */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(action: task->exc_actions + i, newlabel: unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(port: task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	/* take ownership of the dyld notifier array; freed below, unlocked */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(label: unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(port: old_sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(port: notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(port: old_exc_actions[i]);
		}
	}

	/* destroy all task port flavors */
	if (old_rdport != IP_NULL) {
		/* read port ikol_alt_port may point to kport, dealloc first */
		ipc_kobject_dealloc_port(port: old_rdport, mscount: 0, type: IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(port: old_kport, mscount: 0, type: IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(port: old_iport, mscount: 0, type: IKOT_TASK_INSPECT);
	}
	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_dealloc_port(port: old_pport, mscount: 0, type: IKOT_TASK_CONTROL);
	}
}
767
/*
 * Routine: ipc_thread_init
 * Purpose:
 * Initialize a thread's IPC state: allocate its control port
 * (immovable when the owning task opted in and this is not the
 * main thread) and seed the thread_ro port fields.
 * Conditions:
 * Nothing locked.
 */

void
ipc_thread_init(
	task_t task,
	thread_t thread,
	thread_ro_t tro,
	ipc_thread_init_options_t options)
{
	ipc_port_t kport;       /* movable control-flavor port */
	ipc_port_t pport;       /* "self" port; may be a distinct immovable port */
	ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;

	if (task_is_immovable(task) && !(options & IPC_THREAD_INIT_MAINTHREAD)) {
		/*
		 * pthreads and raw threads both have immovable port upon creation.
		 * pthreads are subsequently pinned via ipc_port_copyout_send_pinned() whereas
		 * raw threads are left unpinned.
		 */
		alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		pport = ipc_kobject_alloc_port(kobject: (ipc_kobject_t)thread,
		    type: IKOT_THREAD_CONTROL, options: alloc_options);

		/* movable labeled port recording pport as its alternate port */
		kport = ipc_kobject_alloc_labeled_port(kobject: (ipc_kobject_t)thread,
		    type: IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, options: IPC_KOBJECT_ALLOC_NONE);
		kport->ip_kolabel->ikol_alt_port = pport;
	} else {
		/*
		 * Main thread is created movable but may be set immovable and pinned in
		 * main_thread_set_immovable_pinned(). It needs to be handled separately
		 * because task_control_port_options is not available at main thread creation time.
		 */
		kport = ipc_kobject_alloc_port(kobject: (ipc_kobject_t)thread,
		    type: IKOT_THREAD_CONTROL, options: IPC_KOBJECT_ALLOC_NONE);

		pport = kport;
	}

	tro->tro_self_port = pport;
	/* we just made the port, no need to triple check */
	tro->tro_settable_self_port = ipc_port_make_send_any(port: kport);
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
829
/*
 * Routine: ipc_main_thread_set_immovable_pinned
 * Purpose:
 * Make the main thread's control port immovable and/or pinned to
 * match the owning task's control port options. Mirrors
 * ipc_task_set_immovable_pinned(); needed because the main thread's
 * port is created movable in ipc_thread_init() before the task's
 * control port options are known.
 * Conditions:
 * Main thread's control port still movable and not pinned.
 */
void
ipc_main_thread_set_immovable_pinned(thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	task_t task = tro->tro_task;
	ipc_port_t new_pport;

	assert(thread_get_tag(thread) & THREAD_TAG_MAINTHREAD);

	/* pport is the same as kport at ipc_thread_init() time */
	assert(tro->tro_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]);
	assert(tro->tro_self_port == tro->tro_settable_self_port);

	/*
	 * Main thread port is immovable/pinned depending on whether owner task has
	 * immovable/pinned task control port. task_control_port_options is now set.
	 */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* allocated with no kobject attached; enabled only at the end */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, type: IKOT_THREAD_CONTROL, options);

		assert(kport != IP_NULL);
		/* relabel kport so it records new_pport as its alternate port */
		ipc_port_set_label(port: kport, IPC_LABEL_SUBST_THREAD);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		thread_mtx_lock(thread);
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_self_port, &new_pport);
		thread_mtx_unlock(thread);

		/* enable the pinned port */
		ipc_kobject_enable(port: new_pport, kobject: thread, type: IKOT_THREAD_CONTROL);
	}
}
869
/*
 * Wrapper so the per-thread exception action table can be allocated
 * and freed with kalloc_type/kfree_type as one typed object.
 */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};
873
874static void
875ipc_thread_init_exc_actions(thread_ro_t tro)
876{
877 struct exception_action *actions;
878
879 actions = kalloc_type(struct thread_init_exc_actions,
880 Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;
881
882#if CONFIG_MACF
883 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
884 mac_exc_associate_action_label(action: &actions[i],
885 label: mac_exc_create_label(action: &actions[i]));
886 }
887#endif
888
889 zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
890}
891
892static void
893ipc_thread_destroy_exc_actions(thread_ro_t tro)
894{
895 struct exception_action *actions = tro->tro_exc_actions;
896
897 if (actions) {
898#if CONFIG_MACF
899 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
900 mac_exc_free_action_label(action: actions + i);
901 }
902#endif
903
904 zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
905 struct thread_init_exc_actions *tr_actions =
906 (struct thread_init_exc_actions *)actions;
907 kfree_type(struct thread_init_exc_actions, tr_actions);
908 }
909}
910
911static void
912ipc_thread_ro_update_ports(
913 thread_ro_t tro,
914 const struct thread_ro *tro_tpl)
915{
916 vm_size_t offs = offsetof(struct thread_ro, tro_self_port);
917 vm_size_t size = sizeof(struct ipc_port *) * 2 + sizeof(tro_tpl->tro_ports);
918
919 static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
920 offsetof(struct thread_ro, tro_self_port) +
921 sizeof(struct ipc_port_t *));
922 static_assert(offsetof(struct thread_ro, tro_ports) ==
923 offsetof(struct thread_ro, tro_self_port) +
924 2 * sizeof(struct ipc_port_t *));
925 zalloc_ro_mut(zone_id: ZONE_ID_THREAD_RO, elem: tro,
926 offset: offs, new_data: &tro_tpl->tro_self_port, new_data_size: size);
927}
928
/*
 * Routine: ipc_thread_disable
 * Purpose:
 * Disable IPC access to a thread: detach the thread from all
 * of its port flavors and unbind its special reply port (the
 * ports are destroyed later in ipc_thread_terminate()).
 * Conditions:
 * Thread locked.
 */
void
ipc_thread_disable(
	thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	ipc_port_t iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	ipc_port_t rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	ipc_port_t pport = tro->tro_self_port;

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the thread,
	 * as thread ports do not own a reference on the thread, and
	 * convert_port_to_thread* will crash trying to resurrect a thread.
	 */
	thread->ipc_active = false;

	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(port: kport, type: IKOT_THREAD_CONTROL);
	}

	if (iport != IP_NULL) {
		ipc_kobject_disable(port: iport, type: IKOT_THREAD_INSPECT);
	}

	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(port: rdport, type: IKOT_THREAD_READ);
	}

	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		assert(pport->ip_immovable_send);
		ipc_kobject_disable(port: pport, type: IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, reply_type: IRPT_USER);
	}
}
981
/*
 * Routine: ipc_thread_terminate
 * Purpose:
 * Clean up and destroy a thread's IPC state: detach the port
 * fields under the thread mutex, release the naked send rights
 * and exception actions, then deallocate the kobject ports.
 * Conditions:
 * Nothing locked.
 */

void
ipc_thread_terminate(
	thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = IP_NULL;     /* control flavor */
	ipc_port_t iport = IP_NULL;     /* inspect flavor */
	ipc_port_t rdport = IP_NULL;    /* read flavor */
	ipc_port_t pport = IP_NULL;     /* immovable "self" port (may equal kport) */
	ipc_port_t sport = IP_NULL;     /* settable self send right */

	thread_mtx_lock(thread);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_thread_disable()).
	 */
	assert(!thread->ipc_active);

	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	pport = tro->tro_self_port;
	sport = tro->tro_settable_self_port;

	if (kport != IP_NULL) {
		if (IP_VALID(sport)) {
			ipc_port_release_send(port: sport);
		}

		/* zero out all the RO port fields in one shot */
		ipc_thread_ro_update_ports(tro, tro_tpl: &(struct thread_ro){ });

		if (tro->tro_exc_actions != NULL) {
			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(tro->tro_exc_actions[i].port)) {
					ipc_port_release_send(port: tro->tro_exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(tro);
		}
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
	thread_mtx_unlock(thread);

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(port: rdport, mscount: 0, type: IKOT_THREAD_READ);
	}
	/* control port can also have ikol_alt_port */
	if (kport != IP_NULL) {
		ipc_kobject_dealloc_port(port: kport, mscount: 0, type: IKOT_THREAD_CONTROL);
	}
	/* ikol_alt_port cleared */

	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(port: iport, mscount: 0, type: IKOT_THREAD_INSPECT);
	}
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		ipc_kobject_dealloc_port(port: pport, mscount: 0, type: IKOT_THREAD_CONTROL);
	}
	if (thread->ith_kernel_reply_port != IP_NULL) {
		thread_dealloc_kernel_special_reply_port(thread);
	}
}
1062
1063/*
1064 * Routine: ipc_thread_reset
1065 * Purpose:
1066 * Reset the IPC state for a given Mach thread when
1067 * its task enters an elevated security context.
1068 * All flavors of thread port and its exception ports have
1069 * to be reset. Its RPC reply port cannot have any
1070 * rights outstanding, so it should be fine. The thread
1071 * inspect and read port are set to NULL.
1072 * Conditions:
1073 * Nothing locked.
1074 */
1075
void
ipc_thread_reset(
 thread_t thread)
{
 thread_ro_t tro = get_thread_ro(thread);
 ipc_port_t old_kport, new_kport, old_pport, new_pport;
 ipc_port_t old_sself;
 ipc_port_t old_rdport;
 ipc_port_t old_iport;
 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
 boolean_t has_old_exc_actions = FALSE;
 boolean_t thread_is_immovable;
 int i;

#if CONFIG_MACF
 struct label *new_label = mac_exc_create_label(NULL);
#endif

 thread_is_immovable = ip_is_immovable_send(tro->tro_self_port);

 /* allocate the replacement control port before taking the mutex */
 new_kport = ipc_kobject_alloc_port(kobject: (ipc_kobject_t)thread,
 type: IKOT_THREAD_CONTROL, options: IPC_KOBJECT_ALLOC_NONE);
 /*
  * ipc_thread_reset() only happens during sugid or corpsify.
  *
  * (1) sugid happens early in exec_mach_imgact(), at which point the old thread
  * port is still movable/not pinned.
  * (2) corpse cannot execute more code so the notion of the immovable/pinned
  * thread port is bogus, and should appear as if it doesn't have one.
  *
  * So simply leave pport the same as kport.
  */
 new_pport = new_kport;

 thread_mtx_lock(thread);

 /* snapshot the old ports under the thread mutex */
 old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
 old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
 old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];

 old_sself = tro->tro_settable_self_port;
 old_pport = tro->tro_self_port;

 if (old_kport == IP_NULL && thread->inspection == FALSE) {
  /* thread is already terminated (can this happen?) */
  thread_mtx_unlock(thread);
  /* undo the allocation made above and bail out */
  ipc_kobject_dealloc_port(port: new_kport, mscount: 0, type: IKOT_THREAD_CONTROL);
  if (thread_is_immovable) {
   ipc_kobject_dealloc_port(port: new_pport, mscount: 0,
   type: IKOT_THREAD_CONTROL);
  }
#if CONFIG_MACF
  mac_exc_free_label(label: new_label);
#endif
  return;
 }

 thread->ipc_active = true;

 /* install the fresh control port (new_pport aliases new_kport, see above) */
 struct thread_ro tpl = {
  .tro_self_port = new_pport,
  /* we just made the port, no need to triple check */
  .tro_settable_self_port = ipc_port_make_send_any(port: new_kport),
  .tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
 };

 ipc_thread_ro_update_ports(tro, tro_tpl: &tpl);

 if (old_kport != IP_NULL) {
  /* clears ikol_alt_port */
  (void)ipc_kobject_disable(port: old_kport, type: IKOT_THREAD_CONTROL);
 }
 if (old_rdport != IP_NULL) {
  /* clears ikol_alt_port */
  (void)ipc_kobject_disable(port: old_rdport, type: IKOT_THREAD_READ);
 }
 if (old_iport != IP_NULL) {
  (void)ipc_kobject_disable(port: old_iport, type: IKOT_THREAD_INSPECT);
 }
 if (thread_is_immovable && old_pport != IP_NULL) {
  (void)ipc_kobject_disable(port: old_pport, type: IKOT_THREAD_CONTROL);
 }

 /*
  * Only ports that were set by root-owned processes
  * (privileged ports) should survive
  */
 if (tro->tro_exc_actions != NULL) {
  has_old_exc_actions = TRUE;
  for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
   if (tro->tro_exc_actions[i].privileged) {
    /* privileged action survives; nothing to release later */
    old_exc_actions[i] = IP_NULL;
   } else {
#if CONFIG_MACF
    mac_exc_update_action_label(action: tro->tro_exc_actions + i, newlabel: new_label);
#endif
    /* stash the send right; it is released after unlocking */
    old_exc_actions[i] = tro->tro_exc_actions[i].port;
    tro->tro_exc_actions[i].port = IP_NULL;
   }
  }
 }

 thread_mtx_unlock(thread);

#if CONFIG_MACF
 mac_exc_free_label(label: new_label);
#endif

 /* release the naked send rights */

 if (IP_VALID(old_sself)) {
  ipc_port_release_send(port: old_sself);
 }

 if (has_old_exc_actions) {
  for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
   ipc_port_release_send(port: old_exc_actions[i]);
  }
 }

 /* destroy the kernel ports */
 if (old_rdport != IP_NULL) {
  ipc_kobject_dealloc_port(port: old_rdport, mscount: 0, type: IKOT_THREAD_READ);
 }
 if (old_kport != IP_NULL) {
  ipc_kobject_dealloc_port(port: old_kport, mscount: 0, type: IKOT_THREAD_CONTROL);
 }
 /* ikol_alt_port cleared */

 if (old_iport != IP_NULL) {
  ipc_kobject_dealloc_port(port: old_iport, mscount: 0, type: IKOT_THREAD_INSPECT);
 }
 if (old_pport != old_kport && old_pport != IP_NULL) {
  assert(thread_is_immovable);
  ipc_kobject_dealloc_port(port: old_pport, mscount: 0, type: IKOT_THREAD_CONTROL);
 }

 /* unbind the thread special reply port */
 if (IP_VALID(thread->ith_special_reply_port)) {
  ipc_port_unbind_special_reply_port(thread, reply_type: IRPT_USER);
 }
}
1218
1219/*
1220 * Routine: retrieve_task_self_fast
1221 * Purpose:
1222 * Optimized version of retrieve_task_self,
1223 * that only works for the current task.
1224 *
1225 * Return a send right (possibly null/dead)
1226 * for the task's user-visible self port.
1227 * Conditions:
1228 * Nothing locked.
1229 */
1230
static ipc_port_t
retrieve_task_self_fast(
 task_t task)
{
 ipc_port_t port = IP_NULL;

 assert(task == current_task());

 itk_lock(task);
 assert(task->itk_self != IP_NULL);

#if CONFIG_CSR
 /* an interposed (settable) self port takes precedence when installed */
 if (task->itk_settable_self != task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
  port = ipc_port_copy_send_mqueue(port: task->itk_settable_self);
 } else
#endif
 {
  /* no interposing, return the IMMOVABLE port */
  port = ipc_kobject_make_send(port: task->itk_self, kobject: task,
  kotype: IKOT_TASK_CONTROL);
#if (DEBUG || DEVELOPMENT)
  /* sanity: immovable/pinned port attributes must match task policy */
  if (task_is_immovable(task)) {
   assert(ip_is_immovable_send(port));
   if (task_is_pinned(task)) {
    /* pinned port is also immovable */
    assert(ip_is_pinned(port));
   }
  } else {
   assert(!ip_is_immovable_send(port));
   assert(!ip_is_pinned(port));
  }
#endif
 }

 itk_unlock(task);

 return port;
}
1269
1270/*
1271 * Routine: mach_task_is_self
1272 * Purpose:
1273 * [MIG call] Checks if the task (control/read/inspect/name/movable)
1274 * port is pointing to current_task.
1275 */
1276kern_return_t
1277mach_task_is_self(
1278 task_t task,
1279 boolean_t *is_self)
1280{
1281 if (task == TASK_NULL) {
1282 return KERN_INVALID_ARGUMENT;
1283 }
1284
1285 *is_self = (task == current_task());
1286
1287 return KERN_SUCCESS;
1288}
1289
1290/*
1291 * Routine: retrieve_thread_self_fast
1292 * Purpose:
1293 * Return a send right (possibly null/dead)
1294 * for the thread's user-visible self port.
1295 *
1296 * Only works for the current thread.
1297 *
1298 * Conditions:
1299 * Nothing locked.
1300 */
1301
ipc_port_t
retrieve_thread_self_fast(
 thread_t thread)
{
 thread_ro_t tro = get_thread_ro(thread);
 ipc_port_t port = IP_NULL;

 assert(thread == current_thread());

 thread_mtx_lock(thread);

 assert(tro->tro_self_port != IP_NULL);

#if CONFIG_CSR
 /* an interposed (settable) self port takes precedence when installed */
 if (tro->tro_settable_self_port != tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
  port = ipc_port_copy_send_mqueue(port: tro->tro_settable_self_port);
 } else
#endif
 {
  /* no interposing, return IMMOVABLE_PORT */
  port = ipc_kobject_make_send(port: tro->tro_self_port, kobject: thread,
  kotype: IKOT_THREAD_CONTROL);
#if (DEBUG || DEVELOPMENT)
  /* sanity: immovable/pinned port attributes must match task policy */
  if (task_is_immovable(tro->tro_task)) {
   assert(ip_is_immovable_send(port));
   uint16_t tag = thread_get_tag(thread);
   /* terminated threads are unpinned */
   if (thread->active && (tag & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD))) {
    assert(ip_is_pinned(port));
   } else {
    assert(!ip_is_pinned(port));
   }
  } else {
   assert(!ip_is_immovable_send(port));
   assert(!ip_is_pinned(port));
  }
#endif
 }

 thread_mtx_unlock(thread);

 return port;
}
1345
1346/*
1347 * Routine: task_self_trap [mach trap]
1348 * Purpose:
1349 * Give the caller send rights for his own task port.
1350 * Conditions:
1351 * Nothing locked.
1352 * Returns:
1353 * MACH_PORT_NULL if there are any resource failures
1354 * or other errors.
1355 */
1356
mach_port_name_t
task_self_trap(
 __unused struct task_self_trap_args *args)
{
 task_t task = current_task();
 ipc_port_t sright;
 mach_port_name_t name;

 sright = retrieve_task_self_fast(task);
 name = ipc_port_copyout_send(sright, space: task->itk_space);

 /*
  * When the right is pinned, memorize the name we gave it
  * in ip_receiver_name (it's an abuse as this port really
  * isn't a message queue, but the field is up for grabs
  * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
  *
  * port_name_to_task* use this to fastpath IPCs to mach_task_self()
  * when it is pinned.
  *
  * ipc_task_disable() will revert this when the task dies.
  */
 if (sright == task->itk_self && sright->ip_pinned &&
 MACH_PORT_VALID(name)) {
  itk_lock(task);
  if (task->ipc_active) {
   if (ip_get_receiver_name(port: sright) == MACH_PORT_SPECIAL_DEFAULT) {
    /* first copyout: record the name under the itk lock */
    sright->ip_receiver_name = name;
   } else if (ip_get_receiver_name(port: sright) != name) {
    /* the memorized pinned name must never change while the task lives */
    panic("mach_task_self() name changed");
   }
  }
  itk_unlock(task);
 }
 return name;
}
1393
1394/*
1395 * Routine: thread_self_trap [mach trap]
1396 * Purpose:
1397 * Give the caller send rights for his own thread port.
1398 * Conditions:
1399 * Nothing locked.
1400 * Returns:
1401 * MACH_PORT_NULL if there are any resource failures
1402 * or other errors.
1403 */
1404
1405mach_port_name_t
1406thread_self_trap(
1407 __unused struct thread_self_trap_args *args)
1408{
1409 thread_t thread = current_thread();
1410 ipc_space_t space = current_space();
1411 ipc_port_t sright;
1412 mach_port_name_t name;
1413
1414 sright = retrieve_thread_self_fast(thread);
1415 name = ipc_port_copyout_send(sright, space);
1416 return name;
1417}
1418
1419/*
1420 * Routine: mach_reply_port [mach trap]
1421 * Purpose:
1422 * Allocate a port for the caller.
1423 * Conditions:
1424 * Nothing locked.
1425 * Returns:
1426 * MACH_PORT_NULL if there are any resource failures
1427 * or other errors.
1428 */
1429
1430mach_port_name_t
1431mach_reply_port(
1432 __unused struct mach_reply_port_args *args)
1433{
1434 ipc_port_t port;
1435 mach_port_name_t name;
1436 kern_return_t kr;
1437
1438 kr = ipc_port_alloc(space: current_task()->itk_space, flags: IPC_PORT_INIT_MESSAGE_QUEUE,
1439 namep: &name, portp: &port);
1440 if (kr == KERN_SUCCESS) {
1441 ip_mq_unlock(port);
1442 } else {
1443 name = MACH_PORT_NULL;
1444 }
1445 return name;
1446}
1447
1448/*
1449 * Routine: thread_get_special_reply_port [mach trap]
1450 * Purpose:
1451 * Allocate a special reply port for the calling thread.
1452 * Conditions:
1453 * Nothing locked.
1454 * Returns:
1455 * mach_port_name_t: send right & receive right for special reply port.
1456 * MACH_PORT_NULL if there are any resource failures
1457 * or other errors.
1458 */
1459
mach_port_name_t
thread_get_special_reply_port(
 __unused struct thread_get_special_reply_port_args *args)
{
 ipc_port_t port;
 mach_port_name_t name;
 kern_return_t kr;
 thread_t thread = current_thread();
 /* allocate with a ready-made send right, marked as a special reply port */
 ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
 IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;

 /* unbind the thread special reply port */
 if (IP_VALID(thread->ith_special_reply_port)) {
  ipc_port_unbind_special_reply_port(thread, reply_type: IRPT_USER);
 }

 kr = ipc_port_alloc(space: current_task()->itk_space, flags, namep: &name, portp: &port);
 if (kr == KERN_SUCCESS) {
  /* port comes back locked from ipc_port_alloc() */
  ipc_port_bind_special_reply_port_locked(port, reply_type: IRPT_USER);
  ip_mq_unlock(port);
 } else {
  name = MACH_PORT_NULL;
 }
 return name;
}
1485
1486/*
1487 * Routine: thread_get_kernel_special_reply_port
1488 * Purpose:
1489 * Allocate a kernel special reply port for the calling thread.
1490 * Conditions:
1491 * Nothing locked.
1492 * Returns:
1493 * Creates and sets kernel special reply port.
1494 * KERN_SUCCESS on Success.
1495 * KERN_FAILURE on Failure.
1496 */
1497
kern_return_t
thread_get_kernel_special_reply_port(void)
{
 ipc_port_t port = IP_NULL;
 thread_t thread = current_thread();

 /* unbind the thread special reply port */
 if (IP_VALID(thread->ith_kernel_reply_port)) {
  ipc_port_unbind_special_reply_port(thread, reply_type: IRPT_KERNEL);
 }

 port = ipc_port_alloc_reply(); /*returns a reference on the port */
 if (port != IPC_PORT_NULL) {
  ip_mq_lock(port);
  ipc_port_bind_special_reply_port_locked(port, reply_type: IRPT_KERNEL);
  ip_mq_unlock(port);
  ip_release(port); /* release the reference returned by ipc_port_alloc_reply */
 }
 /* NOTE(review): returns KERN_SUCCESS even when allocation failed — confirm intended */
 return KERN_SUCCESS;
}
1518
1519/*
1520 * Routine: ipc_port_bind_special_reply_port_locked
1521 * Purpose:
1522 * Bind the given port to current thread as a special reply port.
1523 * Conditions:
1524 * Port locked.
1525 * Returns:
1526 * None.
1527 */
1528
static void
ipc_port_bind_special_reply_port_locked(
 ipc_port_t port,
 ipc_reply_port_type_t reply_type)
{
 thread_t thread = current_thread();
 ipc_port_t *reply_portp;

 /* pick the per-thread slot matching the reply-port type */
 if (reply_type == IRPT_USER) {
  reply_portp = &thread->ith_special_reply_port;
 } else {
  reply_portp = &thread->ith_kernel_reply_port;
 }

 /* slot must be empty and the port must be a fresh special reply port */
 assert(*reply_portp == NULL);
 assert(port->ip_specialreply);
 assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);

 /* the slot owns a port reference; record this thread as the SRP owner */
 ip_reference(port);
 *reply_portp = port;
 port->ip_messages.imq_srp_owner_thread = thread;

 ipc_special_reply_port_bits_reset(special_reply_port: port);
}
1553
1554/*
1555 * Routine: ipc_port_unbind_special_reply_port
1556 * Purpose:
1557 * Unbind the thread's special reply port.
1558 * If the special port has threads waiting on turnstile,
 * update its inheritor.
1560 * Condition:
1561 * Nothing locked.
1562 * Returns:
1563 * None.
1564 */
static void
ipc_port_unbind_special_reply_port(
 thread_t thread,
 ipc_reply_port_type_t reply_type)
{
 ipc_port_t *reply_portp;

 /* pick the per-thread slot matching the reply-port type */
 if (reply_type == IRPT_USER) {
  reply_portp = &thread->ith_special_reply_port;
 } else {
  reply_portp = &thread->ith_kernel_reply_port;
 }

 ipc_port_t special_reply_port = *reply_portp;

 ip_mq_lock(special_reply_port);

 /* clear the slot, then detach the thread (adjusts turnstile inheritor) */
 *reply_portp = NULL;
 ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
 IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
 /* port unlocked */

 /* Destroy the port if it's a kernel special reply, else just release a ref */
 if (reply_type == IRPT_USER) {
  ip_release(special_reply_port);
 } else {
  ipc_port_dealloc_reply(special_reply_port);
 }
 return;
}
1595
1596/*
1597 * Routine: thread_dealloc_kernel_special_reply_port
1598 * Purpose:
1599 * Unbind the thread's kernel special reply port.
1600 * If the special port has threads waiting on turnstile,
 * update its inheritor.
1602 * Condition:
1603 * Called on current thread or a terminated thread.
1604 * Returns:
1605 * None.
1606 */
1607
void
thread_dealloc_kernel_special_reply_port(thread_t thread)
{
 /* thin wrapper: unbind (and thereby destroy) the kernel special reply port */
 ipc_port_unbind_special_reply_port(thread, reply_type: IRPT_KERNEL);
}
1613
1614/*
1615 * Routine: thread_get_special_port [kernel call]
1616 * Purpose:
1617 * Clones a send right for one of the thread's
1618 * special ports.
1619 * Conditions:
1620 * Nothing locked.
1621 * Returns:
1622 * KERN_SUCCESS Extracted a send right.
1623 * KERN_INVALID_ARGUMENT The thread is null.
1624 * KERN_FAILURE The thread is dead.
1625 * KERN_INVALID_ARGUMENT Invalid special port.
1626 */
1627
/* Forward declaration; the kernel/kext entry point is defined below. */
kern_return_t
thread_get_special_port(
 thread_inspect_t thread,
 int which,
 ipc_port_t *portp);
1633
static kern_return_t
thread_get_special_port_internal(
 thread_inspect_t thread,
 thread_ro_t tro,
 int which,
 ipc_port_t *portp,
 mach_thread_flavor_t flavor)
{
 kern_return_t kr;
 ipc_port_t port;

 /* enforce that the requesting flavor is privileged enough for `which` */
 if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
  return kr;
 }

 thread_mtx_lock(thread);
 if (!thread->active) {
  /* dead thread: no ports to hand out */
  thread_mtx_unlock(thread);
  return KERN_FAILURE;
 }

 switch (which) {
 case THREAD_KERNEL_PORT:
  port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
  /* honor an interposed settable-self port when one is installed */
  if (tro->tro_settable_self_port != port) {
   port = ipc_port_copy_send_mqueue(port: tro->tro_settable_self_port);
  } else
#endif
  {
   port = ipc_kobject_copy_send(port, kobject: thread, kotype: IKOT_THREAD_CONTROL);
  }
  thread_mtx_unlock(thread);
  break;

 case THREAD_READ_PORT:
 case THREAD_INSPECT_PORT:
  thread_mtx_unlock(thread);
  mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
  THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
  /* convert_thread_to_port_with_flavor consumes a thread reference */
  thread_reference(thread);
  port = convert_thread_to_port_with_flavor(thread, tro, flavor: current_flavor);
  break;

 default:
  thread_mtx_unlock(thread);
  return KERN_INVALID_ARGUMENT;
 }

 *portp = port;
 return KERN_SUCCESS;
}
1687
1688kern_return_t
1689thread_get_special_port(
1690 thread_inspect_t thread,
1691 int which,
1692 ipc_port_t *portp)
1693{
1694 if (thread == THREAD_NULL) {
1695 return KERN_INVALID_ARGUMENT;
1696 }
1697
1698 return thread_get_special_port_internal(thread, tro: get_thread_ro(thread),
1699 which, portp, THREAD_FLAVOR_CONTROL);
1700}
1701
static ipc_port_t
thread_get_non_substituted_self(thread_t thread, thread_ro_t tro)
{
 ipc_port_t port = IP_NULL;

 thread_mtx_lock(thread);
 port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
 /* honor an interposed settable-self port when one is installed */
 if (tro->tro_settable_self_port != port) {
  port = ipc_port_make_send_mqueue(port: tro->tro_settable_self_port);
 } else
#endif
 {
  port = ipc_kobject_make_send(port, kobject: thread, kotype: IKOT_THREAD_CONTROL);
 }
 thread_mtx_unlock(thread);

 /* takes ownership of the send right */
 return ipc_kobject_alloc_subst_once(target: port);
}
1722
kern_return_t
thread_get_special_port_from_user(
 mach_port_t port,
 int which,
 ipc_port_t *portp)
{
 thread_ro_t tro;
 ipc_kobject_type_t kotype;
 mach_thread_flavor_t flavor;
 kern_return_t kr = KERN_SUCCESS;

 /* takes a thread reference on success; dropped at `out` */
 thread_t thread = convert_port_to_thread_inspect_no_eval(port);

 if (thread == THREAD_NULL) {
  return KERN_INVALID_ARGUMENT;
 }

 tro = get_thread_ro(thread);
 kotype = ip_kotype(port);

 if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
  /*
   * only check for threads belong to current_task,
   * because foreign thread ports are always movable
   */
  if (mac_task_check_get_movable_control_port()) {
   kr = KERN_DENIED;
   goto out;
  }
#endif
  if (kotype == IKOT_THREAD_CONTROL) {
   /* hand out a one-time substitution port for the caller's own thread */
   *portp = thread_get_non_substituted_self(thread, tro);
   goto out;
  }
 }

 /* map the incoming port's kobject type to the privilege flavor it carries */
 switch (kotype) {
 case IKOT_THREAD_CONTROL:
  flavor = THREAD_FLAVOR_CONTROL;
  break;
 case IKOT_THREAD_READ:
  flavor = THREAD_FLAVOR_READ;
  break;
 case IKOT_THREAD_INSPECT:
  flavor = THREAD_FLAVOR_INSPECT;
  break;
 default:
  panic("strange kobject type");
 }

 kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
 thread_deallocate(thread);
 return kr;
}
1779
1780static kern_return_t
1781special_port_allowed_with_thread_flavor(
1782 int which,
1783 mach_thread_flavor_t flavor)
1784{
1785 switch (flavor) {
1786 case THREAD_FLAVOR_CONTROL:
1787 return KERN_SUCCESS;
1788
1789 case THREAD_FLAVOR_READ:
1790
1791 switch (which) {
1792 case THREAD_READ_PORT:
1793 case THREAD_INSPECT_PORT:
1794 return KERN_SUCCESS;
1795 default:
1796 return KERN_INVALID_CAPABILITY;
1797 }
1798
1799 case THREAD_FLAVOR_INSPECT:
1800
1801 switch (which) {
1802 case THREAD_INSPECT_PORT:
1803 return KERN_SUCCESS;
1804 default:
1805 return KERN_INVALID_CAPABILITY;
1806 }
1807
1808 default:
1809 return KERN_INVALID_CAPABILITY;
1810 }
1811}
1812
1813/*
1814 * Routine: thread_set_special_port [kernel call]
1815 * Purpose:
1816 * Changes one of the thread's special ports,
1817 * setting it to the supplied send right.
1818 * Conditions:
1819 * Nothing locked. If successful, consumes
1820 * the supplied send right.
1821 * Returns:
1822 * KERN_SUCCESS Changed the special port.
1823 * KERN_INVALID_ARGUMENT The thread is null.
1824 * KERN_INVALID_RIGHT Port is marked as immovable.
1825 * KERN_FAILURE The thread is dead.
1826 * KERN_INVALID_ARGUMENT Invalid special port.
1827 * KERN_NO_ACCESS Restricted access to set port.
1828 */
1829
kern_return_t
thread_set_special_port(
 thread_t thread,
 int which,
 ipc_port_t port)
{
 kern_return_t result = KERN_SUCCESS;
 thread_ro_t tro = NULL;
 ipc_port_t old = IP_NULL;

 if (thread == THREAD_NULL) {
  return KERN_INVALID_ARGUMENT;
 }

 /* an immovable send right may never be stashed as a special port */
 if (IP_VALID(port) && port->ip_immovable_send) {
  return KERN_INVALID_RIGHT;
 }

 switch (which) {
 case THREAD_KERNEL_PORT:
#if CONFIG_CSR
  if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
   /*
    * Only allow setting of thread-self
    * special port from user-space when SIP is
    * disabled (for Mach-on-Mach emulation).
    */
   tro = get_thread_ro(thread);

   thread_mtx_lock(thread);
   if (thread->active) {
    /* swap in the new port; the old send right is released below */
    old = tro->tro_settable_self_port;
    zalloc_ro_update_field(ZONE_ID_THREAD_RO,
    tro, tro_settable_self_port, &port);
   } else {
    result = KERN_FAILURE;
   }
   thread_mtx_unlock(thread);

   /* release the displaced right outside the mutex */
   if (IP_VALID(old)) {
    ipc_port_release_send(port: old);
   }

   return result;
  }
#else
 (void)old;
 (void)result;
 (void)tro;
#endif
 return KERN_NO_ACCESS;

 default:
  return KERN_INVALID_ARGUMENT;
 }
}
1886
1887/*
1888 * Routine: task_get_special_port [kernel call]
1889 * Purpose:
1890 * Clones a send right for one of the task's
1891 * special ports.
1892 * Conditions:
1893 * Nothing locked.
1894 * Returns:
1895 * KERN_SUCCESS Extracted a send right.
1896 * KERN_INVALID_ARGUMENT The task is null.
1897 * KERN_FAILURE The task/space is dead.
1898 * KERN_INVALID_ARGUMENT Invalid special port.
1899 */
1900
static kern_return_t
task_get_special_port_internal(
 task_t task,
 int which,
 ipc_port_t *portp,
 mach_task_flavor_t flavor)
{
 kern_return_t kr;
 ipc_port_t port;

 if (task == TASK_NULL) {
  return KERN_INVALID_ARGUMENT;
 }

 /* enforce that the requesting flavor is privileged enough for `which` */
 if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
  return kr;
 }

 itk_lock(task);
 if (!task->ipc_active) {
  /* task IPC already torn down */
  itk_unlock(task);
  return KERN_FAILURE;
 }

 switch (which) {
 case TASK_KERNEL_PORT:
  port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
  /* honor an interposed settable-self port when one is installed */
  if (task->itk_settable_self != port) {
   port = ipc_port_copy_send_mqueue(port: task->itk_settable_self);
  } else
#endif
  {
   port = ipc_kobject_copy_send(port, kobject: task, kotype: IKOT_TASK_CONTROL);
  }
  itk_unlock(task);
  break;

 case TASK_READ_PORT:
 case TASK_INSPECT_PORT:
  itk_unlock(task);
  mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
  TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
  /* convert_task_to_port_with_flavor consumes a task reference */
  task_reference(task);
  port = convert_task_to_port_with_flavor(task, flavor: current_flavor, grp: TASK_GRP_KERNEL);
  break;

 case TASK_NAME_PORT:
  port = ipc_kobject_make_send(port: task->itk_task_ports[TASK_FLAVOR_NAME],
  kobject: task, kotype: IKOT_TASK_NAME);
  itk_unlock(task);
  break;

 case TASK_HOST_PORT:
  port = host_port_copy_send(port: task->itk_host);
  itk_unlock(task);
  break;

 case TASK_BOOTSTRAP_PORT:
  port = ipc_port_copy_send_mqueue(port: task->itk_bootstrap);
  itk_unlock(task);
  break;

 case TASK_ACCESS_PORT:
  port = ipc_port_copy_send_mqueue(port: task->itk_task_access);
  itk_unlock(task);
  break;

 case TASK_DEBUG_CONTROL_PORT:
  port = ipc_port_copy_send_mqueue(port: task->itk_debug_control);
  itk_unlock(task);
  break;

#if CONFIG_PROC_RESOURCE_LIMITS
 case TASK_RESOURCE_NOTIFY_PORT:
  port = ipc_port_copy_send_mqueue(task->itk_resource_notify);
  itk_unlock(task);
  break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

 default:
  itk_unlock(task);
  return KERN_INVALID_ARGUMENT;
 }

 *portp = port;
 return KERN_SUCCESS;
}
1990
1991/* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
1992kern_return_t
1993task_get_special_port(
1994 task_t task,
1995 int which,
1996 ipc_port_t *portp)
1997{
1998 return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
1999}
2000
static ipc_port_t
task_get_non_substituted_self(task_t task)
{
 ipc_port_t port = IP_NULL;

 itk_lock(task);
 port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
 /* honor an interposed settable-self port when one is installed */
 if (task->itk_settable_self != port) {
  port = ipc_port_make_send_mqueue(port: task->itk_settable_self);
 } else
#endif
 {
  port = ipc_kobject_make_send(port, kobject: task, kotype: IKOT_TASK_CONTROL);
 }
 itk_unlock(task);

 /* takes ownership of the send right */
 return ipc_kobject_alloc_subst_once(target: port);
}
2021
/* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
 mach_port_t port,
 int which,
 ipc_port_t *portp)
{
 ipc_kobject_type_t kotype;
 mach_task_flavor_t flavor;
 kern_return_t kr = KERN_SUCCESS;

 /* takes a task reference on success; dropped at `out` */
 task_t task = convert_port_to_task_inspect_no_eval(port);

 if (task == TASK_NULL) {
  return KERN_INVALID_ARGUMENT;
 }

 kotype = ip_kotype(port);

#if CONFIG_MACF
 /* let the MAC policy veto the request before anything else */
 if (mac_task_check_get_task_special_port(task: current_task(), target: task, which)) {
  kr = KERN_DENIED;
  goto out;
 }
#endif

 if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
  /*
   * only check for current_task,
   * because foreign task ports are always movable
   */
  if (mac_task_check_get_movable_control_port()) {
   kr = KERN_DENIED;
   goto out;
  }
#endif
  if (kotype == IKOT_TASK_CONTROL) {
   /* hand out a one-time substitution port for the caller's own task */
   *portp = task_get_non_substituted_self(task);
   goto out;
  }
 }

 /* map the incoming port's kobject type to the privilege flavor it carries */
 switch (kotype) {
 case IKOT_TASK_CONTROL:
  flavor = TASK_FLAVOR_CONTROL;
  break;
 case IKOT_TASK_READ:
  flavor = TASK_FLAVOR_READ;
  break;
 case IKOT_TASK_INSPECT:
  flavor = TASK_FLAVOR_INSPECT;
  break;
 default:
  panic("strange kobject type");
 }

 kr = task_get_special_port_internal(task, which, portp, flavor);
out:
 task_deallocate(task);
 return kr;
}
2084
2085static kern_return_t
2086special_port_allowed_with_task_flavor(
2087 int which,
2088 mach_task_flavor_t flavor)
2089{
2090 switch (flavor) {
2091 case TASK_FLAVOR_CONTROL:
2092 return KERN_SUCCESS;
2093
2094 case TASK_FLAVOR_READ:
2095
2096 switch (which) {
2097 case TASK_READ_PORT:
2098 case TASK_INSPECT_PORT:
2099 case TASK_NAME_PORT:
2100 return KERN_SUCCESS;
2101 default:
2102 return KERN_INVALID_CAPABILITY;
2103 }
2104
2105 case TASK_FLAVOR_INSPECT:
2106
2107 switch (which) {
2108 case TASK_INSPECT_PORT:
2109 case TASK_NAME_PORT:
2110 return KERN_SUCCESS;
2111 default:
2112 return KERN_INVALID_CAPABILITY;
2113 }
2114
2115 default:
2116 return KERN_INVALID_CAPABILITY;
2117 }
2118}
2119
2120/*
2121 * Routine: task_set_special_port [MIG call]
2122 * Purpose:
2123 * Changes one of the task's special ports,
2124 * setting it to the supplied send right.
2125 * Conditions:
2126 * Nothing locked. If successful, consumes
2127 * the supplied send right.
2128 * Returns:
2129 * KERN_SUCCESS Changed the special port.
2130 * KERN_INVALID_ARGUMENT The task is null.
2131 * KERN_INVALID_RIGHT Port is marked as immovable.
2132 * KERN_FAILURE The task/space is dead.
2133 * KERN_INVALID_ARGUMENT Invalid special port.
2134 * KERN_NO_ACCESS Restricted access to set port.
2135 */
2136
kern_return_t
task_set_special_port_from_user(
 task_t task,
 int which,
 ipc_port_t port)
{
 if (task == TASK_NULL) {
  return KERN_INVALID_ARGUMENT;
 }

#if CONFIG_MACF
 /* let the MAC policy veto the change before delegating */
 if (mac_task_check_set_task_special_port(task: current_task(), target: task, which, port)) {
  return KERN_DENIED;
 }
#endif

 /* common path shared with in-kernel callers */
 return task_set_special_port(task, which, port);
}
2155
/* Kernel call only. MIG uses task_set_special_port_from_user() */
kern_return_t
task_set_special_port(
 task_t task,
 int which,
 ipc_port_t port)
{
 if (task == TASK_NULL) {
  return KERN_INVALID_ARGUMENT;
 }

 /* driver extensions are never allowed to rewire special ports */
 if (task_is_driver(task: current_task())) {
  return KERN_NO_ACCESS;
 }

 /* an immovable send right may never be stashed as a special port */
 if (IP_VALID(port) && port->ip_immovable_send) {
  return KERN_INVALID_RIGHT;
 }

 switch (which) {
 case TASK_KERNEL_PORT:
 case TASK_HOST_PORT:
#if CONFIG_CSR
  if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
   /*
    * Only allow setting of task-self / task-host
    * special ports from user-space when SIP is
    * disabled (for Mach-on-Mach emulation).
    */
   break;
  }
#endif
  return KERN_NO_ACCESS;
 default:
  break;
 }

 return task_set_special_port_internal(task, which, port);
}
2195
2196/*
2197 * Routine: task_set_special_port_internal
2198 * Purpose:
2199 * Changes one of the task's special ports,
2200 * setting it to the supplied send right.
2201 * Conditions:
2202 * Nothing locked. If successful, consumes
2203 * the supplied send right.
2204 * Returns:
2205 * KERN_SUCCESS Changed the special port.
2206 * KERN_INVALID_ARGUMENT The task is null.
2207 * KERN_FAILURE The task/space is dead.
2208 * KERN_INVALID_ARGUMENT Invalid special port.
2209 * KERN_NO_ACCESS Restricted access to overwrite port.
2210 */
2211
kern_return_t
task_set_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	ipc_port_t old = IP_NULL;
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/*
	 * Allow setting special port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set special
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		/* Task IPC never initialized, or already torn down. */
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	/* Swap the new right in under the lock; release the old one after. */
	switch (which) {
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* Drop the displaced right outside the itk lock. */
	if (IP_VALID(old)) {
		ipc_port_release_send(port: old);
	}
out:
	return rc;
}
2289/*
2290 * Routine: mach_ports_register [kernel call]
2291 * Purpose:
2292 * Stash a handful of port send rights in the task.
2293 * Child tasks will inherit these rights, but they
2294 * must use mach_ports_lookup to acquire them.
2295 *
2296 * The rights are supplied in a (wired) kalloc'd segment.
2297 * Rights which aren't supplied are assumed to be null.
2298 * Conditions:
2299 * Nothing locked. If successful, consumes
2300 * the supplied rights and memory.
2301 * Returns:
2302 * KERN_SUCCESS Stashed the port rights.
2303 * KERN_INVALID_RIGHT Port in array is marked immovable.
2304 * KERN_INVALID_ARGUMENT The task is null.
2305 * KERN_INVALID_ARGUMENT The task is dead.
2306 * KERN_INVALID_ARGUMENT The memory param is null.
2307 * KERN_INVALID_ARGUMENT Too many port rights supplied.
2308 */
2309
kern_return_t
mach_ports_register(
	task_t                  task,
	mach_port_array_t       memory,
	mach_msg_type_number_t  portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++) {
		ports[i] = memory[i];
		/* Immovable send rights may not be registered. */
		if (IP_VALID(ports[i]) && ports[i]->ip_immovable_send) {
			return KERN_INVALID_RIGHT;
		}
	}
	for (; i < TASK_PORT_REGISTER_MAX; i++) {
		ports[i] = IP_NULL;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Replace the old send rights with the new.
	 * Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;
	}

	itk_unlock(task);

	/* ports[] now holds the displaced rights; drop them. */
	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(ports[i])) {
			ipc_port_release_send(port: ports[i]);
		}
	}

	/*
	 * Now that the operation is known to be successful,
	 * we can free the memory.
	 */

	if (portsCnt != 0) {
		kfree_type(mach_port_t, portsCnt, memory);
	}

	return KERN_SUCCESS;
}
2377
2378/*
2379 * Routine: mach_ports_lookup [kernel call]
2380 * Purpose:
2381 * Retrieves (clones) the stashed port send rights.
2382 * Conditions:
2383 * Nothing locked. If successful, the caller gets
2384 * rights and memory.
2385 * Returns:
2386 * KERN_SUCCESS Retrieved the send rights.
2387 * KERN_INVALID_ARGUMENT The task is null.
2388 * KERN_INVALID_ARGUMENT The task is dead.
2389 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
2390 */
2391
kern_return_t
mach_ports_lookup(
	task_t                  task,
	mach_port_array_t       *portsp,
	mach_msg_type_number_t  *portsCnt)
{
	ipc_port_t *ports;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Allocate before taking the lock; Z_NOFAIL means this can't return NULL. */
	ports = kalloc_type(ipc_port_t, TASK_PORT_REGISTER_MAX,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		kfree_type(ipc_port_t, TASK_PORT_REGISTER_MAX, ports);

		return KERN_INVALID_ARGUMENT;
	}

	/* Clone a send right for each registered slot (IP_NULL stays IP_NULL). */
	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ports[i] = ipc_port_copy_send_any(port: task->itk_registered[i]);
	}

	itk_unlock(task);

	/* Caller owns the array and the cloned rights. */
	*portsp = ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
2425
/*
 * Security-policy evaluation for translating a task/thread port to its
 * underlying task (in-trans) or producing a port for it (out-trans).
 * Returns KERN_SUCCESS when `caller` may resolve `victim` at `flavor`.
 */
static kern_return_t
task_conversion_eval_internal(
	task_t          caller,
	task_t          victim,
	boolean_t       out_trans,
	int             flavor) /* control or read */
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

	/*
	 * Both asserts check the same values: TASK_FLAVOR_* and
	 * THREAD_FLAVOR_* are statically asserted equal by the callers.
	 */
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == TASK_FLAVOR_READ);
	assert(flavor == THREAD_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_READ);

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	allow_kern_task = out_trans && allow_kern_task_out_trans;

	if (victim == TASK_NULL) {
		return KERN_INVALID_SECURITY;
	}

	task_require(task: victim);

	/*
	 * If Developer Mode is not enabled, deny attempts to translate foreign task's
	 * control port completely. Read port or corpse is okay.
	 */
	if (!developer_mode_state()) {
		if ((caller != victim) &&
		    (flavor == TASK_FLAVOR_CONTROL) && !task_is_a_corpse(task: victim)) {
#if XNU_TARGET_OS_OSX
			return KERN_INVALID_SECURITY;
#else
			/*
			 * All control ports are immovable.
			 * Return an error for outtrans, but panic on intrans.
			 */
			if (out_trans) {
				return KERN_INVALID_SECURITY;
			} else {
				panic("Just like pineapple on pizza, this task/thread port doesn't belong here.");
			}
#endif /* XNU_TARGET_OS_OSX */
		}
	}

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port (subject to Developer Mode check).
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task && !allow_kern_task) {
		return KERN_INVALID_SECURITY;
	}

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if (task_get_platform_binary(victim) && !task_get_platform_binary(caller)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* boot-arg escape hatch for development; never on secure kernels */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2520
2521kern_return_t
2522task_conversion_eval(task_t caller, task_t victim, int flavor)
2523{
2524 /* flavor is mach_task_flavor_t or mach_thread_flavor_t */
2525 static_assert(TASK_FLAVOR_CONTROL == THREAD_FLAVOR_CONTROL);
2526 static_assert(TASK_FLAVOR_READ == THREAD_FLAVOR_READ);
2527 return task_conversion_eval_internal(caller, victim, FALSE, flavor);
2528}
2529
2530static kern_return_t
2531task_conversion_eval_out_trans(task_t caller, task_t victim, int flavor)
2532{
2533 assert(flavor == TASK_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_CONTROL);
2534 return task_conversion_eval_internal(caller, victim, TRUE, flavor);
2535}
2536
2537/*
2538 * Routine: task_port_kotype_valid_for_flavor
2539 * Purpose:
2540 * Check whether the kobject type of a mach port
2541 * is valid for conversion to a task of given flavor.
2542 */
2543static boolean_t
2544task_port_kotype_valid_for_flavor(
2545 natural_t kotype,
2546 mach_task_flavor_t flavor)
2547{
2548 switch (flavor) {
2549 /* Ascending capability */
2550 case TASK_FLAVOR_NAME:
2551 if (kotype == IKOT_TASK_NAME) {
2552 return TRUE;
2553 }
2554 OS_FALLTHROUGH;
2555 case TASK_FLAVOR_INSPECT:
2556 if (kotype == IKOT_TASK_INSPECT) {
2557 return TRUE;
2558 }
2559 OS_FALLTHROUGH;
2560 case TASK_FLAVOR_READ:
2561 if (kotype == IKOT_TASK_READ) {
2562 return TRUE;
2563 }
2564 OS_FALLTHROUGH;
2565 case TASK_FLAVOR_CONTROL:
2566 if (kotype == IKOT_TASK_CONTROL) {
2567 return TRUE;
2568 }
2569 break;
2570 default:
2571 panic("strange task flavor");
2572 }
2573
2574 return FALSE;
2575}
2576
2577/*
2578 * Routine: convert_port_to_task_with_flavor_locked_noref
2579 * Purpose:
2580 * Internal helper routine to convert from a locked port to a task.
2581 * Args:
2582 * port - target port
2583 * flavor - requested task port flavor
2584 * options - port translation options
2585 * Conditions:
2586 * Port is locked and active.
2587 */
2588static task_t
2589convert_port_to_task_with_flavor_locked_noref(
2590 ipc_port_t port,
2591 mach_task_flavor_t flavor,
2592 port_intrans_options_t options)
2593{
2594 ipc_kobject_type_t type = ip_kotype(port);
2595 task_t task;
2596
2597 ip_mq_lock_held(port);
2598 require_ip_active(port);
2599
2600 if (!task_port_kotype_valid_for_flavor(kotype: type, flavor)) {
2601 return TASK_NULL;
2602 }
2603
2604 task = ipc_kobject_get_locked(port, type);
2605 if (task == TASK_NULL) {
2606 return TASK_NULL;
2607 }
2608
2609 if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
2610 assert(flavor == TASK_FLAVOR_CONTROL);
2611 return TASK_NULL;
2612 }
2613
2614 /* TODO: rdar://42389187 */
2615 if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
2616 assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
2617 }
2618
2619 if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
2620 task_conversion_eval(caller: current_task(), victim: task, flavor)) {
2621 return TASK_NULL;
2622 }
2623
2624 return task;
2625}
2626
2627/*
2628 * Routine: convert_port_to_task_with_flavor_locked
2629 * Purpose:
2630 * Internal helper routine to convert from a locked port to a task.
2631 * Args:
2632 * port - target port
2633 * flavor - requested task port flavor
2634 * options - port translation options
2635 * grp - task reference group
2636 * Conditions:
2637 * Port is locked and active.
2638 * Produces task ref or TASK_NULL.
2639 */
2640static task_t
2641convert_port_to_task_with_flavor_locked(
2642 ipc_port_t port,
2643 mach_task_flavor_t flavor,
2644 port_intrans_options_t options,
2645 task_grp_t grp)
2646{
2647 task_t task;
2648
2649 task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
2650 options);
2651
2652 if (task != TASK_NULL) {
2653 task_reference_grp(task, grp);
2654 }
2655
2656 return task;
2657}
2658
2659/*
2660 * Routine: convert_port_to_task_with_flavor
2661 * Purpose:
2662 * Internal helper for converting from a port to a task.
2663 * Doesn't consume the port ref; produces a task ref,
2664 * which may be null.
2665 * Args:
2666 * port - target port
2667 * flavor - requested task port flavor
2668 * options - port translation options
2669 * grp - task reference group
2670 * Conditions:
2671 * Nothing locked.
2672 */
2673static task_t
2674convert_port_to_task_with_flavor(
2675 ipc_port_t port,
2676 mach_task_flavor_t flavor,
2677 port_intrans_options_t options,
2678 task_grp_t grp)
2679{
2680 task_t task = TASK_NULL;
2681 task_t self = current_task();
2682
2683 if (IP_VALID(port)) {
2684 if (port == self->itk_self) {
2685 task_reference_grp(self, grp);
2686 return self;
2687 }
2688
2689 ip_mq_lock(port);
2690 if (ip_active(port)) {
2691 task = convert_port_to_task_with_flavor_locked(port,
2692 flavor, options, grp);
2693 }
2694 ip_mq_unlock(port);
2695 }
2696
2697 return task;
2698}
2699
/* Control-flavor task in-trans for in-kernel callers. */
task_t
convert_port_to_task(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           options: PORT_INTRANS_OPTIONS_NONE, grp: TASK_GRP_KERNEL);
}
2707
/* Control-flavor task in-trans on behalf of MIG-generated stubs. */
task_t
convert_port_to_task_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           options: PORT_INTRANS_OPTIONS_NONE, grp: TASK_GRP_MIG);
}
2715
/* Read-flavor task in-trans (corpses allowed) for in-kernel callers. */
task_read_t
convert_port_to_task_read(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           options: PORT_INTRANS_ALLOW_CORPSE_TASK, grp: TASK_GRP_KERNEL);
}
2723
/* Read-flavor in-trans that skips the task_conversion_eval() policy check. */
static task_read_t
convert_port_to_task_read_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, grp: TASK_GRP_KERNEL);
}
2731
/* Read-flavor task in-trans (corpses allowed) for MIG-generated stubs. */
task_read_t
convert_port_to_task_read_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           options: PORT_INTRANS_ALLOW_CORPSE_TASK, grp: TASK_GRP_MIG);
}
2739
/* Inspect-flavor task in-trans; inspect always skips the policy eval. */
task_inspect_t
convert_port_to_task_inspect(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, grp: TASK_GRP_KERNEL);
}
2747
/* Same as convert_port_to_task_inspect(); inspect already skips eval. */
task_inspect_t
convert_port_to_task_inspect_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, grp: TASK_GRP_KERNEL);
}
2755
/* Inspect-flavor task in-trans for MIG-generated stubs. */
task_inspect_t
convert_port_to_task_inspect_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, grp: TASK_GRP_MIG);
}
2763
/* Name-flavor task in-trans; name flavor always skips the policy eval. */
task_name_t
convert_port_to_task_name(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, grp: TASK_GRP_KERNEL);
}
2771
/* Name-flavor task in-trans for MIG-generated stubs. */
task_name_t
convert_port_to_task_name_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, grp: TASK_GRP_MIG);
}
2779
2780/*
2781 * Routine: convert_port_to_task_policy
2782 * Purpose:
2783 * Convert from a port to a task.
2784 * Doesn't consume the port ref; produces a task ref,
2785 * which may be null.
2786 * If the port is being used with task_port_set(), any task port
2787 * type other than TASK_CONTROL requires an entitlement. If the
2788 * port is being used with task_port_get(), TASK_NAME requires an
2789 * entitlement.
2790 * Conditions:
2791 * Nothing locked.
2792 */
2793static task_t
2794convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2795{
2796 task_t task = TASK_NULL;
2797
2798 if (!IP_VALID(port)) {
2799 return TASK_NULL;
2800 }
2801
2802 task = set ?
2803 convert_port_to_task_mig(port) :
2804 convert_port_to_task_inspect_mig(port);
2805
2806 if (task == TASK_NULL &&
2807 IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2808 task = convert_port_to_task_name_mig(port);
2809 }
2810
2811 return task;
2812}
2813
/* task_policy_set() in-trans: requires a control-flavor port. */
task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, true);
}
2819
/* task_policy_get() in-trans: inspect-flavor port suffices. */
task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, false);
}
2825
2826/*
2827 * Routine: convert_port_to_task_suspension_token
2828 * Purpose:
2829 * Convert from a port to a task suspension token.
2830 * Doesn't consume the port ref; produces a suspension token ref,
2831 * which may be null.
2832 * Conditions:
2833 * Nothing locked.
2834 */
2835static task_suspension_token_t
2836convert_port_to_task_suspension_token_grp(
2837 ipc_port_t port,
2838 task_grp_t grp)
2839{
2840 task_suspension_token_t task = TASK_NULL;
2841
2842 if (IP_VALID(port)) {
2843 ip_mq_lock(port);
2844 task = ipc_kobject_get_locked(port, type: IKOT_TASK_RESUME);
2845 if (task != TASK_NULL) {
2846 task_reference_grp(task, grp);
2847 }
2848 ip_mq_unlock(port);
2849 }
2850
2851 return task;
2852}
2853
/* Suspension-token in-trans attributed to the external reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_external(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, grp: TASK_GRP_EXTERNAL);
}
2860
/* Suspension-token in-trans for MIG-generated stubs. */
task_suspension_token_t
convert_port_to_task_suspension_token_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, grp: TASK_GRP_MIG);
}
2867
/* Suspension-token in-trans for in-kernel callers. */
task_suspension_token_t
convert_port_to_task_suspension_token_kernel(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, grp: TASK_GRP_KERNEL);
}
2874
2875/*
2876 * Routine: convert_port_to_space_with_flavor
2877 * Purpose:
2878 * Internal helper for converting from a port to a space.
2879 * Doesn't consume the port ref; produces a space ref,
2880 * which may be null.
2881 * Args:
2882 * port - target port
2883 * flavor - requested ipc space flavor
2884 * options - port translation options
2885 * Conditions:
2886 * Nothing locked.
2887 */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task = TASK_NULL;

	/* There is no name-flavor notion of an IPC space. */
	assert(flavor != TASK_FLAVOR_NAME);

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			/* noref: the task is only used under the port lock below */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The space will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			space = task->itk_space;
			is_reference(space);
		}
		ip_mq_unlock(port);
	}

	return space;
}
2926
/* Control-flavor space in-trans. */
ipc_space_t
convert_port_to_space(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
	           options: PORT_INTRANS_OPTIONS_NONE);
}
2934
/* Read-flavor space in-trans (corpses allowed). */
ipc_space_read_t
convert_port_to_space_read(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           options: PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2942
/* Read-flavor space in-trans, skipping the conversion policy eval. */
ipc_space_read_t
convert_port_to_space_read_no_eval(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2950
/* Inspect-flavor space in-trans; inspect always skips the policy eval. */
ipc_space_inspect_t
convert_port_to_space_inspect(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2958
2959/*
2960 * Routine: convert_port_to_map_with_flavor
2961 * Purpose:
2962 * Internal helper for converting from a port to a map.
2963 * Doesn't consume the port ref; produces a map ref,
2964 * which may be null.
2965 * Args:
2966 * port - target port
2967 * flavor - requested vm map flavor
2968 * options - port translation options
2969 * Conditions:
2970 * Nothing locked.
2971 */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	task_t task = TASK_NULL;
	vm_map_t map = VM_MAP_NULL;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	/* map access always goes through the full conversion policy check */
	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);

	if (IP_VALID(port)) {
		ip_mq_lock(port);

		if (ip_active(port)) {
			/* noref: the task is only used under the port lock below */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The vm map will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			map = task->map;

			/* Hard stop: never hand userspace the kernel's map. */
			if (map->pmap == kernel_pmap) {
				panic("userspace has control access to a "
				    "kernel map %p through task %p", map, task);
			}

			pmap_require(pmap: map->pmap);
			vm_map_reference(map);
		}

		ip_mq_unlock(port);
	}

	return map;
}
3021
/* Control-flavor vm_map in-trans. */
vm_map_t
convert_port_to_map(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
	           options: PORT_INTRANS_OPTIONS_NONE);
}
3029
/* Read-flavor vm_map in-trans (corpses allowed). */
vm_map_read_t
convert_port_to_map_read(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
	           options: PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3037
/* Stub: inspect-flavor map access is not implemented, always NULL. */
vm_map_inspect_t
convert_port_to_map_inspect(
	__unused ipc_port_t     port)
{
	/* there is no vm_map_inspect_t routines at the moment. */
	return VM_MAP_INSPECT_NULL;
}
3045
3046/*
3047 * Routine: thread_port_kotype_valid_for_flavor
3048 * Purpose:
3049 * Check whether the kobject type of a mach port
3050 * is valid for conversion to a thread of given flavor.
3051 */
3052static boolean_t
3053thread_port_kotype_valid_for_flavor(
3054 natural_t kotype,
3055 mach_thread_flavor_t flavor)
3056{
3057 switch (flavor) {
3058 /* Ascending capability */
3059 case THREAD_FLAVOR_INSPECT:
3060 if (kotype == IKOT_THREAD_INSPECT) {
3061 return TRUE;
3062 }
3063 OS_FALLTHROUGH;
3064 case THREAD_FLAVOR_READ:
3065 if (kotype == IKOT_THREAD_READ) {
3066 return TRUE;
3067 }
3068 OS_FALLTHROUGH;
3069 case THREAD_FLAVOR_CONTROL:
3070 if (kotype == IKOT_THREAD_CONTROL) {
3071 return TRUE;
3072 }
3073 break;
3074 default:
3075 panic("strange thread flavor");
3076 }
3077
3078 return FALSE;
3079}
3080
3081/*
3082 * Routine: convert_port_to_thread_with_flavor_locked
3083 * Purpose:
3084 * Internal helper routine to convert from a locked port to a thread.
3085 * Args:
3086 * port - target port
3087 * flavor - requested thread port flavor
3088 * options - port translation options
3089 * Conditions:
3090 * Port is locked and active.
3091 * Produces a thread ref or THREAD_NULL.
3092 */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t              port,
	mach_thread_flavor_t    flavor,
	port_intrans_options_t  options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_kotype(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* The port's kobject type must be at least as strong as `flavor`. */
	if (!thread_port_kotype_valid_for_flavor(kotype: type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* Some callers explicitly exclude the calling thread itself. */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* Restricted to sibling threads: skip corpse/policy checks. */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		/* Corpse-task threads only translate when the caller opted in. */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		/* Same security policy as task-port translation. */
		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(caller: current_task(), victim: task, flavor) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	/* All checks passed: produce the promised thread reference. */
	thread_reference(thread);
	return thread;
}
3147
3148/*
3149 * Routine: convert_port_to_thread_with_flavor
3150 * Purpose:
3151 * Internal helper for converting from a port to a thread.
3152 * Doesn't consume the port ref; produces a thread ref,
3153 * which may be null.
3154 * Args:
3155 * port - target port
3156 * flavor - requested thread port flavor
3157 * options - port translation options
3158 * Conditions:
3159 * Nothing locked.
3160 */
3161static thread_t
3162convert_port_to_thread_with_flavor(
3163 ipc_port_t port,
3164 mach_thread_flavor_t flavor,
3165 port_intrans_options_t options)
3166{
3167 thread_t thread = THREAD_NULL;
3168
3169 if (IP_VALID(port)) {
3170 ip_mq_lock(port);
3171 if (ip_active(port)) {
3172 thread = convert_port_to_thread_with_flavor_locked(port,
3173 flavor, options);
3174 }
3175 ip_mq_unlock(port);
3176 }
3177
3178 return thread;
3179}
3180
/* Control-flavor thread in-trans. */
thread_t
convert_port_to_thread(
	ipc_port_t              port)
{
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
	           options: PORT_INTRANS_OPTIONS_NONE);
}
3188
/* Read-flavor thread in-trans (corpse tasks allowed). */
thread_read_t
convert_port_to_thread_read(
	ipc_port_t              port)
{
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
	           options: PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3196
/* Read-flavor thread in-trans, skipping the conversion policy eval. */
static thread_read_t
convert_port_to_thread_read_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3204
/* Inspect-flavor thread in-trans; inspect always skips the policy eval. */
thread_inspect_t
convert_port_to_thread_inspect(
	ipc_port_t              port)
{
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3212
/* Same as convert_port_to_thread_inspect(); inspect already skips eval. */
static thread_inspect_t
convert_port_to_thread_inspect_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
	           options: PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3220
3221static inline ipc_kobject_type_t
3222thread_flavor_to_kotype(mach_thread_flavor_t flavor)
3223{
3224 switch (flavor) {
3225 case THREAD_FLAVOR_CONTROL:
3226 return IKOT_THREAD_CONTROL;
3227 case THREAD_FLAVOR_READ:
3228 return IKOT_THREAD_READ;
3229 default:
3230 return IKOT_THREAD_INSPECT;
3231 }
3232}
3233
3234/*
3235 * Routine: convert_thread_to_port_with_flavor
3236 * Purpose:
3237 * Convert from a thread to a port of given flavor.
3238 * Consumes a thread ref; produces a naked send right
3239 * which may be invalid.
3240 * Conditions:
3241 * Nothing locked.
3242 */
static ipc_port_t
convert_thread_to_port_with_flavor(
	thread_t                thread,
	thread_ro_t             tro,
	mach_thread_flavor_t    flavor)
{
	ipc_kobject_type_t kotype = thread_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == THREAD_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(caller: current_task(), victim: tro->tro_task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	/* Thread IPC already torn down: return IP_NULL. */
	if (!thread->ipc_active) {
		goto exit;
	}

	port = tro->tro_ports[flavor];
	if (flavor == THREAD_FLAVOR_CONTROL) {
		/* control port always exists while ipc_active; just make a send */
		port = ipc_kobject_make_send(port, kobject: thread, kotype: IKOT_THREAD_CONTROL);
	} else if (IP_VALID(port)) {
		/* read/inspect port already allocated lazily on a prior call */
		(void)ipc_kobject_make_send_nsrequest(port, kobject: thread, kotype);
	} else {
		/*
		 * Claim a send right on the thread read/inspect port, and request a no-senders
		 * notification on that port (if none outstanding). A thread reference is not
		 * donated here even though the ports are created lazily because it doesn't own the
		 * kobject that it points to. Threads manage their lifetime explicitly and
		 * have to synchronize with each other, between the task/thread terminating and the
		 * send-once notification firing, and this is done under the thread mutex
		 * rather than with atomics.
		 */
		port = ipc_kobject_alloc_port(kobject: thread, type: kotype,
		           options: IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST |
		           IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
		/*
		 * If Developer Mode is off, substitute read port for control
		 * port if copying out to owning task's space, for the sake of
		 * in-process exception handler.
		 *
		 * Also see: exception_deliver().
		 */
		if (!developer_mode_state() && flavor == THREAD_FLAVOR_READ) {
			ipc_port_set_label(port, IPC_LABEL_SUBST_THREAD_READ);
			port->ip_kolabel->ikol_alt_port = tro->tro_self_port;
		}
		/* publish the lazily-created port into the read-only thread data */
		zalloc_ro_update_field(ZONE_ID_THREAD_RO,
		    tro, tro_ports[flavor], &port);
	}

exit:
	thread_mtx_unlock(thread);
	/* consumes the caller's thread reference, per the routine contract */
	thread_deallocate(thread);
	return port;
}
3307
/* Control-flavor thread out-trans; consumes the thread reference. */
ipc_port_t
convert_thread_to_port(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
}
3315
/* Read-flavor thread out-trans; consumes the thread reference. */
ipc_port_t
convert_thread_read_to_port(thread_read_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
}
3322
/* Inspect-flavor thread out-trans; consumes the thread reference. */
ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
}
3329
3330
3331/*
3332 * Routine: port_name_to_thread
3333 * Purpose:
3334 * Convert from a port name to a thread reference
3335 * A name of MACH_PORT_NULL is valid for the null thread.
3336 * Conditions:
3337 * Nothing locked.
3338 */
thread_t
port_name_to_thread(
	mach_port_name_t name,
	port_intrans_options_t options)
{
	thread_t thread = THREAD_NULL;
	ipc_port_t kport;
	kern_return_t kr;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(current_space(), name, portp: &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			/*
			 * Corpse/skip-eval options only make sense for task
			 * conversions; callers must not pass them here.
			 */
			assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
			    !(options & PORT_INTRANS_SKIP_TASK_EVAL));
			thread = convert_port_to_thread_with_flavor_locked(port: kport,
			    THREAD_FLAVOR_CONTROL, options);
			/* converter leaves the port locked; drop it here */
			ip_mq_unlock(kport);
		}
	}

	return thread;
}
3362
3363/*
3364 * Routine: port_name_is_pinned_itk_self
3365 * Purpose:
3366 * Returns whether this port name is for the pinned
3367 * mach_task_self (if it exists).
3368 *
3369 * task_self_trap() when the task port is pinned,
3370 * will memorize the name the port has in the space
3371 * in ip_receiver_name, which we can use to fast-track
3372 * this answer without taking any lock.
3373 *
3374 * ipc_task_disable() will set `ip_receiver_name` back to
3375 * MACH_PORT_SPECIAL_DEFAULT.
3376 *
3377 * Conditions:
3378 * self must be current_task()
3379 * Nothing locked.
3380 */
static bool
port_name_is_pinned_itk_self(
	task_t self,
	mach_port_name_t name)
{
	ipc_port_t kport = self->itk_self;
	/*
	 * Lock-free check (see routine header): only meaningful when the
	 * control port is pinned, and ipc_task_disable() resets
	 * ip_receiver_name to MACH_PORT_SPECIAL_DEFAULT, which is why that
	 * value is explicitly excluded.
	 */
	return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
	       kport->ip_pinned && ip_get_receiver_name(port: kport) == name;
}
3390
3391/*
3392 * Routine: port_name_to_current_task*_noref
3393 * Purpose:
3394 * Convert from a port name to current_task()
3395 * A name of MACH_PORT_NULL is valid for the null task.
3396 *
 * If current_task() is in the process of being terminated,
 * this might return a non NULL task even when port_name_to_task()
 * would return TASK_NULL.
3400 *
3401 * However, this is an acceptable race that can't be controlled by
3402 * userspace, and that downstream code using the returned task
3403 * has to handle anyway.
3404 *
3405 * ipc_space_disable() does try to narrow this race,
3406 * by causing port_name_is_pinned_itk_self() to fail.
3407 *
3408 * Returns:
3409 * current_task() if the port name was for current_task()
3410 * at the appropriate flavor.
3411 *
3412 * TASK_NULL otherwise.
3413 *
3414 * Conditions:
3415 * Nothing locked.
3416 */
static task_t
port_name_to_current_task_internal_noref(
	mach_port_name_t name,
	mach_task_flavor_t flavor)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* fast path: pinned mach_task_self() needs no space lookup */
	if (port_name_is_pinned_itk_self(self, name)) {
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(space: self->itk_space, name, portp: &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			ipc_kobject_type_t type = ip_kotype(kport);
			if (task_port_kotype_valid_for_flavor(kotype: type, flavor)) {
				/* no reference taken: this is a _noref lookup */
				task = ipc_kobject_get_locked(port: kport, type);
			}
			ip_mq_unlock(kport);
			/* only the caller's own task qualifies */
			if (task != self) {
				task = TASK_NULL;
			}
		}
	}

	return task;
}
3447
3448task_t
3449port_name_to_current_task_noref(
3450 mach_port_name_t name)
3451{
3452 return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3453}
3454
3455task_read_t
3456port_name_to_current_task_read_noref(
3457 mach_port_name_t name)
3458{
3459 return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3460}
3461
3462/*
3463 * Routine: port_name_to_task
3464 * Purpose:
3465 * Convert from a port name to a task reference
3466 * A name of MACH_PORT_NULL is valid for the null task.
3467 * Conditions:
3468 * Nothing locked.
3469 */
static task_t
port_name_to_task_grp(
	mach_port_name_t name,
	task_grp_t grp)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* fast path: pinned mach_task_self() needs no space lookup */
	if (port_name_is_pinned_itk_self(self, name)) {
		/* returned reference is accounted to the caller's group */
		task_reference_grp(self, grp);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(space: self->itk_space, name, portp: &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			task = convert_port_to_task_with_flavor_locked(port: kport,
			    TASK_FLAVOR_CONTROL, options: PORT_INTRANS_OPTIONS_NONE, grp);
			ip_mq_unlock(kport);
		}
	}
	return task;
}
3496
/* As port_name_to_task_grp, accounting the task ref to the external group. */
task_t
port_name_to_task_external(
	mach_port_name_t name)
{
	return port_name_to_task_grp(name, grp: TASK_GRP_EXTERNAL);
}
3503
/* As port_name_to_task_grp, accounting the task ref to the kernel group. */
task_t
port_name_to_task_kernel(
	mach_port_name_t name)
{
	return port_name_to_task_grp(name, grp: TASK_GRP_KERNEL);
}
3510
3511/*
3512 * Routine: port_name_to_task_read
3513 * Purpose:
3514 * Convert from a port name to a task reference
3515 * A name of MACH_PORT_NULL is valid for the null task.
3516 * Conditions:
3517 * Nothing locked.
3518 */
task_read_t
port_name_to_task_read(
	mach_port_name_t name)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_read_t tr = TASK_READ_NULL;
	task_t self = current_task();

	/* fast path: pinned mach_task_self() needs no space lookup */
	if (port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, TASK_GRP_KERNEL);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(space: self->itk_space, name, portp: &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			/* corpse task ports are accepted for read conversions */
			tr = convert_port_to_task_with_flavor_locked(port: kport,
			    TASK_FLAVOR_READ, options: PORT_INTRANS_ALLOW_CORPSE_TASK,
			    grp: TASK_GRP_KERNEL);
			ip_mq_unlock(kport);
		}
	}
	return tr;
}
3545
3546/*
3547 * Routine: port_name_to_task_read_no_eval
3548 * Purpose:
3549 * Convert from a port name to a task reference
3550 * A name of MACH_PORT_NULL is valid for the null task.
3551 * Skips task_conversion_eval() during conversion.
3552 * Conditions:
3553 * Nothing locked.
3554 */
task_read_t
port_name_to_task_read_no_eval(
	mach_port_name_t name)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_read_t tr = TASK_READ_NULL;
	task_t self = current_task();

	/* fast path: pinned mach_task_self() needs no space lookup */
	if (port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, TASK_GRP_KERNEL);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		/* unlike port_name_to_task_read, skip task_conversion_eval() */
		port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
		    PORT_INTRANS_ALLOW_CORPSE_TASK;

		kr = ipc_port_translate_send(space: self->itk_space, name, portp: &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			tr = convert_port_to_task_with_flavor_locked(port: kport,
			    TASK_FLAVOR_READ, options, grp: TASK_GRP_KERNEL);
			ip_mq_unlock(kport);
		}
	}
	return tr;
}
3583
3584/*
3585 * Routine: port_name_to_task_name
3586 * Purpose:
3587 * Convert from a port name to a task reference
3588 * A name of MACH_PORT_NULL is valid for the null task.
3589 * Conditions:
3590 * Nothing locked.
3591 */
task_name_t
port_name_to_task_name(
	mach_port_name_t name)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_name_t tn = TASK_NAME_NULL;
	task_t self = current_task();

	/* fast path: pinned mach_task_self() needs no space lookup */
	if (port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, TASK_GRP_KERNEL);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		/* name-flavor conversions never run the conversion policy check */
		port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
		    PORT_INTRANS_ALLOW_CORPSE_TASK;

		kr = ipc_port_translate_send(current_space(), name, portp: &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			tn = convert_port_to_task_with_flavor_locked(port: kport,
			    TASK_FLAVOR_NAME, options, grp: TASK_GRP_KERNEL);
			ip_mq_unlock(kport);
		}
	}
	return tn;
}
3620
3621/*
3622 * Routine: port_name_to_task_id_token
3623 * Purpose:
3624 * Convert from a port name to a task identity token reference
3625 * Conditions:
3626 * Nothing locked.
3627 */
task_id_token_t
port_name_to_task_id_token(
	mach_port_name_t name)
{
	ipc_port_t port;
	kern_return_t kr;
	task_id_token_t token = TASK_ID_TOKEN_NULL;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(current_space(), name, portp: &port);
		if (kr == KERN_SUCCESS) {
			/* port is locked; converter produces a token ref (or NULL) */
			token = convert_port_to_task_id_token(port);
			ip_mq_unlock(port);
		}
	}
	return token;
}
3645
3646/*
3647 * Routine: port_name_to_host
3648 * Purpose:
3649 * Convert from a port name to a host pointer.
3650 * NOTE: This does _not_ return a +1 reference to the host_t
3651 * Conditions:
3652 * Nothing locked.
3653 */
host_t
port_name_to_host(
	mach_port_name_t name)
{
	host_t host = HOST_NULL;
	kern_return_t kr;
	ipc_port_t port;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(current_space(), name, portp: &port);
		if (kr == KERN_SUCCESS) {
			/* see routine header: no +1 reference is produced */
			host = convert_port_to_host(port);
			ip_mq_unlock(port);
		}
	}
	return host;
}
3671
3672static inline ipc_kobject_type_t
3673task_flavor_to_kotype(mach_task_flavor_t flavor)
3674{
3675 switch (flavor) {
3676 case TASK_FLAVOR_CONTROL:
3677 return IKOT_TASK_CONTROL;
3678 case TASK_FLAVOR_READ:
3679 return IKOT_TASK_READ;
3680 case TASK_FLAVOR_INSPECT:
3681 return IKOT_TASK_INSPECT;
3682 default:
3683 return IKOT_TASK_NAME;
3684 }
3685}
3686
3687/*
3688 * Routine: convert_task_to_port_with_flavor
3689 * Purpose:
3690 * Convert from a task to a port of given flavor.
3691 * Consumes a task ref; produces a naked send right
3692 * which may be invalid.
3693 * Conditions:
3694 * Nothing locked.
3695 */
ipc_port_t
convert_task_to_port_with_flavor(
	task_t task,
	mach_task_flavor_t flavor,
	task_grp_t grp)
{
	ipc_kobject_type_t kotype = task_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	itk_lock(task);

	/* task IPC already torn down: return IP_NULL */
	if (!task->ipc_active) {
		goto exit;
	}

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == TASK_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(caller: current_task(), victim: task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
	case TASK_FLAVOR_NAME:
		/* control/name ports always exist while ipc is active */
		port = ipc_kobject_make_send(port: task->itk_task_ports[flavor],
		    kobject: task, kotype);
		break;
	/*
	 * Claim a send right on the task read/inspect port,
	 * and request a no-senders notification on that port
	 * (if none outstanding).
	 *
	 * The task's itk_lock is used to synchronize the handling
	 * of the no-senders notification with the task termination.
	 */
	case TASK_FLAVOR_READ:
	case TASK_FLAVOR_INSPECT:
		port = task->itk_task_ports[flavor];
		if (IP_VALID(port)) {
			(void)ipc_kobject_make_send_nsrequest(port,
			    kobject: task, kotype);
		} else {
			/* read/inspect ports are created lazily, under itk_lock */
			port = ipc_kobject_alloc_port(kobject: task, type: kotype,
			    options: IPC_KOBJECT_ALLOC_MAKE_SEND |
			    IPC_KOBJECT_ALLOC_NSREQUEST |
			    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
			/*
			 * If Developer Mode is off, substitute read port for control port if
			 * copying out to owning task's space, for the sake of in-process
			 * exception handler.
			 *
			 * Also see: exception_deliver().
			 */
			if (!developer_mode_state() && flavor == TASK_FLAVOR_READ) {
				ipc_port_set_label(port, IPC_LABEL_SUBST_TASK_READ);
				port->ip_kolabel->ikol_alt_port = task->itk_self;
			}

			task->itk_task_ports[flavor] = port;
		}
		break;
	}

exit:
	itk_unlock(task);
	/* the caller's task reference is consumed on every path */
	task_deallocate_grp(task, grp);
	return port;
}
3769
ipc_port_t
convert_corpse_to_port_and_nsrequest(
	task_t corpse)
{
	ipc_port_t port = IP_NULL;
	__assert_only kern_return_t kr;

	assert(task_is_a_corpse(corpse));
	itk_lock(corpse);
	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
	/* this must be the very first send right for the corpse control port */
	assert(port->ip_srights == 0);
	/* make the send right and arm a no-senders notification */
	kr = ipc_kobject_make_send_nsrequest(port, kobject: corpse, kotype: IKOT_TASK_CONTROL);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
	itk_unlock(corpse);

	/* consumes the caller's corpse task reference */
	task_deallocate(corpse);
	return port;
}
3788
/* Control-flavor conversion, kernel accounting group; consumes the task ref. */
ipc_port_t
convert_task_to_port(
	task_t task)
{
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, grp: TASK_GRP_KERNEL);
}
3795
/* Read-flavor conversion, kernel accounting group; consumes the task ref. */
ipc_port_t
convert_task_read_to_port(
	task_read_t task)
{
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, grp: TASK_GRP_KERNEL);
}
3802
/* Inspect-flavor conversion, kernel accounting group; consumes the task ref. */
ipc_port_t
convert_task_inspect_to_port(
	task_inspect_t task)
{
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, grp: TASK_GRP_KERNEL);
}
3809
/* Name-flavor conversion, kernel accounting group; consumes the task ref. */
ipc_port_t
convert_task_name_to_port(
	task_name_t task)
{
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, grp: TASK_GRP_KERNEL);
}
3816
/* Control-flavor conversion, external accounting group; consumes the task ref. */
ipc_port_t
convert_task_to_port_external(task_t task)
{
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, grp: TASK_GRP_EXTERNAL);
}
3822
/* Read-flavor conversion, external accounting group; consumes the task ref. */
ipc_port_t
convert_task_read_to_port_external(task_t task)
{
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, grp: TASK_GRP_EXTERNAL);
}
3828
/*
 * Make a send right for the caller's own (possibly pinned) control port.
 * Consumes a task ref; may only be used on current_task().
 */
ipc_port_t
convert_task_to_port_pinned(
	task_t task)
{
	ipc_port_t port = IP_NULL;

	assert(task == current_task());

	itk_lock(task);

	if (task->ipc_active) {
		port = ipc_kobject_make_send(port: task->itk_self, kobject: task,
		    kotype: IKOT_TASK_CONTROL);
	}

	/* immovable tasks must hand out pinned, immovable self ports */
	if (port && task_is_immovable(task)) {
		assert(ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
	}

	itk_unlock(task);
	task_deallocate(task);
	return port;
}
3853/*
 * Routine:	convert_task_suspension_token_to_port
3855 * Purpose:
3856 * Convert from a task suspension token to a port.
3857 * Consumes a task suspension token ref; produces a naked send-once right
3858 * which may be invalid.
3859 * Conditions:
3860 * Nothing locked.
3861 */
static ipc_port_t
convert_task_suspension_token_to_port_grp(
	task_suspension_token_t task,
	task_grp_t grp)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		itk_lock(task);
		/* the resume port is created lazily, under itk_lock */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_kobject_alloc_port(kobject: (ipc_kobject_t) task,
			    type: IKOT_TASK_RESUME, options: IPC_KOBJECT_ALLOC_NONE);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = task->itk_resume;
		ipc_kobject_require(port, kobject: task, kotype: IKOT_TASK_RESUME);
		port = ipc_port_make_sonce(port);
		itk_unlock(task);
		assert(IP_VALID(port));
	} else {
		/* terminated task: no right is produced */
		port = IP_NULL;
	}

	task_unlock(task);
	/* the caller's suspension-token reference is consumed on every path */
	task_suspension_token_deallocate_grp(suspend_token: task, grp);

	return port;
}
3896
/* Suspension-token conversion, external accounting group; consumes the token ref. */
ipc_port_t
convert_task_suspension_token_to_port_external(
	task_suspension_token_t task)
{
	return convert_task_suspension_token_to_port_grp(task, grp: TASK_GRP_EXTERNAL);
}
3903
/* Suspension-token conversion, MIG accounting group; consumes the token ref. */
ipc_port_t
convert_task_suspension_token_to_port_mig(
	task_suspension_token_t task)
{
	return convert_task_suspension_token_to_port_grp(task, grp: TASK_GRP_MIG);
}
3910
/*
 * Make a send right for a thread's (possibly pinned) control port.
 * Consumes a thread ref; produces a naked send right which may be invalid.
 */
ipc_port_t
convert_thread_to_port_pinned(
	thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);

	if (thread->ipc_active) {
		port = ipc_kobject_make_send(port: tro->tro_self_port,
		    kobject: thread, kotype: IKOT_THREAD_CONTROL);
	}

	/* immovable tasks hand out immovable thread self ports */
	if (port && task_is_immovable(tro->tro_task)) {
		assert(ip_is_immovable_send(port));
	}

	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return port;
}
3933/*
3934 * Routine: space_deallocate
3935 * Purpose:
3936 * Deallocate a space ref produced by convert_port_to_space.
3937 * Conditions:
3938 * Nothing locked.
3939 */
3940
3941void
3942space_deallocate(
3943 ipc_space_t space)
3944{
3945 if (space != IS_NULL) {
3946 is_release(space);
3947 }
3948}
3949
3950/*
3951 * Routine: space_read_deallocate
3952 * Purpose:
3953 * Deallocate a space read ref produced by convert_port_to_space_read.
3954 * Conditions:
3955 * Nothing locked.
3956 */
3957
3958void
3959space_read_deallocate(
3960 ipc_space_read_t space)
3961{
3962 if (space != IS_INSPECT_NULL) {
3963 is_release((ipc_space_t)space);
3964 }
3965}
3966
3967/*
3968 * Routine: space_inspect_deallocate
3969 * Purpose:
3970 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
3971 * Conditions:
3972 * Nothing locked.
3973 */
3974
3975void
3976space_inspect_deallocate(
3977 ipc_space_inspect_t space)
3978{
3979 if (space != IS_INSPECT_NULL) {
3980 is_release((ipc_space_t)space);
3981 }
3982}
3983
3984
3985static boolean_t
3986behavior_is_identity_protected(int new_behavior)
3987{
3988 return (new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED;
3989}
3990
3991static boolean_t
3992identity_protection_opted_out(const ipc_port_t new_port)
3993{
3994 if (IP_VALID(new_port)) {
3995 return ip_is_id_prot_opted_out(new_port);
3996 }
3997 return false;
3998}
3999
/*
 * Emit a CoreAnalytics event recording who set an exception port
 * (current process), on whom (excepting_task), for which mask and level
 * ("thread"/"task"/host).
 */
static void
send_set_exception_telemetry(const task_t excepting_task, const exception_mask_t mask, const char* level)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(set_exception);
	CA_EVENT_TYPE(set_exception) * event = ca_event->data;

	task_procname(task: current_task(), buf: (char *) &event->current_proc, size: sizeof(event->current_proc));
	task_procname(task: excepting_task, buf: (char *) &event->thread_proc, size: sizeof(event->thread_proc));
	event->mask = mask;
	strlcpy(dst: event->level, src: level, n: sizeof(event->level));

	CA_EVENT_SEND(ca_event);
}
4013
/* Returns whether the violation should be ignored */
static boolean_t
set_exception_behavior_violation(const ipc_port_t new_port, const task_t excepting_task,
    const exception_mask_t mask, const char *level)
{
	mach_port_name_t new_name = CAST_MACH_PORT_TO_NAME(new_port);
	boolean_t rate_limited;

	/* rate-limit: only the first violation per task raises a guard exception */
	task_lock(current_task());
	rate_limited = task_has_exception_telemetry(current_task());
	if (!rate_limited) {
		task_set_exception_telemetry(current_task());
	}
	task_unlock(current_task());

	if (thid_should_crash && !rate_limited) {
		/* create lightweight corpse */
		mach_port_guard_exception(name: new_name, inguard: 0, portguard: 0, reason: kGUARD_EXC_EXCEPTION_BEHAVIOR_ENFORCE);
	}

	/* always report the proc name to CA */
	send_set_exception_telemetry(excepting_task, mask, level);

	/* if the bootarg has been manually set to false, ignore the violation */
	return !thid_should_crash;
}
4040
4041/*
4042 * Protect platform binary task/thread ports.
4043 * excepting_task is NULL if we are setting a host exception port.
4044 */
static boolean_t
exception_exposes_protected_ports(const ipc_port_t new_port, const task_t excepting_task)
{
	if (!IP_VALID(new_port) || is_ux_handler_port(port: new_port)) {
		/*
		 * sending exceptions to invalid port does not pose risk
		 * ux_handler port is an immovable, read-only kobject port; doesn't need protection.
		 */
		return FALSE;
	} else if (excepting_task) {
		/* setting task/thread exception port - protect hardened binaries */
		return task_is_hardened_binary(task: excepting_task);
	}

	/* setting host port exposes all processes - always protect. */
	return TRUE;
}
4062
#if XNU_TARGET_OS_OSX && CONFIG_CSR
/*
 * Returns whether System Integrity Protection restrictions are in force.
 * csr_check() returns 0 when the queried override is allowed (i.e. the
 * restriction is lifted), so a non-zero result means SIP is enforcing.
 *
 * Fix: declare the parameter list as (void) — an empty () declarator is an
 * obsolescent C prototype with unspecified parameters, not "no parameters".
 */
static bool
SIP_is_enabled(void)
{
	return csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0;
}
#endif /* XNU_TARGET_OS_OSX && CONFIG_CSR*/
4070
/*
 * Policy gate for installing an exception port: hardened/host targets must use
 * EXCEPTION_IDENTITY_PROTECTED behavior unless one of the listed opt-outs or
 * entitlements applies. Returns FALSE (KERN_NO_ACCESS at callers) only when
 * set_exception_behavior_violation() decides the violation is fatal.
 */
boolean_t
set_exception_behavior_allowed(__unused const ipc_port_t new_port, __unused int new_behavior,
    __unused const task_t excepting_task, __unused const exception_mask_t mask, __unused const char *level)
{
	if (exception_exposes_protected_ports(new_port, excepting_task)
	    && !behavior_is_identity_protected(new_behavior)
	    && !identity_protection_opted_out(new_port) /* Ignore opted out */
#if XNU_TARGET_OS_OSX
	    && !task_opted_out_mach_hardening(task: excepting_task)
#if CONFIG_CSR
	    && SIP_is_enabled() /* cannot enforce if SIP is disabled */
#endif /* CONFIG_CSR */
#endif /* XNU_TARGET_OS_OSX */
#if CONFIG_ROSETTA
	    && !task_is_translated(current_task())
#endif /* CONFIG_ROSETTA */
	    && !proc_is_simulated(current_proc())
	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state") /* rdar://109119238 */
	    && !IOCurrentTaskHasEntitlement(SET_EXCEPTION_ENTITLEMENT)) {
		return set_exception_behavior_violation(new_port, excepting_task, mask, level);
	}

	return TRUE;
}
4095
4096/*
4097 * Routine: thread/task_set_exception_ports [kernel call]
4098 * Purpose:
4099 * Sets the thread/task exception port, flavor and
4100 * behavior for the exception types specified by the mask.
4101 * There will be one send right per exception per valid
4102 * port.
4103 * Conditions:
4104 * Nothing locked. If successful, consumes
4105 * the supplied send right.
4106 * Returns:
4107 * KERN_SUCCESS Changed the special port.
4108 * KERN_INVALID_ARGUMENT The thread is null,
4109 * Illegal mask bit set.
4110 * Illegal exception behavior
4111 * KERN_FAILURE The thread is dead.
4112 * KERN_NO_ACCESS Restricted access to set port
4113 */
4114
kern_return_t
thread_set_exception_ports(
	thread_t thread,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t privileged = task_is_privileged(task: current_task());

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* behavior is only validated when a real port is being installed */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/*
	 * rdar://77996387
	 * Avoid exposing immovable ports send rights (kobjects) to `get_exception_ports`,
	 * but allow opted out ports to still be set on thread only.
	 */
	if (IP_VALID(new_port) &&
	    ((!ip_is_id_prot_opted_out(new_port) && new_port->ip_immovable_receive) ||
	    new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* identity-protected and backtrace-preferred require MACH_EXCEPTION_CODES */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!set_exception_behavior_allowed(new_port, new_behavior, excepting_task: get_threadtask(thread), mask: exception_mask, level: "thread")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(label: new_label);
#endif
		return KERN_FAILURE;
	}

	/* per-thread exception actions are allocated lazily */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}
	/*
	 * Install a copied send right for every selected exception type,
	 * stashing the displaced rights to release after dropping the mutex.
	 */
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, newlabel: new_label) == 0
#endif
		    ) {
			old_port[i] = action->port;
			action->port = exception_port_copy_send(port: new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(label: new_label);
#endif

	/* release displaced send rights outside the thread mutex */
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(port: old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(port: new_port);
	}

	return KERN_SUCCESS;
}
4237
4238kern_return_t
4239task_set_exception_ports(
4240 task_t task,
4241 exception_mask_t exception_mask,
4242 ipc_port_t new_port,
4243 exception_behavior_t new_behavior,
4244 thread_state_flavor_t new_flavor)
4245{
4246 ipc_port_t old_port[EXC_TYPES_COUNT];
4247 boolean_t privileged = task_is_privileged(task: current_task());
4248 register int i;
4249
4250#if CONFIG_MACF
4251 struct label *new_label;
4252#endif
4253
4254 if (task == TASK_NULL) {
4255 return KERN_INVALID_ARGUMENT;
4256 }
4257
4258 if (exception_mask & ~EXC_MASK_VALID) {
4259 return KERN_INVALID_ARGUMENT;
4260 }
4261
4262 if (IP_VALID(new_port)) {
4263 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
4264 case EXCEPTION_DEFAULT:
4265 case EXCEPTION_STATE:
4266 case EXCEPTION_STATE_IDENTITY:
4267 case EXCEPTION_IDENTITY_PROTECTED:
4268 break;
4269
4270 default:
4271 return KERN_INVALID_ARGUMENT;
4272 }
4273 }
4274
4275 if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
4276 return KERN_INVALID_RIGHT;
4277 }
4278
4279
4280 /*
4281 * Check the validity of the thread_state_flavor by calling the
4282 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
4283 * osfmk/mach/ARCHITECTURE/thread_status.h
4284 */
4285 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4286 return KERN_INVALID_ARGUMENT;
4287 }
4288
4289 if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
4290 (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
4291 && !(new_behavior & MACH_EXCEPTION_CODES)) {
4292 return KERN_INVALID_ARGUMENT;
4293 }
4294
4295 if (!set_exception_behavior_allowed(new_port, new_behavior, excepting_task: task, mask: exception_mask, level: "task")) {
4296 return KERN_NO_ACCESS;
4297 }
4298
4299#if CONFIG_MACF
4300 new_label = mac_exc_create_label_for_current_proc();
4301#endif
4302
4303 itk_lock(task);
4304
4305 /*
4306 * Allow setting exception port during the span of ipc_task_init() to
4307 * ipc_task_terminate(). posix_spawn() port actions can set exception
4308 * ports on target task _before_ task IPC access is enabled.
4309 */
4310 if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
4311 itk_unlock(task);
4312#if CONFIG_MACF
4313 mac_exc_free_label(label: new_label);
4314#endif
4315 return KERN_FAILURE;
4316 }
4317
4318 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4319 if ((exception_mask & (1 << i))
4320#if CONFIG_MACF
4321 && mac_exc_update_action_label(action: &task->exc_actions[i], newlabel: new_label) == 0
4322#endif
4323 ) {
4324 old_port[i] = task->exc_actions[i].port;
4325 task->exc_actions[i].port =
4326 exception_port_copy_send(port: new_port);
4327 task->exc_actions[i].behavior = new_behavior;
4328 task->exc_actions[i].flavor = new_flavor;
4329 task->exc_actions[i].privileged = privileged;
4330 } else {
4331 old_port[i] = IP_NULL;
4332 }
4333 }
4334
4335 itk_unlock(task);
4336
4337#if CONFIG_MACF
4338 mac_exc_free_label(label: new_label);
4339#endif
4340
4341 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4342 if (IP_VALID(old_port[i])) {
4343 ipc_port_release_send(port: old_port[i]);
4344 }
4345 }
4346
4347 if (IP_VALID(new_port)) { /* consume send right */
4348 ipc_port_release_send(port: new_port);
4349 }
4350
4351 return KERN_SUCCESS;
4352}
4353
4354/*
4355 * Routine: thread/task_swap_exception_ports [kernel call]
4356 * Purpose:
4357 * Sets the thread/task exception port, flavor and
4358 * behavior for the exception types specified by the
4359 * mask.
4360 *
4361 * The old ports, behavior and flavors are returned
4362 * Count specifies the array sizes on input and
4363 * the number of returned ports etc. on output. The
4364 * arrays must be large enough to hold all the returned
 * data, MIG returns an error otherwise. The masks
4366 * array specifies the corresponding exception type(s).
4367 *
4368 * Conditions:
4369 * Nothing locked. If successful, consumes
4370 * the supplied send right.
4371 *
 * Returns up to [in] CountCnt elements.
4373 * Returns:
4374 * KERN_SUCCESS Changed the special port.
4375 * KERN_INVALID_ARGUMENT The thread is null,
4376 * Illegal mask bit set.
4377 * Illegal exception behavior
4378 * KERN_FAILURE The thread is dead.
4379 * KERN_NO_ACCESS Restricted access to set port
4380 */
4381
kern_return_t
thread_swap_exception_ports(
	thread_t thread,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t privileged = task_is_privileged(task: current_task());
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* behavior is only validated when a real port is being installed */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* immovable (kobject) ports must not be exposed via swap/get */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* identity-protected and backtrace-preferred require MACH_EXCEPTION_CODES */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!set_exception_behavior_allowed(new_port, new_behavior, excepting_task: get_threadtask(thread), mask: exception_mask, level: "thread")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(label: new_label);
#endif
		return KERN_FAILURE;
	}

	/* per-thread exception actions are allocated lazily */
	tro = get_thread_ro(thread);
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}

	/*
	 * Record the old (port, behavior, flavor) tuples — deduplicated, with
	 * their masks merged — while installing the new action; stop recording
	 * once the caller's arrays (*CountCnt entries) are full.
	 */
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, newlabel: new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* no identical entry: append a new tuple */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(port: action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			old_port[i] = action->port;
			action->port = exception_port_copy_send(port: new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(label: new_label);
#endif

	/*
	 * Release displaced rights outside the mutex; counts down from the
	 * loop's final i, covering exactly the entries initialized above.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(port: old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(port: new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4524
/*
 * Routine: task_swap_exception_ports [kernel call]
 * Purpose:
 *	Sets the task exception port, flavor and behavior for all the
 *	exceptions selected by exception_mask, while returning the
 *	previous settings (up to [in] *CountCnt distinct entries) via
 *	masks/ports/behaviors/flavors.
 * Conditions:
 *	Nothing locked. On success the caller's send right on new_port
 *	is consumed, and a send right is returned for each distinct
 *	previously-installed port.
 * Returns:
 *	KERN_SUCCESS		Ports swapped.
 *	KERN_INVALID_ARGUMENT	Null task, illegal mask bit,
 *				unknown behavior, or bad flavor.
 *	KERN_INVALID_RIGHT	new_port is immovable.
 *	KERN_NO_ACCESS		Policy denies changing the ports.
 *	KERN_FAILURE		The task's IPC space is inactive.
 */
kern_return_t
task_swap_exception_ports(
	task_t task,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	/* Displaced send rights are collected here and released only
	 * after the task lock has been dropped. */
	ipc_port_t old_port[EXC_TYPES_COUNT];
	boolean_t privileged = task_is_privileged(task: current_task());
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject mask bits outside the defined exception set. */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Only validate the behavior when a real port is being
	 * installed; IP_NULL simply clears the selected entries. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable ports may not serve as exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* IDENTITY_PROTECTED delivery and the backtrace preference
	 * both require MACH_EXCEPTION_CODES to be set. */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!set_exception_behavior_allowed(new_port, new_behavior, excepting_task: task, mask: exception_mask, level: "task")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF
	/* Allocate the MAC label before taking the task lock. */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		/* Task IPC space already torn down; nothing to swap. */
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(label: new_label);
#endif
		return KERN_FAILURE;
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action: &task->exc_actions[i], newlabel: new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* First time this (port, behavior, flavor)
				 * tuple is seen: return it to the caller with
				 * its own send right. */
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(port: task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Stash the displaced right, then install the new
			 * setting for this exception type. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = exception_port_copy_send(port: new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(label: new_label);
#endif

	/* Release displaced rights outside the task lock. Only indices
	 * below the loop-exit value of i were written.
	 * NOTE(review): i is unsigned, so this loop form assumes
	 * FIRST_EXCEPTION >= 1 — TODO confirm against exception_types.h. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(port: old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(port: new_port);
	}

	/* Report how many distinct previous settings were returned. */
	*CountCnt = count;

	return KERN_SUCCESS;
}
4659
4660/*
4661 * Routine: thread/task_get_exception_ports [kernel call]
4662 * Purpose:
4663 * Clones a send right for each of the thread/task's exception
4664 * ports specified in the mask and returns the behaviour
4665 * and flavor of said port.
4666 *
 * Returns up to [in] CountCnt elements.
4668 *
4669 * Conditions:
4670 * Nothing locked.
4671 * Returns:
4672 * KERN_SUCCESS Extracted a send right.
4673 * KERN_INVALID_ARGUMENT The thread is null,
4674 * Invalid special port,
4675 * Illegal mask bit set.
4676 * KERN_FAILURE The thread is dead.
4677 */
/*
 * Shared implementation for thread_get_exception_ports and
 * thread_get_exception_ports_info. When ports_info is non-NULL,
 * obfuscated port/receiver identifiers are returned instead of
 * send rights.
 */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t thread,
	exception_mask_t exception_mask,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_info_array_t ports_info,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject mask bits outside the defined exception set. */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Exactly one of the two output styles must be requested. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	/* NOTE(review): tro is fetched before the mutex is taken —
	 * presumably safe because the read-only view is stable for the
	 * thread's lifetime; confirm against get_thread_ro() contract. */
	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	/* No table allocated means no exception ports were ever set. */
	if (tro->tro_exc_actions == NULL) {
		goto done;
	}

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New distinct tuple, and the caller still has room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						/* Unset entry: report null identifiers. */
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(port: exc_port, task: &receiver);
						/* Obfuscate kernel addresses before export. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					/* Caller gets its own send right. */
					ports[j] = exception_port_copy_send(port: exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4770
4771kern_return_t
4772thread_get_exception_ports(
4773 thread_t thread,
4774 exception_mask_t exception_mask,
4775 exception_mask_array_t masks,
4776 mach_msg_type_number_t *CountCnt,
4777 exception_port_array_t ports,
4778 exception_behavior_array_t behaviors,
4779 thread_state_flavor_array_t flavors)
4780{
4781 return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4782 NULL, ports, behaviors, flavors);
4783}
4784
4785kern_return_t
4786thread_get_exception_ports_info(
4787 mach_port_t port,
4788 exception_mask_t exception_mask,
4789 exception_mask_array_t masks,
4790 mach_msg_type_number_t *CountCnt,
4791 exception_port_info_array_t ports_info,
4792 exception_behavior_array_t behaviors,
4793 thread_state_flavor_array_t flavors)
4794{
4795 kern_return_t kr;
4796
4797 thread_t thread = convert_port_to_thread_read_no_eval(port);
4798
4799 if (thread == THREAD_NULL) {
4800 return KERN_INVALID_ARGUMENT;
4801 }
4802
4803 kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4804 ports_info, NULL, behaviors, flavors);
4805
4806 thread_deallocate(thread);
4807 return kr;
4808}
4809
4810kern_return_t
4811thread_get_exception_ports_from_user(
4812 mach_port_t port,
4813 exception_mask_t exception_mask,
4814 exception_mask_array_t masks,
4815 mach_msg_type_number_t *CountCnt,
4816 exception_port_array_t ports,
4817 exception_behavior_array_t behaviors,
4818 thread_state_flavor_array_t flavors)
4819{
4820 kern_return_t kr;
4821
4822 thread_t thread = convert_port_to_thread(port);
4823
4824 if (thread == THREAD_NULL) {
4825 return KERN_INVALID_ARGUMENT;
4826 }
4827
4828 kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4829
4830 thread_deallocate(thread);
4831 return kr;
4832}
4833
/*
 * Shared implementation for task_get_exception_ports and
 * task_get_exception_ports_info. When ports_info is non-NULL,
 * obfuscated port/receiver identifiers are returned instead of
 * send rights.
 */
static kern_return_t
task_get_exception_ports_internal(
	task_t task,
	exception_mask_t exception_mask,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_info_array_t ports_info,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject mask bits outside the defined exception set. */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Exactly one of the two output styles must be requested. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);

	if (!task->ipc_active) {
		/* Task IPC space already torn down. */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New distinct tuple, and the caller still has room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						/* Unset entry: report null identifiers. */
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(port: exc_port, task: &receiver);
						/* Obfuscate kernel addresses before export. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					/* Caller gets its own send right. */
					ports[j] = exception_port_copy_send(port: exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4918
4919kern_return_t
4920task_get_exception_ports(
4921 task_t task,
4922 exception_mask_t exception_mask,
4923 exception_mask_array_t masks,
4924 mach_msg_type_number_t *CountCnt,
4925 exception_port_array_t ports,
4926 exception_behavior_array_t behaviors,
4927 thread_state_flavor_array_t flavors)
4928{
4929 return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4930 NULL, ports, behaviors, flavors);
4931}
4932
4933kern_return_t
4934task_get_exception_ports_info(
4935 mach_port_t port,
4936 exception_mask_t exception_mask,
4937 exception_mask_array_t masks,
4938 mach_msg_type_number_t *CountCnt,
4939 exception_port_info_array_t ports_info,
4940 exception_behavior_array_t behaviors,
4941 thread_state_flavor_array_t flavors)
4942{
4943 kern_return_t kr;
4944
4945 task_t task = convert_port_to_task_read_no_eval(port);
4946
4947 if (task == TASK_NULL) {
4948 return KERN_INVALID_ARGUMENT;
4949 }
4950
4951 kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4952 ports_info, NULL, behaviors, flavors);
4953
4954 task_deallocate(task);
4955 return kr;
4956}
4957
4958kern_return_t
4959task_get_exception_ports_from_user(
4960 mach_port_t port,
4961 exception_mask_t exception_mask,
4962 exception_mask_array_t masks,
4963 mach_msg_type_number_t *CountCnt,
4964 exception_port_array_t ports,
4965 exception_behavior_array_t behaviors,
4966 thread_state_flavor_array_t flavors)
4967{
4968 kern_return_t kr;
4969
4970 task_t task = convert_port_to_task(port);
4971
4972 if (task == TASK_NULL) {
4973 return KERN_INVALID_ARGUMENT;
4974 }
4975
4976 kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4977
4978 task_deallocate(task);
4979 return kr;
4980}
4981
4982/*
4983 * Routine: ipc_thread_port_unpin
4984 * Purpose:
4985 *
4986 * Called on the thread when it's terminating so that the last ref
4987 * can be deallocated without a guard exception.
4988 * Conditions:
4989 * Thread mutex lock is held.
4990 */
4991void
4992ipc_thread_port_unpin(
4993 ipc_port_t port)
4994{
4995 if (port == IP_NULL) {
4996 return;
4997 }
4998 ip_mq_lock(port);
4999 port->ip_pinned = 0;
5000 ip_mq_unlock(port);
5001}
5002