/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public Source
 * License, Version 2.0.
 */
/*
 */
/*
 *	File:	ipc/ipc_port.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Functions to manipulate IPC ports.
 */

#include <mach/boolean.h>
#include <mach_assert.h>

#include <mach/port.h>
#include <mach/kern_return.h>
#include <kern/backtrace.h>
#include <kern/debug.h>
#include <kern/ipc_kobject.h>
#include <kern/kcdata.h>
#include <kern/misc_protos.h>
#include <kern/policy_internal.h>
#include <kern/thread.h>
#include <kern/waitq.h>
#include <kern/host_notify.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_importance.h>
#include <machine/limits.h>
#include <kern/turnstile.h>
#include <kern/machine.h>

#include <security/mac_mach_internal.h>
#include <ipc/ipc_service_port.h>

#include <string.h>

extern bool proc_is_simulated(struct proc *);
extern struct proc *current_proc(void);
extern int csproc_hardened_runtime(struct proc* p);

static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);

extern zone_t ipc_kobject_label_zone;

LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
ipc_port_timestamp_t ipc_port_timestamp_data;

KALLOC_ARRAY_TYPE_DEFINE(ipc_port_request_table,
    struct ipc_port_request, KT_DEFAULT);

#if MACH_ASSERT
static void ipc_port_init_debug(ipc_port_t, void *fp);
#endif /* MACH_ASSERT */

void __abortlike
__ipc_port_inactive_panic(ipc_port_t port)
{
	panic("Using inactive port %p", port);
}

static __abortlike void
__ipc_port_translate_receive_panic(ipc_space_t space, ipc_port_t port)
{
	panic("found receive right in space %p for port %p owned by space %p",
	    space, port, ip_get_receiver(port));
}

__abortlike void
__ipc_right_delta_overflow_panic(ipc_port_t port, natural_t *field, int delta)
{
	const char *what;
	if (field == &port->ip_srights) {
		what = "send right";
	} else {
		what = "send-once right";
	}
	panic("port %p %s count overflow (delta: %d)", port, what, delta);
}

static void
ipc_port_send_turnstile_recompute_push_locked(
	ipc_port_t              port);

static thread_t
ipc_port_get_watchport_inheritor(
	ipc_port_t              port);

static kern_return_t
ipc_port_update_qos_n_iotier(
	ipc_port_t              port,
	uint8_t                 qos,
	uint8_t                 iotier);

void
ipc_port_release(ipc_port_t port)
{
	ip_release(port);
}

void
ipc_port_reference(ipc_port_t port)
{
	ip_validate(port);
	ip_reference(port);
}

/*
 *	Routine:	ipc_port_timestamp
 *	Purpose:
 *		Retrieve a timestamp value.
 */

ipc_port_timestamp_t
ipc_port_timestamp(void)
{
	return OSIncrementAtomic(&ipc_port_timestamp_data);
}
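
/*
 * Note (illustrative, not upstream documentation): the monotonically
 * increasing value returned here is stamped into ip_timestamp when a
 * port is torn down (see ipc_port_clear_receiver below), which gives
 * code that races with port death a cheap way to order "port died"
 * against other events without holding locks across both.
 */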


/*
 *	Routine:	ipc_port_translate_send
 *	Purpose:
 *		Look up a send right in a space.
 *	Conditions:
 *		Nothing locked before.  If successful, the object
 *		is returned active and locked.  The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Object returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	The name doesn't denote a right.
 *		KERN_INVALID_RIGHT	Name doesn't denote the correct right.
 */
kern_return_t
ipc_port_translate_send(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_port_t              *portp)
{
	ipc_port_t port = IP_NULL;
	ipc_object_t object;
	kern_return_t kr;

	kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_SEND, &object);
	if (kr == KERN_SUCCESS) {
		port = ip_object_to_port(object);
	}
	*portp = port;
	return kr;
}
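
/*
 * Usage sketch (illustrative only; locals are hypothetical):
 *
 *	ipc_port_t port;
 *
 *	if (ipc_port_translate_send(space, name, &port) == KERN_SUCCESS) {
 *		... use the active, locked port; no reference is donated ...
 *		ip_mq_unlock(port);
 *	}
 */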


/*
 *	Routine:	ipc_port_translate_receive
 *	Purpose:
 *		Look up a receive right in a space.
 *		Performs some minimal security checks against tampering.
 *	Conditions:
 *		Nothing locked before.  If successful, the object
 *		is returned active and locked.  The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Object returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	The name doesn't denote a right.
 *		KERN_INVALID_RIGHT	Name doesn't denote the correct right.
 */
kern_return_t
ipc_port_translate_receive(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_port_t              *portp)
{
	ipc_port_t port = IP_NULL;
	ipc_object_t object;
	kern_return_t kr;

	kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_RECEIVE, &object);
	if (kr == KERN_SUCCESS) {
		/* object is locked */
		port = ip_object_to_port(object);
		if (!ip_in_space(port, space)) {
			__ipc_port_translate_receive_panic(space, port);
		}
	}
	*portp = port;
	return kr;
}


/*
 *	Routine:	ipc_port_request_alloc
 *	Purpose:
 *		Try to allocate a request slot.
 *		If successful, returns the request index.
 *		Otherwise returns zero.
 *	Conditions:
 *		The port is locked and active.
 *	Returns:
 *		KERN_SUCCESS		A request index was found.
 *		KERN_NO_SPACE		No index allocated.
 */

kern_return_t
ipc_port_request_alloc(
	ipc_port_t                      port,
	mach_port_name_t                name,
	ipc_port_t                      soright,
	ipc_port_request_opts_t         options,
	ipc_port_request_index_t        *indexp)
{
	ipc_port_request_table_t table;
	ipc_port_request_index_t index;
	ipc_port_request_t ipr, base;

	require_ip_active(port);
	assert(name != MACH_PORT_NULL);
	assert(soright != IP_NULL);

	table = port->ip_requests;
	if (table == NULL) {
		return KERN_NO_SPACE;
	}

	base = ipc_port_request_table_base(table);
	index = base->ipr_next;
	if (index == 0) {
		return KERN_NO_SPACE;
	}

	ipr = ipc_port_request_table_get(table, index);
	assert(ipr->ipr_soright == IP_NULL);

	base->ipr_next = ipr->ipr_next;
	ipr->ipr_name = name;
	ipr->ipr_soright = IPR_SOR_MAKE(soright, options);

	if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
	    port->ip_sprequests == 0) {
		port->ip_sprequests = 1;
	}

	*indexp = index;

	return KERN_SUCCESS;
}
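
/*
 * Free-list sketch (illustrative): slot 0 of the ip_requests array is a
 * sentinel whose ipr_next holds the head of the free list, so index 0
 * from base->ipr_next means "table full".  With free slots 3 and 5:
 *
 *	base->ipr_next == 3,  slot[3].ipr_next == 5,  slot[5].ipr_next == 0
 *
 * allocation pops the head: index 3 is handed out and base->ipr_next
 * becomes 5.
 */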


/*
 *	Routine:	ipc_port_request_hnotify_alloc
 *	Purpose:
 *		Try to allocate a request slot.
 *		If successful, returns the request index.
 *		Otherwise returns zero.
 *	Conditions:
 *		The port is locked and active.
 *	Returns:
 *		KERN_SUCCESS		A request index was found.
 *		KERN_NO_SPACE		No index allocated.
 *		KERN_INVALID_CAPABILITY	A host notify registration already
 *					existed.
 */

kern_return_t
ipc_port_request_hnotify_alloc(
	ipc_port_t                      port,
	struct host_notify_entry        *hnotify,
	ipc_port_request_index_t        *indexp)
{
	ipc_port_request_table_t table;
	ipc_port_request_index_t index;
	ipc_port_request_t ipr, base;

	require_ip_active(port);

	table = port->ip_requests;
	if (table == NULL) {
		return KERN_NO_SPACE;
	}

	base = ipc_port_request_table_base(table);
	if (base->ipr_hn_slot) {
		return KERN_INVALID_CAPABILITY;
	}
	index = base->ipr_next;
	if (index == 0) {
		return KERN_NO_SPACE;
	}

	ipr = ipc_port_request_table_get(table, index);
	assert(ipr->ipr_soright == IP_NULL);

	base->ipr_hn_slot = ipr;
	base->ipr_next = ipr->ipr_next;
	ipr->ipr_hnotify = hnotify;
	ipr->ipr_name = IPR_HOST_NOTIFY;

	*indexp = index;

	return KERN_SUCCESS;
}
369 | |
370 | /* |
371 | * Routine: ipc_port_request_grow |
372 | * Purpose: |
373 | * Grow a port's table of requests. |
374 | * Conditions: |
375 | * The port must be locked and active. |
376 | * Nothing else locked; will allocate memory. |
377 | * Upon return the port is unlocked. |
378 | * Returns: |
379 | * KERN_SUCCESS Grew the table. |
380 | * KERN_SUCCESS Somebody else grew the table. |
381 | * KERN_SUCCESS The port died. |
382 | * KERN_RESOURCE_SHORTAGE Couldn't allocate new table. |
383 | * KERN_NO_SPACE Couldn't grow to desired size |
384 | */ |
385 | |
386 | kern_return_t |
387 | ipc_port_request_grow( |
388 | ipc_port_t port) |
389 | { |
390 | ipc_port_request_table_t otable, ntable; |
391 | uint32_t osize, nsize; |
392 | uint32_t ocount, ncount; |
393 | |
394 | require_ip_active(port); |
395 | |
396 | otable = port->ip_requests; |
397 | if (otable) { |
398 | osize = ipc_port_request_table_size(array: otable); |
399 | } else { |
400 | osize = 0; |
401 | } |
402 | nsize = ipc_port_request_table_next_size(min_count: 2, cur_size: osize, vm_period: 16); |
403 | if (nsize > CONFIG_IPC_TABLE_REQUEST_SIZE_MAX) { |
404 | nsize = CONFIG_IPC_TABLE_REQUEST_SIZE_MAX; |
405 | } |
406 | if (nsize == osize) { |
407 | return KERN_RESOURCE_SHORTAGE; |
408 | } |
409 | |
410 | ip_reference(port); |
411 | ip_mq_unlock(port); |
412 | |
413 | ntable = ipc_port_request_table_alloc_by_size(size: nsize, fl: Z_WAITOK | Z_ZERO); |
414 | if (ntable == NULL) { |
415 | ip_release(port); |
416 | return KERN_RESOURCE_SHORTAGE; |
417 | } |
418 | |
419 | ip_mq_lock(port); |
420 | |
421 | /* |
422 | * Check that port is still active and that nobody else |
423 | * has slipped in and grown the table on us. Note that |
424 | * just checking if the current table pointer == otable |
425 | * isn't sufficient; must check ipr_size. |
426 | */ |
427 | |
428 | ocount = ipc_port_request_table_size_to_count(size: osize); |
429 | ncount = ipc_port_request_table_size_to_count(size: nsize); |
430 | |
431 | if (ip_active(port) && port->ip_requests == otable) { |
432 | ipc_port_request_index_t free, i; |
433 | |
434 | /* copy old table to new table */ |
435 | |
436 | if (otable != NULL) { |
437 | ipc_port_request_t obase, nbase, ohn, nhn; |
438 | |
439 | obase = ipc_port_request_table_base(array: otable); |
440 | nbase = ipc_port_request_table_base(array: ntable); |
441 | memcpy(dst: nbase, src: obase, n: osize); |
442 | |
443 | /* |
444 | * if there is a host-notify registration, |
445 | * fixup dPAC for the registration's ipr_hnotify field, |
446 | * and the ipr_hn_slot sentinel. |
447 | */ |
448 | ohn = obase->ipr_hn_slot; |
449 | if (ohn) { |
450 | nhn = nbase + (ohn - obase); |
451 | nhn->ipr_hnotify = ohn->ipr_hnotify; |
452 | nbase->ipr_hn_slot = nhn; |
			}

			/* seed the free list with the old table's head */
			free = obase->ipr_next;
		} else {
			ocount = 1;
			free = 0;
		}

		/* add new elements to the new table's free list */

		for (i = ocount; i < ncount; i++) {
			ipc_port_request_table_get_nocheck(ntable, i)->ipr_next = free;
			free = i;
		}

		ipc_port_request_table_base(ntable)->ipr_next = free;
		port->ip_requests = ntable;
		ip_mq_unlock(port);
		ip_release(port);

		if (otable != NULL) {
			ipc_port_request_table_free(&otable);
		}
	} else {
		ip_mq_unlock(port);
		ip_release(port);
		ipc_port_request_table_free(&ntable);
	}

	return KERN_SUCCESS;
}
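
/*
 * Growth sketch (illustrative): if ocount == 4 and ncount == 8, slots
 * 1..3 are carried over verbatim by the memcpy above, slots 4..7 are
 * pushed onto the free list in descending order, and the final list
 * reads 7 -> 6 -> 5 -> 4 -> <old free head>.
 */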

/*
 *	Routine:	ipc_port_request_sparm
 *	Purpose:
 *		Arm delayed send-possible request.
 *	Conditions:
 *		The port must be locked and active.
 *
 *		Returns TRUE if the request was armed
 *		(or armed with importance in that version).
 */

boolean_t
ipc_port_request_sparm(
	ipc_port_t                      port,
	__assert_only mach_port_name_t  name,
	ipc_port_request_index_t        index,
	mach_msg_option_t               option,
	mach_msg_priority_t             priority)
{
	if (index != IE_REQ_NONE) {
		ipc_port_request_table_t table;
		ipc_port_request_t ipr;

		require_ip_active(port);

		table = port->ip_requests;
		assert(table != NULL);

		ipr = ipc_port_request_table_get(table, index);
		assert(ipr->ipr_name == name);

		/* Is there a valid destination? */
		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
			ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
			port->ip_sprequests = 1;

			if (option & MACH_SEND_OVERRIDE) {
				/* apply override to message queue */
				mach_msg_qos_t qos_ovr;
				if (mach_msg_priority_is_pthread_priority(priority)) {
					qos_ovr = _pthread_priority_thread_qos(priority);
				} else {
					qos_ovr = mach_msg_priority_overide_qos(priority);
				}
				if (qos_ovr) {
					ipc_mqueue_override_send_locked(&port->ip_messages, qos_ovr);
				}
			}

#if IMPORTANCE_INHERITANCE
			if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
			    (port->ip_impdonation != 0) &&
			    (port->ip_spimportant == 0) &&
			    (((option & MACH_SEND_IMPORTANCE) != 0) ||
			    (task_is_importance_donor(current_task())))) {
				return TRUE;
			}
#else
			return TRUE;
#endif /* IMPORTANCE_INHERITANCE */
		}
	}
	return FALSE;
}
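
/*
 * Registration sketch (illustrative): from user space, the request this
 * routine later arms is typically created with
 * mach_port_request_notification(), e.g.
 *
 *	mach_port_t prev = MACH_PORT_NULL;
 *	kr = mach_port_request_notification(mach_task_self(), dest_name,
 *	    MACH_NOTIFY_SEND_POSSIBLE, 0, notify_port,
 *	    MACH_MSG_TYPE_MAKE_SEND_ONCE, &prev);
 *
 * (dest_name and notify_port are hypothetical names.)
 */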

/*
 *	Routine:	ipc_port_request_type
 *	Purpose:
 *		Determine the type(s) of port requests enabled for a name.
 *	Conditions:
 *		The port must be locked or inactive (to avoid table growth).
 *		The index must not be IE_REQ_NONE and must correspond to the
 *		name in question.
 */
mach_port_type_t
ipc_port_request_type(
	ipc_port_t                      port,
	__assert_only mach_port_name_t  name,
	ipc_port_request_index_t        index)
{
	ipc_port_request_table_t table;
	ipc_port_request_t ipr;
	mach_port_type_t type = 0;

	table = port->ip_requests;
	assert(table != NULL);

	assert(index != IE_REQ_NONE);
	ipr = ipc_port_request_table_get(table, index);
	assert(ipr->ipr_name == name);

	if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
		type |= MACH_PORT_TYPE_DNREQUEST;

		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
			type |= MACH_PORT_TYPE_SPREQUEST;

			if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
				type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
			}
		}
	}
	return type;
}

/*
 *	Routine:	ipc_port_request_cancel
 *	Purpose:
 *		Cancel a dead-name/send-possible request and return the send-once right.
 *	Conditions:
 *		The port must be locked and active.
 *		The index must not be IE_REQ_NONE and must correspond with name.
 */

ipc_port_t
ipc_port_request_cancel(
	ipc_port_t                      port,
	__assert_only mach_port_name_t  name,
	ipc_port_request_index_t        index)
{
	ipc_port_request_table_t table;
	ipc_port_request_t base, ipr;
	ipc_port_t request = IP_NULL;

	require_ip_active(port);
	table = port->ip_requests;
	assert(table != NULL);
	base = ipc_port_request_table_base(table);

	assert(index != IE_REQ_NONE);
	ipr = ipc_port_request_table_get(table, index);
	assert(ipr->ipr_name == name);
	request = IPR_SOR_PORT(ipr->ipr_soright);

	/* return ipr to the free list inside the table */
	ipr->ipr_next = base->ipr_next;
	ipr->ipr_soright = IP_NULL;
	if (base->ipr_hn_slot == ipr) {
		base->ipr_hn_slot = NULL;
	}
	base->ipr_next = index;

	return request;
}


/*
 *	Routine:	ipc_port_nsrequest
 *	Purpose:
 *		Make a no-senders request, returning the
 *		previously registered send-once right.
 *		Just cancels the previous request if notify is IP_NULL.
 *	Conditions:
 *		The port is locked and active.  It is unlocked.
 *		Consumes a ref for notify (if non-null), and
 *		returns previous with a ref (if non-null).
 */

void
ipc_port_nsrequest(
	ipc_port_t              port,
	mach_port_mscount_t     sync,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_t previous;
	mach_port_mscount_t mscount;
	require_ip_active(port);

	assert(!ip_in_space(port, ipc_space_kernel));
	assert(port->ip_nsrequest != IP_KOBJECT_NSREQUEST_ARMED);

	previous = port->ip_nsrequest;
	mscount = port->ip_mscount;

	if ((port->ip_srights == 0) && (sync <= mscount) &&
	    (notify != IP_NULL)) {
		port->ip_nsrequest = IP_NULL;
		ip_mq_unlock(port);
		ipc_notify_no_senders(notify, mscount, /* kobject */ false);
	} else {
		port->ip_nsrequest = notify;
		ip_mq_unlock(port);
	}

	*previousp = previous;
}
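
/*
 * Registration sketch (illustrative): user space reaches this routine
 * through mach_port_request_notification(), e.g.
 *
 *	mach_port_t prev = MACH_PORT_NULL;
 *	kr = mach_port_request_notification(mach_task_self(), recv_name,
 *	    MACH_NOTIFY_NO_SENDERS, 0, recv_name,
 *	    MACH_MSG_TYPE_MAKE_SEND_ONCE, &prev);
 *
 * (recv_name is a hypothetical receive right.)  The notification fires
 * once no send rights remain and the make-send count has reached `sync`.
 */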


/*
 *	Routine:	ipc_port_clear_receiver
 *	Purpose:
 *		Prepares a receive right for transmission/destruction,
 *		optionally performs mqueue destruction (with port lock held).
 *
 *	Conditions:
 *		The port is locked and active.
 *	Returns:
 *		If should_destroy is TRUE, then the return value indicates
 *		whether the caller needs to reap kmsg structures that should
 *		be destroyed (by calling ipc_kmsg_reap_delayed).
 *
 *		If should_destroy is FALSE, this always returns FALSE.
 */

boolean_t
ipc_port_clear_receiver(
	ipc_port_t              port,
	boolean_t               should_destroy,
	waitq_link_list_t       *free_l)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	boolean_t reap_messages = FALSE;

	/*
	 * Pull ourselves out of any sets to which we belong.
	 * We hold the write space lock or the receive entry has
	 * been deleted, so even though this acquires and releases
	 * the port lock, we know we won't be added to any other sets.
	 */
	if (ip_in_pset(port)) {
		waitq_unlink_all_locked(&port->ip_waitq, NULL, free_l);
		assert(!ip_in_pset(port));
	}

	/*
	 * Send anyone waiting on the port's queue directly away.
	 * Also clear the mscount, seqno, and guard bits.
	 */
	if (ip_in_a_space(port)) {
		ipc_mqueue_changed(ip_get_receiver(port), &port->ip_waitq);
	} else {
		ipc_mqueue_changed(NULL, &port->ip_waitq);
	}
	port->ip_mscount = 0;
	mqueue->imq_seqno = 0;
	port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;

	/*
	 * Clear the immovable bit so the port can move back to anyone listening
	 * for the port destroy notification.
	 */
	port->ip_immovable_receive = 0;

	if (should_destroy) {
		/*
		 * Mark the port and mqueue invalid, preventing further send/receive
		 * operations from succeeding.  It's important for this to be
		 * done under the same lock hold as the ipc_mqueue_changed
		 * call to avoid additional threads blocking on an mqueue
		 * that's being destroyed.
		 *
		 * The port active bit needs to be guarded under the mqueue lock
		 * for turnstiles.
		 */

		/* port transitions to INACTIVE state */
		io_bits_andnot(ip_to_object(port), IO_BITS_ACTIVE);
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_timestamp = ipc_port_timestamp();

		reap_messages = ipc_mqueue_destroy_locked(mqueue, free_l);
	} else {
		/* port transitions to IN-LIMBO state */
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_destination = IP_NULL;
	}

	return reap_messages;
}


/*
 *	Routine:	ipc_port_init_validate_flags
 *	Purpose:
 *		Validates the flag arguments for ipc_port_init
 *		so that overlapping flags are not accidentally used together.
 */

static kern_return_t
ipc_port_init_validate_flags(ipc_port_init_flags_t flags)
{
	uint32_t at_most_one_flags = flags & (IPC_PORT_ENFORCE_REPLY_PORT_SEMANTICS |
	    IPC_PORT_ENFORCE_RIGID_REPLY_PORT_SEMANTICS |
	    IPC_PORT_INIT_PROVISIONAL_ID_PROT_OPTOUT |
	    IPC_PORT_INIT_PROVISIONAL_REPLY);

	if (at_most_one_flags & (at_most_one_flags - 1)) {
		/* at most one of the listed flags can be set */
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
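
/*
 * The validation above uses the classic bit trick: for nonzero x,
 * x & (x - 1) clears the lowest set bit, so the expression is zero iff
 * at most one of the listed flags is set.  E.g. 0b0100 & 0b0011 == 0,
 * while 0b0110 & 0b0101 == 0b0100 != 0.
 */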


/*
 *	Routine:	ipc_port_init
 *	Purpose:
 *		Initializes a newly-allocated port.
 *
 *		The memory is expected to be zero initialized (allocated with Z_ZERO).
 */

void
ipc_port_init(
	ipc_port_t              port,
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name)
{
	int policy = SYNC_POLICY_FIFO;
	task_t task = TASK_NULL;

	/* the port has been 0 initialized when called */

	if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
		io_bits_or(ip_to_object(port), IP_BIT_FILTER_MSG);
	}
	if (flags & IPC_PORT_INIT_LOCKED) {
		policy |= SYNC_POLICY_INIT_LOCKED;
	}

	/* must be done first, many ip_* bits live inside the waitq */
	waitq_init(&port->ip_waitq, WQT_PORT, policy);
	if (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) {
		port->ip_tg_block_tracking = true;
	}
	if (flags & IPC_PORT_INIT_SPECIAL_REPLY) {
		port->ip_specialreply = true;
	}
	if ((flags & IPC_PORT_INIT_REPLY) || (flags & IPC_PORT_INIT_SPECIAL_REPLY)) {
		task = current_task_early();

		/* Strict enforcement of reply port semantics is disabled for 3p - rdar://97441265. */
		if (task && task_is_hardened_binary(task)) {
			port->ip_immovable_receive = true;
			ip_mark_reply_port(port);
		} else {
			ip_mark_provisional_reply_port(port);
		}
	}
	if (flags & IPC_PORT_ENFORCE_REPLY_PORT_SEMANTICS) {
		ip_enforce_reply_port_semantics(port);
	}
	if (flags & IPC_PORT_ENFORCE_RIGID_REPLY_PORT_SEMANTICS) {
		ip_enforce_rigid_reply_port_semantics(port);
	}
	if (flags & IPC_PORT_INIT_PROVISIONAL_REPLY) {
		ip_mark_provisional_reply_port(port);
	}

	if (flags & IPC_PORT_INIT_PROVISIONAL_ID_PROT_OPTOUT) {
		ip_mark_id_prot_opt_out(port);
		port->ip_immovable_receive = true;
	}

	port->ip_kernel_qos_override = THREAD_QOS_UNSPECIFIED;
	port->ip_kernel_iotier_override = THROTTLE_LEVEL_END;

	ipc_mqueue_init(&port->ip_messages);
#if MACH_ASSERT
	ipc_port_init_debug(port, __builtin_frame_address(0));
#endif /* MACH_ASSERT */

	/* port transitions to IN-SPACE state */
	port->ip_receiver_name = name;
	port->ip_receiver = space;

	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		port->ip_srights = 1;
		port->ip_mscount = 1;
	}
}
855 | |
856 | /* |
857 | * Routine: ipc_port_alloc |
858 | * Purpose: |
859 | * Allocate a port. |
860 | * Conditions: |
861 | * Nothing locked. If successful, the port is returned |
862 | * locked. (The caller doesn't have a reference.) |
863 | * Returns: |
864 | * KERN_SUCCESS The port is allocated. |
865 | * KERN_INVALID_TASK The space is dead. |
866 | * KERN_NO_SPACE No room for an entry in the space. |
867 | * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. |
868 | */ |
869 | |
870 | kern_return_t |
871 | ipc_port_alloc( |
872 | ipc_space_t space, |
873 | ipc_port_init_flags_t flags, |
874 | mach_port_name_t *namep, |
875 | ipc_port_t *portp) |
876 | { |
877 | ipc_port_t port; |
878 | mach_port_name_t name; |
879 | kern_return_t kr; |
880 | mach_port_type_t type = MACH_PORT_TYPE_RECEIVE; |
881 | mach_port_urefs_t urefs = 0; |
882 | |
883 | kr = ipc_port_init_validate_flags(flags); |
884 | if (kr != KERN_SUCCESS) { |
885 | return kr; |
886 | } |
887 | |
888 | if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) { |
889 | type |= MACH_PORT_TYPE_SEND; |
890 | urefs = 1; |
891 | } |
892 | kr = ipc_object_alloc(space, IOT_PORT, type, urefs, |
893 | namep: &name, objectp: (ipc_object_t *) &port); |
894 | if (kr != KERN_SUCCESS) { |
895 | return kr; |
896 | } |
897 | |
898 | /* space is locked */ |
899 | ipc_port_init(port, space, flags: flags | IPC_PORT_INIT_LOCKED, name); |
900 | /* port is locked */ |

	/* unlock space after init */
	is_write_unlock(space);

	*namep = name;
	*portp = port;

	return KERN_SUCCESS;
}
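
/*
 * Caller sketch (illustrative): this sits on the kernel side of e.g.
 * mach_port_allocate(task, MACH_PORT_RIGHT_RECEIVE, &name).  The space
 * lock taken by ipc_object_alloc() is dropped above, and the new port
 * comes back locked with no extra reference for the caller.
 */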

/*
 *	Routine:	ipc_port_alloc_name
 *	Purpose:
 *		Allocate a port, with a specific name.
 *	Conditions:
 *		Nothing locked.  If successful, the port is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_port_alloc_name(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name,
	ipc_port_t              *portp)
{
	mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
	mach_port_urefs_t urefs = 0;

	kern_return_t kr = ipc_port_init_validate_flags(flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		type |= MACH_PORT_TYPE_SEND;
		urefs = 1;
	}
	flags |= IPC_PORT_INIT_LOCKED;

	return ipc_object_alloc_name(space, IOT_PORT, type, urefs,
	           name, (ipc_object_t *)portp, ^(ipc_object_t object){
		ipc_port_init(ip_object_to_port(object), space, flags, name);
	});
}
954 | |
955 | /* |
956 | * Routine: ipc_port_spnotify |
957 | * Purpose: |
958 | * Generate send-possible port notifications. |
959 | * Conditions: |
960 | * Nothing locked, reference held on port. |
961 | */ |
962 | void |
963 | ipc_port_spnotify( |
964 | ipc_port_t port) |
965 | { |
966 | ipc_port_request_index_t index = 0; |
967 | ipc_table_elems_t size = 0; |
968 | |
969 | /* |
970 | * If the port has no send-possible request |
971 | * armed, don't bother to lock the port. |
972 | */ |
973 | if (port->ip_sprequests == 0) { |
974 | return; |
975 | } |
976 | |
977 | ip_mq_lock(port); |
978 | |
979 | #if IMPORTANCE_INHERITANCE |
980 | if (port->ip_spimportant != 0) { |
981 | port->ip_spimportant = 0; |
982 | if (ipc_port_importance_delta(port, options: IPID_OPTION_NORMAL, delta: -1) == TRUE) { |
983 | ip_mq_lock(port); |
984 | } |
985 | } |
986 | #endif /* IMPORTANCE_INHERITANCE */ |
987 | |
988 | if (port->ip_sprequests == 0) { |
989 | ip_mq_unlock(port); |
990 | return; |
991 | } |
992 | port->ip_sprequests = 0; |
993 | |
994 | revalidate: |
995 | if (ip_active(port)) { |
996 | ipc_port_request_table_t requests; |
997 | |
998 | /* table may change each time port unlocked (reload) */ |
999 | requests = port->ip_requests; |
1000 | assert(requests != NULL); |
1001 | |
1002 | /* |
1003 | * no need to go beyond table size when first |
1004 | * we entered - those are future notifications. |
1005 | */ |
1006 | if (size == 0) { |
1007 | size = ipc_port_request_table_count(array: requests); |
1008 | } |
1009 | |
1010 | /* no need to backtrack either */ |
1011 | while (++index < size) { |
1012 | ipc_port_request_t ipr = ipc_port_request_table_get_nocheck(array: requests, i: index); |
1013 | mach_port_name_t name = ipr->ipr_name; |
1014 | ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright); |
1015 | boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright); |
1016 | |
1017 | if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) { |
1018 | /* claim send-once right - slot still inuse */ |
1019 | assert(name != IPR_HOST_NOTIFY); |
1020 | ipr->ipr_soright = IP_NULL; |
1021 | ip_mq_unlock(port); |
1022 | |
1023 | ipc_notify_send_possible(port: soright, name); |
1024 | |
1025 | ip_mq_lock(port); |
1026 | goto revalidate; |
1027 | } |
1028 | } |
1029 | } |
1030 | ip_mq_unlock(port); |
1031 | return; |
1032 | } |
1033 | |
1034 | /* |
1035 | * Routine: ipc_port_dnnotify |
1036 | * Purpose: |
1037 | * Generate dead name notifications for |
1038 | * all outstanding dead-name and send- |
1039 | * possible requests. |
1040 | * Conditions: |
1041 | * Nothing locked. |
1042 | * Port must be inactive. |
1043 | * Reference held on port. |
1044 | */ |
1045 | void |
1046 | ipc_port_dnnotify( |
1047 | ipc_port_t port) |
1048 | { |
1049 | ipc_port_request_table_t requests = port->ip_requests; |
1050 | |
1051 | assert(!ip_active(port)); |
1052 | if (requests != NULL) { |
1053 | ipc_port_request_t ipr, base; |
1054 | |
1055 | base = ipr = ipc_port_request_table_base(array: requests); |
1056 | |
1057 | while ((ipr = ipc_port_request_table_next_elem(array: requests, e: ipr))) { |
1058 | mach_port_name_t name = ipr->ipr_name; |
1059 | ipc_port_t soright; |
1060 | |
1061 | switch (name) { |
1062 | case MACH_PORT_DEAD: |
1063 | case MACH_PORT_NULL: |
1064 | break; |
1065 | case IPR_HOST_NOTIFY: |
1066 | assert(base->ipr_hn_slot == ipr); |
1067 | host_notify_cancel(entry: ipr->ipr_hnotify); |
1068 | break; |
1069 | default: |
1070 | soright = IPR_SOR_PORT(ipr->ipr_soright); |
1071 | if (IP_VALID(soright)) { |
1072 | ipc_notify_dead_name(port: soright, name); |
1073 | } |
1074 | break; |
1075 | } |
1076 | } |
1077 | } |
1078 | } |

/*
 *	Routine:	ipc_port_destroy
 *	Purpose:
 *		Destroys a port.  Cleans up queued messages.
 *
 *		If the port has a backup, it doesn't get destroyed,
 *		but is sent in a port-destroyed notification to the backup.
 *	Conditions:
 *		The port is locked and alive; nothing else locked.
 *		The caller has a reference, which is consumed.
 *		Afterwards, the port is unlocked and dead.
 */

void
ipc_port_destroy(ipc_port_t port)
{
	bool special_reply = port->ip_specialreply;
	bool service_port = port->ip_service_port;
	bool reap_msgs;

	ipc_port_t pdrequest = IP_NULL;
	struct task_watchport_elem *twe = NULL;
	waitq_link_list_t free_l = { };

#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t release_imp_task = IIT_NULL;
	thread_t self = current_thread();
	boolean_t top = (self->ith_assertions == 0);
	natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

	require_ip_active(port);
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */

	/* clear any reply-port context */
	port->ip_reply_context = 0;

	/* must be done before we access ip_pdrequest */
	twe = ipc_port_clear_watchport_elem_internal(port);
	assert(!port->ip_has_watchport);

	if (!special_reply) {
		/* we assume the ref for pdrequest */
		pdrequest = port->ip_pdrequest;
		port->ip_pdrequest = IP_NULL;
	} else if (port->ip_tempowner) {
		panic("ipc_port_destroy: invalid state");
	}

#if IMPORTANCE_INHERITANCE
	/* determine how many assertions to drop and from whom */
	if (port->ip_tempowner != 0) {
		assert(top);
		release_imp_task = ip_get_imp_task(port);
		if (IIT_NULL != release_imp_task) {
			port->ip_imp_task = IIT_NULL;
			assertcnt = port->ip_impcount;
		}
		/* Otherwise, nothing to drop */
	} else {
		assertcnt = port->ip_impcount;
		if (pdrequest != IP_NULL) {
			/* mark in limbo for the journey */
			port->ip_tempowner = 1;
		}
	}

	if (top) {
		self->ith_assertions = assertcnt;
	}
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * If no port-destroyed notification is armed, calling
	 * ipc_port_clear_receiver() will mark the port inactive
	 * and will wake up any threads which may be blocked receiving on it.
	 */
	reap_msgs = ipc_port_clear_receiver(port, pdrequest == IP_NULL, &free_l);
	assert(!ip_in_pset(port));
	assert(port->ip_mscount == 0);

	/*
	 * Handle port-destroyed notification
	 */
	if (pdrequest != IP_NULL) {
		assert(reap_msgs == false);

		if (service_port) {
			assert(port->ip_splabel != NULL);
			if (ipc_service_port_label_is_special_pdrequest((ipc_service_port_label_t)port->ip_splabel)) {
				ipc_service_port_label_set_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
			}
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* consumes our refs for port and pdrequest */
		ipc_notify_port_destroyed(pdrequest, port);
	} else {
		ipc_service_port_label_t splabel = NULL;
		ipc_notify_nsenders_t nsrequest;

		nsrequest = ipc_notify_no_senders_prepare(port);

		if (!ip_is_kolabeled(port)) {
			splabel = port->ip_splabel;
			port->ip_splabel = NULL;
			port->ip_service_port = false;
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* unlink the kmsg from special reply port */
		if (special_reply) {
			ipc_port_adjust_special_reply_port(port,
			    IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
		}

		/* Deallocate the service/connection port label */
		if (splabel) {
			ipc_service_port_label_dealloc(splabel, service_port);
			splabel = NULL;
		}

		if (reap_msgs) {
			ipc_kmsg_reap_delayed();
		}

		if (nsrequest.ns_notify) {
			/*
			 * ipc_notify_no_senders_prepare will consume
			 * the reference for kobjects.
			 */
			assert(!nsrequest.ns_is_kobject);
			ip_mq_lock(nsrequest.ns_notify);
			ipc_notify_send_once_and_unlock(nsrequest.ns_notify); /* consumes ref */
		}

		/* generate dead-name notifications */
		ipc_port_dnnotify(port);

		ipc_kobject_destroy(port);

		ip_release(port); /* consume caller's ref */
	}

	if (twe) {
		task_watchport_elem_deallocate(twe);
		twe = NULL;
	}

	waitq_link_free_list(WQT_PORT_SET, &free_l);

#if IMPORTANCE_INHERITANCE
	if (release_imp_task != IIT_NULL) {
		if (assertcnt > 0) {
			assert(top);
			self->ith_assertions = 0;
			assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
			ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
		}
		ipc_importance_task_release(release_imp_task);
	} else if (assertcnt > 0) {
		if (top) {
			self->ith_assertions = 0;
			release_imp_task = current_task()->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
				ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
			}
		}
	}
#endif /* IMPORTANCE_INHERITANCE */
}
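
/*
 * Flow sketch (illustrative): with a port-destroyed request armed, the
 * receive right survives - it is handed to the registered backup via
 * ipc_notify_port_destroyed(), which consumes both references.  With no
 * backup, the port goes inactive above and the no-senders, dead-name
 * and host-notify machinery settles every outstanding registration.
 */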

/*
 *	Routine:	ipc_port_destination_chain_lock
 *	Purpose:
 *		Search for the end of the chain (a port not in transit),
 *		acquiring locks along the way, and return it in `base`.
 *
 *		Returns true if a reference was taken on `base`.
 *
 *	Conditions:
 *		No ports locked.
 *		ipc_port_multiple_lock held.
 */
boolean_t
ipc_port_destination_chain_lock(
	ipc_port_t      port,
	ipc_port_t      *base)
{
	for (;;) {
		ip_mq_lock(port);

		if (!ip_active(port)) {
			/*
			 * Active ports that are ip_mq_lock()ed cannot go away.
			 *
			 * But inactive ports at the end of walking
			 * an ip_destination chain are only protected
			 * from space termination cleanup while the entire
			 * chain of ports leading to them is held.
			 *
			 * Callers of this code tend to unlock the chain
			 * in the same order as this walk, which doesn't
			 * protect `base` properly when it's inactive.
			 *
			 * In that case, take a reference that the caller
			 * is responsible for releasing.
			 */
			ip_reference(port);
			*base = port;
			return true;
		}

		/* port is active */
		if (!ip_in_transit(port)) {
			*base = port;
			return false;
		}

		port = ip_get_destination(port);
	}
}


/*
 *	Routine:	ipc_port_check_circularity
 *	Purpose:
 *		Check if queueing "port" in a message for "dest"
 *		would create a circular group of ports and messages.
 *
 *		If no circularity (FALSE returned), then "port"
 *		is changed from "in limbo" to "in transit".
 *
 *		That is, we want to set port->ip_destination == dest,
 *		but guaranteeing that this doesn't create a circle
 *		port->ip_destination->ip_destination->... == port
 *
 *	Conditions:
 *		No ports locked.  References held for "port" and "dest".
 */

boolean_t
ipc_port_check_circularity(
	ipc_port_t      port,
	ipc_port_t      dest)
{
#if IMPORTANCE_INHERITANCE
	/* adjust importance counts at the same time */
	return ipc_importance_check_circularity(port, dest);
#else
	ipc_port_t base;
	struct task_watchport_elem *watchport_elem = NULL;
	bool took_base_ref = false;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	if (port == dest) {
		return TRUE;
	}
	base = dest;

	/* Check if destination needs a turnstile */
	ipc_port_send_turnstile_prepare(dest);

	/*
	 * First try a quick check that can run in parallel.
	 * No circularity if dest is not in transit.
	 */
	ip_mq_lock(port);
	if (ip_mq_lock_try(dest)) {
		if (!ip_in_transit(dest)) {
			goto not_circular;
		}

		/* dest is in transit; further checking necessary */

		ip_mq_unlock(dest);
	}
	ip_mq_unlock(port);

	ipc_port_multiple_lock(); /* massive serialization */

	/*
	 * Search for the end of the chain (a port not in transit),
	 * acquiring locks along the way.
	 */

	took_base_ref = ipc_port_destination_chain_lock(dest, &base);
	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */
		require_ip_active(port);
		assert(ip_in_limbo(port));
		assert(!took_base_ref);

		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			/* dest is in transit or in limbo */
			require_ip_active(base);
			assert(!ip_in_a_space(base));

			next = ip_get_destination(base);
			ip_mq_unlock(base);
			base = next;
		}

		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 * The guarantee:  lock port while the entire chain is locked.
	 * Once port is locked, we can take a reference to dest,
	 * add port to the chain, and unlock everything.
	 */

	ip_mq_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	require_ip_active(port);
	assert(ip_in_limbo(port));

	/* Clear the watchport boost */
	watchport_elem = ipc_port_clear_watchport_elem_internal(port);

	/* Check if the port is being enqueued as a part of sync bootstrap checkin */
	if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
		port->ip_sync_bootstrap_checkin = 1;
	}

	ip_reference(dest);

	/* port transitions to IN-TRANSIT state */
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	port->ip_destination = dest;

	/* Setup linkage for source port if it has sync ipc push */
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		/*
		 * What ipc_port_adjust_port_locked would do,
		 * but we need to also drop even more locks before
		 * calling turnstile_update_inheritor_complete().
		 */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
		    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	/* now unlock chain */

	ip_mq_unlock(port);

	for (;;) {
		ipc_port_t next;

		if (dest == base) {
			break;
		}

		/* port is IN-TRANSIT */
		require_ip_active(dest);
		assert(ip_in_transit(dest));

		next = ip_get_destination(dest);
		ip_mq_unlock(dest);
		dest = next;
	}

	/* base is not IN-TRANSIT */
	assert(!ip_in_transit(base));

	ip_mq_unlock(base);
	if (took_base_ref) {
		ip_release(base);
	}

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the mq lock to call turnstile complete */
		ip_mq_lock(port);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		send_turnstile = TURNSTILE_NULL;
		ip_mq_unlock(port);
		turnstile_cleanup();
	}

	if (watchport_elem) {
		task_watchport_elem_deallocate(watchport_elem);
	}

	return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
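
/*
 * Concrete case (illustrative): queueing the receive right for A in a
 * message sent to B while B's receive right is already queued on A
 * would set A->ip_destination == B and B->ip_destination == A; both
 * ports would then be unreceivable and undestroyable.  The chain walk
 * above finds "port" at the end of dest's chain and refuses the move.
 */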

/*
 *	Routine:	ipc_port_watchport_elem
 *	Purpose:
 *		Get the port's watchport elem field
 *
 *	Conditions:
 *		port locked
 */
static struct task_watchport_elem *
ipc_port_watchport_elem(ipc_port_t port)
{
	if (port->ip_has_watchport) {
		assert(!port->ip_specialreply);
		return port->ip_twe;
	}
	return NULL;
}

/*
 *	Routine:	ipc_port_update_watchport_elem
 *	Purpose:
 *		Set the port's watchport elem field
 *
 *	Conditions:
 *		port locked and is not a special reply port.
 */
static inline struct task_watchport_elem *
ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
{
	struct task_watchport_elem *old_we;
	ipc_port_t pdrequest;

	assert(!port->ip_specialreply);

	/*
	 * Note: ip_pdrequest and ip_twe are unioned,
	 * and ip_has_watchport controls the union "type".
	 */
	if (port->ip_has_watchport) {
		old_we = port->ip_twe;
		pdrequest = old_we->twe_pdrequest;
		old_we->twe_pdrequest = IP_NULL;
	} else {
		old_we = NULL;
		pdrequest = port->ip_pdrequest;
	}

	if (we) {
		port->ip_has_watchport = true;
		we->twe_pdrequest = pdrequest;
		port->ip_twe = we;
	} else {
		port->ip_has_watchport = false;
		port->ip_pdrequest = pdrequest;
	}

	return old_we;
}

/*
 *	Routine:	ipc_special_reply_stash_pid_locked
 *	Purpose:
 *		Set the pid of the process that copied out the send-once
 *		right to the special reply port.
 *
 *	Conditions:
 *		port locked
 */
static inline void
ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
{
	assert(port->ip_specialreply);
	port->ip_pid = pid;
}

/*
 *	Routine:	ipc_special_reply_get_pid_locked
 *	Purpose:
 *		Get the pid of the process that copied out the send-once
 *		right to the special reply port.
 *
 *	Conditions:
 *		port locked
 */
int
ipc_special_reply_get_pid_locked(ipc_port_t port)
{
	assert(port->ip_specialreply);
	return port->ip_pid;
}

/*
 * Update the recv turnstile inheritor for a port.
 *
 * Sync IPC through the port receive turnstile only happens for the special
 * reply port case. It has three sub-cases:
 *
 * 1. a send-once right is in transit, and pushes on the send turnstile of its
 *    destination mqueue.
 *
 * 2. a send-once right has been stashed on a knote it was copied out "through",
 *    as the first such copied out port.
 *
 * 3. a send-once right has been stashed on a knote it was copied out "through",
 *    as the second or more copied out port.
 */
void
ipc_port_recv_update_inheritor(
	ipc_port_t port,
	struct turnstile *rcv_turnstile,
	turnstile_update_flags_t flags)
{
	struct turnstile *inheritor = TURNSTILE_NULL;
	struct knote *kn;

	if (ip_active(port) && port->ip_specialreply) {
		ip_mq_lock_held(port);

		switch (port->ip_sync_link_state) {
		case PORT_SYNC_LINK_PORT:
			if (port->ip_sync_inheritor_port != NULL) {
				inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
			}
			break;

		case PORT_SYNC_LINK_WORKLOOP_KNOTE:
			kn = port->ip_sync_inheritor_knote;
			inheritor = filt_ipc_kqueue_turnstile(kn);
			break;

		case PORT_SYNC_LINK_WORKLOOP_STASH:
			inheritor = port->ip_sync_inheritor_ts;
			break;
		}
	}

	turnstile_update_inheritor(rcv_turnstile, inheritor,
	    flags | TURNSTILE_INHERITOR_TURNSTILE);
}

/*
 * Update the send turnstile inheritor for a port.
 *
 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
 *
 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
 *    to push on the thread doing the sync ipc.
 *
 * 2. a receive right is in transit, and pushes on the send turnstile of its
 *    destination mqueue.
 *
 * 3. port was passed as an exec watchport and port is pushing on the main
 *    thread of the task.
 *
 * 4. a receive right has been stashed on a knote it was copied out "through",
 *    as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
 *    for the special reply port)
 *
 * 5. a receive right has been stashed on a knote it was copied out "through",
 *    as the second or more copied out port (same as
 *    PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
 *
 * 6. a receive right has been copied out as a part of sync bootstrap checkin
 *    and needs to push on the thread doing the sync bootstrap checkin.
 *
 * 7. the receive right is monitored by a knote, and pushes on any that is
 *    registered on a workloop. filt_machport makes sure that if such a knote
 *    exists, it is kept as the first item in the knote list, so we never need
 *    to walk.
 */
void
ipc_port_send_update_inheritor(
	ipc_port_t port,
	struct turnstile *send_turnstile,
	turnstile_update_flags_t flags)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct knote *kn;
	turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;

	ip_mq_lock_held(port);

	if (!ip_active(port)) {
		/* this port is no longer active, it should not push anywhere */
	} else if (port->ip_specialreply) {
		/* Case 1. */
		if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
			inheritor = port->ip_messages.imq_srp_owner_thread;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (ip_in_transit(port)) {
		/* Case 2. */
		inheritor = port_send_turnstile(ip_get_destination(port));
	} else if (port->ip_has_watchport) {
		/* Case 3. */
		if (prioritize_launch) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = ipc_port_get_watchport_inheritor(port);
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
		/* Case 4. */
		inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
		/* Case 5. */
		inheritor = mqueue->imq_inheritor_turnstile;
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
		/* Case 6. */
		if (prioritize_launch) {
			inheritor = port->ip_messages.imq_inheritor_thread_ref;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if ((kn = SLIST_FIRST(&port->ip_klist))) {
		/* Case 7. Push on a workloop that is interested */
		if (filt_machport_kqueue_has_turnstile(kn)) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = filt_ipc_kqueue_turnstile(kn);
		}
	}

	turnstile_update_inheritor(send_turnstile, inheritor,
	    flags | inheritor_flags);
}
1719 | |
1720 | /* |
1721 | * Routine: ipc_port_send_turnstile_prepare |
1722 | * Purpose: |
1723 | * Get a reference on port's send turnstile, if |
1724 | * port does not have a send turnstile then allocate one. |
1725 | * |
1726 | * Conditions: |
1727 | * Nothing is locked. |
1728 | */ |
1729 | void |
1730 | ipc_port_send_turnstile_prepare(ipc_port_t port) |
1731 | { |
1732 | struct turnstile *turnstile = TURNSTILE_NULL; |
1733 | struct turnstile *send_turnstile = TURNSTILE_NULL; |
1734 | |
1735 | retry_alloc: |
1736 | ip_mq_lock(port); |
1737 | |
1738 | if (port_send_turnstile(port) == NULL || |
1739 | port_send_turnstile(port)->ts_prim_count == 0) { |
1740 | if (turnstile == TURNSTILE_NULL) { |
1741 | ip_mq_unlock(port); |
1742 | turnstile = turnstile_alloc(); |
1743 | goto retry_alloc; |
1744 | } |
1745 | |
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    turnstile, TURNSTILE_SYNC_IPC);
1749 | turnstile = TURNSTILE_NULL; |
1750 | |
		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);
1753 | |
1754 | /* turnstile complete will be called in ipc_port_send_turnstile_complete */ |
1755 | } |
1756 | |
1757 | /* Increment turnstile counter */ |
1758 | port_send_turnstile(port)->ts_prim_count++; |
1759 | ip_mq_unlock(port); |
1760 | |
1761 | if (send_turnstile) { |
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
1764 | } |
1765 | if (turnstile != TURNSTILE_NULL) { |
1766 | turnstile_deallocate(turnstile); |
1767 | } |
1768 | } |
1769 | |
1770 | |
1771 | /* |
1772 | * Routine: ipc_port_send_turnstile_complete |
1773 | * Purpose: |
 *	Drop a ref on the port's send turnstile; if the
 *	ref count reaches zero, deallocate the turnstile.
1776 | * |
1777 | * Conditions: |
1778 | * The space might be locked, use safe deallocate. |
1779 | */ |
1780 | void |
1781 | ipc_port_send_turnstile_complete(ipc_port_t port) |
1782 | { |
1783 | struct turnstile *turnstile = TURNSTILE_NULL; |
1784 | |
1785 | /* Drop turnstile count on dest port */ |
1786 | ip_mq_lock(port); |
1787 | |
1788 | port_send_turnstile(port)->ts_prim_count--; |
1789 | if (port_send_turnstile(port)->ts_prim_count == 0) { |
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
		    &turnstile, TURNSTILE_SYNC_IPC);
1792 | assert(turnstile != TURNSTILE_NULL); |
1793 | } |
1794 | ip_mq_unlock(port); |
1795 | turnstile_cleanup(); |
1796 | |
1797 | if (turnstile != TURNSTILE_NULL) { |
1798 | turnstile_deallocate_safe(turnstile); |
1799 | turnstile = TURNSTILE_NULL; |
1800 | } |
1801 | } |
1802 | |
1803 | /* |
1804 | * Routine: ipc_port_rcv_turnstile |
1805 | * Purpose: |
1806 | * Get the port's receive turnstile |
1807 | * |
1808 | * Conditions: |
1809 | * mqueue locked or thread waiting on turnstile is locked. |
1810 | */ |
1811 | static struct turnstile * |
1812 | ipc_port_rcv_turnstile(ipc_port_t port) |
1813 | { |
1814 | return *port_rcv_turnstile_address(port); |
1815 | } |
1816 | |
1817 | |
1818 | /* |
1819 | * Routine: ipc_port_link_special_reply_port |
1820 | * Purpose: |
1821 | * Link the special reply port with the destination port. |
1822 | * Allocates turnstile to dest port. |
1823 | * |
1824 | * Conditions: |
1825 | * Nothing is locked. |
1826 | */ |
1827 | void |
1828 | ipc_port_link_special_reply_port( |
1829 | ipc_port_t special_reply_port, |
1830 | ipc_port_t dest_port, |
1831 | boolean_t sync_bootstrap_checkin) |
1832 | { |
1833 | boolean_t drop_turnstile_ref = FALSE; |
1834 | boolean_t special_reply = FALSE; |
1835 | |
1836 | /* Check if dest_port needs a turnstile */ |
	ipc_port_send_turnstile_prepare(dest_port);
1838 | |
1839 | /* Lock the special reply port and establish the linkage */ |
1840 | ip_mq_lock(special_reply_port); |
1841 | |
1842 | special_reply = special_reply_port->ip_specialreply; |
1843 | |
1844 | if (sync_bootstrap_checkin && special_reply) { |
1845 | special_reply_port->ip_sync_bootstrap_checkin = 1; |
1846 | } |
1847 | |
1848 | /* Check if we need to drop the acquired turnstile ref on dest port */ |
1849 | if (!special_reply || |
1850 | special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY || |
1851 | special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) { |
1852 | drop_turnstile_ref = TRUE; |
1853 | } else { |
1854 | /* take a reference on dest_port */ |
1855 | ip_reference(dest_port); |
1856 | special_reply_port->ip_sync_inheritor_port = dest_port; |
1857 | special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT; |
1858 | } |
1859 | |
1860 | ip_mq_unlock(special_reply_port); |
1861 | |
1862 | if (special_reply) { |
1863 | /* |
1864 | * For special reply ports, if the destination port is |
1865 | * marked with the thread group blocked tracking flag, |
1866 | * callout to the performance controller. |
1867 | */ |
		ipc_port_thread_group_blocked(dest_port);
1869 | } |
1870 | |
1871 | if (drop_turnstile_ref) { |
		ipc_port_send_turnstile_complete(dest_port);
1873 | } |
1874 | |
1875 | return; |
1876 | } |
1877 | |
1878 | /* |
1879 | * Routine: ipc_port_thread_group_blocked |
1880 | * Purpose: |
1881 | * Call thread_group_blocked callout if the port |
1882 | * has ip_tg_block_tracking bit set and the thread |
1883 | * has not made this callout already. |
1884 | * |
1885 | * Conditions: |
1886 | * Nothing is locked. |
1887 | */ |
1888 | void |
1889 | ipc_port_thread_group_blocked(ipc_port_t port __unused) |
1890 | { |
1891 | #if CONFIG_THREAD_GROUPS |
1892 | bool port_tg_block_tracking = false; |
1893 | thread_t self = current_thread(); |
1894 | |
1895 | if (self->thread_group == NULL || |
1896 | (self->options & TH_OPT_IPC_TG_BLOCKED)) { |
1897 | return; |
1898 | } |
1899 | |
1900 | port_tg_block_tracking = port->ip_tg_block_tracking; |
1901 | if (!port_tg_block_tracking) { |
1902 | return; |
1903 | } |
1904 | |
	machine_thread_group_blocked(self->thread_group, NULL,
	    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1907 | |
1908 | self->options |= TH_OPT_IPC_TG_BLOCKED; |
1909 | #endif |
1910 | } |
1911 | |
1912 | /* |
1913 | * Routine: ipc_port_thread_group_unblocked |
1914 | * Purpose: |
1915 | * Call thread_group_unblocked callout if the |
1916 | * thread had previously made a thread_group_blocked |
1917 | * callout before (indicated by TH_OPT_IPC_TG_BLOCKED |
1918 | * flag on the thread). |
1919 | * |
1920 | * Conditions: |
1921 | * Nothing is locked. |
1922 | */ |
1923 | void |
1924 | ipc_port_thread_group_unblocked(void) |
1925 | { |
1926 | #if CONFIG_THREAD_GROUPS |
1927 | thread_t self = current_thread(); |
1928 | |
1929 | if (!(self->options & TH_OPT_IPC_TG_BLOCKED)) { |
1930 | return; |
1931 | } |
1932 | |
	machine_thread_group_unblocked(self->thread_group, NULL,
	    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1935 | |
1936 | self->options &= ~TH_OPT_IPC_TG_BLOCKED; |
1937 | #endif |
1938 | } |
1939 | |
1940 | #if DEVELOPMENT || DEBUG |
1941 | inline void |
1942 | ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port) |
1943 | { |
1944 | special_reply_port->ip_srp_lost_link = 0; |
1945 | special_reply_port->ip_srp_msg_sent = 0; |
1946 | } |
1947 | |
1948 | static inline void |
1949 | ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port) |
1950 | { |
1951 | if (special_reply_port->ip_specialreply == 1) { |
1952 | special_reply_port->ip_srp_msg_sent = 0; |
1953 | } |
1954 | } |
1955 | |
1956 | inline void |
1957 | ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port) |
1958 | { |
1959 | if (special_reply_port->ip_specialreply == 1) { |
1960 | special_reply_port->ip_srp_msg_sent = 1; |
1961 | } |
1962 | } |
1963 | |
1964 | static inline void |
1965 | ipc_special_reply_port_lost_link(ipc_port_t special_reply_port) |
1966 | { |
1967 | if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) { |
1968 | special_reply_port->ip_srp_lost_link = 1; |
1969 | } |
1970 | } |
1971 | |
1972 | #else /* DEVELOPMENT || DEBUG */ |
1973 | inline void |
1974 | ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port) |
1975 | { |
1976 | return; |
1977 | } |
1978 | |
1979 | static inline void |
1980 | ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port) |
1981 | { |
1982 | return; |
1983 | } |
1984 | |
1985 | inline void |
1986 | ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port) |
1987 | { |
1988 | return; |
1989 | } |
1990 | |
1991 | static inline void |
1992 | ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port) |
1993 | { |
1994 | return; |
1995 | } |
1996 | #endif /* DEVELOPMENT || DEBUG */ |
1997 | |
1998 | /* |
1999 | * Routine: ipc_port_adjust_special_reply_port_locked |
2000 | * Purpose: |
2001 | * If the special port has a turnstile, update its inheritor. |
2002 | * Condition: |
2003 | * Special reply port locked on entry. |
2004 | * Special reply port unlocked on return. |
2005 | * The passed in port is a special reply port. |
2006 | * Returns: |
2007 | * None. |
2008 | */ |
2009 | void |
2010 | ipc_port_adjust_special_reply_port_locked( |
2011 | ipc_port_t special_reply_port, |
2012 | struct knote *kn, |
2013 | uint8_t flags, |
2014 | boolean_t get_turnstile) |
2015 | { |
2016 | ipc_port_t dest_port = IPC_PORT_NULL; |
2017 | int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE; |
2018 | turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL; |
2019 | struct turnstile *ts = TURNSTILE_NULL; |
2020 | struct turnstile *port_stashed_turnstile = TURNSTILE_NULL; |
2021 | |
2022 | ip_mq_lock_held(special_reply_port); // ip_sync_link_state is touched |
2023 | |
2024 | if (!special_reply_port->ip_specialreply) { |
2025 | // only mach_msg_receive_results_complete() calls this with any port |
2026 | assert(get_turnstile); |
2027 | goto not_special; |
2028 | } |
2029 | |
2030 | if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) { |
2031 | ipc_special_reply_port_msg_sent_reset(special_reply_port); |
2032 | } |
2033 | |
2034 | if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) { |
2035 | special_reply_port->ip_messages.imq_srp_owner_thread = NULL; |
2036 | } |
2037 | |
2038 | if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) { |
2039 | special_reply_port->ip_sync_bootstrap_checkin = 0; |
2040 | } |
2041 | |
2042 | /* Check if the special reply port is marked non-special */ |
2043 | if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) { |
2044 | not_special: |
2045 | if (get_turnstile) { |
			turnstile_complete((uintptr_t)special_reply_port,
			    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
2048 | } |
2049 | ip_mq_unlock(special_reply_port); |
2050 | if (get_turnstile) { |
2051 | turnstile_cleanup(); |
2052 | } |
2053 | return; |
2054 | } |
2055 | |
2056 | if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) { |
2057 | if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) { |
			inheritor = filt_machport_stash_port(kn, special_reply_port,
			    &sync_link_state);
2060 | } |
2061 | } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) { |
2062 | sync_link_state = PORT_SYNC_LINK_ANY; |
2063 | } |
2064 | |
2065 | /* Check if need to break linkage */ |
2066 | if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE && |
2067 | special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) { |
2068 | ip_mq_unlock(special_reply_port); |
2069 | return; |
2070 | } |
2071 | |
2072 | switch (special_reply_port->ip_sync_link_state) { |
2073 | case PORT_SYNC_LINK_PORT: |
2074 | dest_port = special_reply_port->ip_sync_inheritor_port; |
2075 | special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL; |
2076 | break; |
2077 | case PORT_SYNC_LINK_WORKLOOP_KNOTE: |
2078 | special_reply_port->ip_sync_inheritor_knote = NULL; |
2079 | break; |
2080 | case PORT_SYNC_LINK_WORKLOOP_STASH: |
2081 | port_stashed_turnstile = special_reply_port->ip_sync_inheritor_ts; |
2082 | special_reply_port->ip_sync_inheritor_ts = NULL; |
2083 | break; |
2084 | } |
2085 | |
2086 | /* |
2087 | * Stash (or unstash) the server's PID in the ip_sorights field of the |
2088 | * special reply port, so that stackshot can later retrieve who the client |
2089 | * is blocked on. |
2090 | */ |
2091 | if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT && |
2092 | sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) { |
		ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
2094 | } else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE && |
2095 | sync_link_state == PORT_SYNC_LINK_ANY) { |
2096 | /* If we are resetting the special reply port, remove the stashed pid. */ |
		ipc_special_reply_stash_pid_locked(special_reply_port, 0);
2098 | } |
2099 | |
2100 | special_reply_port->ip_sync_link_state = sync_link_state; |
2101 | |
2102 | switch (sync_link_state) { |
2103 | case PORT_SYNC_LINK_WORKLOOP_KNOTE: |
2104 | special_reply_port->ip_sync_inheritor_knote = kn; |
2105 | break; |
2106 | case PORT_SYNC_LINK_WORKLOOP_STASH: |
		turnstile_reference(inheritor);
2108 | special_reply_port->ip_sync_inheritor_ts = inheritor; |
2109 | break; |
2110 | case PORT_SYNC_LINK_NO_LINKAGE: |
2111 | if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) { |
2112 | ipc_special_reply_port_lost_link(special_reply_port); |
2113 | } |
2114 | break; |
2115 | } |
2116 | |
2117 | /* Get thread's turnstile donated to special reply port */ |
2118 | if (get_turnstile) { |
		turnstile_complete((uintptr_t)special_reply_port,
		    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
2121 | } else { |
		ts = ipc_port_rcv_turnstile(special_reply_port);
		if (ts) {
			turnstile_reference(ts);
			ipc_port_recv_update_inheritor(special_reply_port, ts,
			    TURNSTILE_IMMEDIATE_UPDATE);
2127 | } |
2128 | } |
2129 | |
2130 | ip_mq_unlock(special_reply_port); |
2131 | |
2132 | if (get_turnstile) { |
2133 | turnstile_cleanup(); |
2134 | } else if (ts) { |
2135 | /* Call turnstile cleanup after dropping the interlock */ |
		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate_safe(ts);
2138 | } |
2139 | |
2140 | if (port_stashed_turnstile) { |
		turnstile_deallocate_safe(port_stashed_turnstile);
2142 | } |
2143 | |
2144 | /* Release the ref on the dest port and its turnstile */ |
2145 | if (dest_port) { |
		ipc_port_send_turnstile_complete(dest_port);
2147 | /* release the reference on the dest port, space lock might be held */ |
2148 | ip_release_safe(dest_port); |
2149 | } |
2150 | } |
2151 | |
2152 | /* |
2153 | * Routine: ipc_port_adjust_special_reply_port |
2154 | * Purpose: |
2155 | * If the special port has a turnstile, update its inheritor. |
2156 | * Condition: |
2157 | * Nothing locked. |
2158 | * Returns: |
2159 | * None. |
2160 | */ |
2161 | void |
2162 | ipc_port_adjust_special_reply_port( |
2163 | ipc_port_t port, |
2164 | uint8_t flags) |
2165 | { |
2166 | if (port->ip_specialreply) { |
2167 | ip_mq_lock(port); |
		ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
2169 | } |
2170 | } |
2171 | |
2172 | /* |
2173 | * Routine: ipc_port_adjust_sync_link_state_locked |
2174 | * Purpose: |
2175 | * Update the sync link state of the port and the |
2176 | * turnstile inheritor. |
2177 | * Condition: |
2178 | * Port locked on entry. |
2179 | * Port locked on return. |
2180 | * Returns: |
2181 | * None. |
2182 | */ |
2183 | void |
2184 | ipc_port_adjust_sync_link_state_locked( |
2185 | ipc_port_t port, |
2186 | int sync_link_state, |
2187 | turnstile_inheritor_t inheritor) |
2188 | { |
2189 | switch (port->ip_sync_link_state) { |
2190 | case PORT_SYNC_LINK_RCV_THREAD: |
2191 | /* deallocate the thread reference for the inheritor */ |
		thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
2193 | break; |
2194 | case PORT_SYNC_LINK_WORKLOOP_STASH: |
2195 | /* deallocate the turnstile reference for the inheritor */ |
		turnstile_deallocate_safe(port->ip_messages.imq_inheritor_turnstile);
2197 | break; |
2198 | } |
2199 | |
	klist_init(&port->ip_klist);
2201 | |
2202 | switch (sync_link_state) { |
2203 | case PORT_SYNC_LINK_WORKLOOP_KNOTE: |
2204 | port->ip_messages.imq_inheritor_knote = inheritor; |
2205 | break; |
2206 | case PORT_SYNC_LINK_WORKLOOP_STASH: |
2207 | /* knote can be deleted by userspace, take a reference on turnstile */ |
		turnstile_reference(inheritor);
2209 | port->ip_messages.imq_inheritor_turnstile = inheritor; |
2210 | break; |
2211 | case PORT_SYNC_LINK_RCV_THREAD: |
2212 | /* The thread could exit without clearing port state, take a thread ref */ |
		thread_reference((thread_t)inheritor);
2214 | port->ip_messages.imq_inheritor_thread_ref = inheritor; |
2215 | break; |
2216 | default: |
		klist_init(&port->ip_klist);
2218 | sync_link_state = PORT_SYNC_LINK_ANY; |
2219 | } |
2220 | |
2221 | port->ip_sync_link_state = sync_link_state; |
2222 | } |
2223 | |
2224 | |
2225 | /* |
2226 | * Routine: ipc_port_adjust_port_locked |
2227 | * Purpose: |
2228 | * If the port has a turnstile, update its inheritor. |
2229 | * Condition: |
2230 | * Port locked on entry. |
2231 | * Port unlocked on return. |
2232 | * Returns: |
2233 | * None. |
2234 | */ |
2235 | void |
2236 | ipc_port_adjust_port_locked( |
2237 | ipc_port_t port, |
2238 | struct knote *kn, |
2239 | boolean_t sync_bootstrap_checkin) |
2240 | { |
2241 | int sync_link_state = PORT_SYNC_LINK_ANY; |
2242 | turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL; |
2243 | |
2244 | ip_mq_lock_held(port); // ip_sync_link_state is touched |
2245 | assert(!port->ip_specialreply); |
2246 | |
2247 | if (kn) { |
		inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
2249 | if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) { |
2250 | inheritor = kn; |
2251 | } |
2252 | } else if (sync_bootstrap_checkin) { |
2253 | inheritor = current_thread(); |
2254 | sync_link_state = PORT_SYNC_LINK_RCV_THREAD; |
2255 | } |
2256 | |
2257 | ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor); |
2258 | port->ip_sync_bootstrap_checkin = 0; |
2259 | |
2260 | ipc_port_send_turnstile_recompute_push_locked(port); |
2261 | /* port unlocked */ |
2262 | } |
2263 | |
2264 | /* |
2265 | * Routine: ipc_port_clear_sync_rcv_thread_boost_locked |
2266 | * Purpose: |
2267 | * If the port is pushing on rcv thread, clear it. |
2268 | * Condition: |
2269 | * Port locked on entry |
2270 | * Port unlocked on return. |
2271 | * Returns: |
2272 | * None. |
2273 | */ |
2274 | void |
2275 | ipc_port_clear_sync_rcv_thread_boost_locked( |
2276 | ipc_port_t port) |
2277 | { |
2278 | ip_mq_lock_held(port); // ip_sync_link_state is touched |
2279 | |
2280 | if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) { |
2281 | ip_mq_unlock(port); |
2282 | return; |
2283 | } |
2284 | |
2285 | ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL); |
2286 | |
2287 | ipc_port_send_turnstile_recompute_push_locked(port); |
2288 | /* port unlocked */ |
2289 | } |
2290 | |
2291 | /* |
2292 | * Routine: ipc_port_has_prdrequest |
2293 | * Purpose: |
2294 | * Returns whether a port has a port-destroyed request armed |
2295 | * Condition: |
2296 | * Port is locked. |
2297 | */ |
2298 | bool |
2299 | ipc_port_has_prdrequest( |
2300 | ipc_port_t port) |
2301 | { |
2302 | if (port->ip_specialreply) { |
2303 | return false; |
2304 | } |
2305 | if (port->ip_has_watchport) { |
2306 | return port->ip_twe->twe_pdrequest != IP_NULL; |
2307 | } |
2308 | return port->ip_pdrequest != IP_NULL; |
2309 | } |
2310 | |
2311 | /* |
2312 | * Routine: ipc_port_add_watchport_elem_locked |
2313 | * Purpose: |
2314 | * Transfer the turnstile boost of watchport to task calling exec. |
2315 | * Condition: |
2316 | * Port locked on entry. |
2317 | * Port unlocked on return. |
2318 | * Returns: |
 *	KERN_SUCCESS on success.
2320 | * KERN_FAILURE otherwise. |
2321 | */ |
2322 | kern_return_t |
2323 | ipc_port_add_watchport_elem_locked( |
2324 | ipc_port_t port, |
2325 | struct task_watchport_elem *watchport_elem, |
2326 | struct task_watchport_elem **old_elem) |
2327 | { |
2328 | ip_mq_lock_held(port); |
2329 | |
2330 | /* Watchport boost only works for non-special active ports mapped in an ipc space */ |
2331 | if (!ip_active(port) || port->ip_specialreply || !ip_in_a_space(port)) { |
2332 | ip_mq_unlock(port); |
2333 | return KERN_FAILURE; |
2334 | } |
2335 | |
2336 | if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) { |
2337 | /* Sever the linkage if the port was pushing on knote */ |
2338 | ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL); |
2339 | } |
2340 | |
	*old_elem = ipc_port_update_watchport_elem(port, watchport_elem);
2342 | |
2343 | ipc_port_send_turnstile_recompute_push_locked(port); |
2344 | /* port unlocked */ |
2345 | return KERN_SUCCESS; |
2346 | } |
2347 | |
2348 | /* |
2349 | * Routine: ipc_port_clear_watchport_elem_internal_conditional_locked |
2350 | * Purpose: |
2351 | * Remove the turnstile boost of watchport and recompute the push. |
2352 | * Condition: |
2353 | * Port locked on entry. |
2354 | * Port unlocked on return. |
2355 | * Returns: |
 *	KERN_SUCCESS on success.
2357 | * KERN_FAILURE otherwise. |
2358 | */ |
2359 | kern_return_t |
2360 | ipc_port_clear_watchport_elem_internal_conditional_locked( |
2361 | ipc_port_t port, |
2362 | struct task_watchport_elem *watchport_elem) |
2363 | { |
2364 | ip_mq_lock_held(port); |
2365 | |
2366 | if (ipc_port_watchport_elem(port) != watchport_elem) { |
2367 | ip_mq_unlock(port); |
2368 | return KERN_FAILURE; |
2369 | } |
2370 | |
2371 | ipc_port_clear_watchport_elem_internal(port); |
2372 | ipc_port_send_turnstile_recompute_push_locked(port); |
2373 | /* port unlocked */ |
2374 | return KERN_SUCCESS; |
2375 | } |
2376 | |
2377 | /* |
2378 | * Routine: ipc_port_replace_watchport_elem_conditional_locked |
2379 | * Purpose: |
2380 | * Replace the turnstile boost of watchport and recompute the push. |
2381 | * Condition: |
2382 | * Port locked on entry. |
2383 | * Port unlocked on return. |
2384 | * Returns: |
 *	KERN_SUCCESS on success.
2386 | * KERN_FAILURE otherwise. |
2387 | */ |
2388 | kern_return_t |
2389 | ipc_port_replace_watchport_elem_conditional_locked( |
2390 | ipc_port_t port, |
2391 | struct task_watchport_elem *old_watchport_elem, |
2392 | struct task_watchport_elem *new_watchport_elem) |
2393 | { |
2394 | ip_mq_lock_held(port); |
2395 | |
2396 | if (port->ip_specialreply || |
2397 | ipc_port_watchport_elem(port) != old_watchport_elem) { |
2398 | ip_mq_unlock(port); |
2399 | return KERN_FAILURE; |
2400 | } |
2401 | |
	ipc_port_update_watchport_elem(port, new_watchport_elem);
2403 | ipc_port_send_turnstile_recompute_push_locked(port); |
2404 | /* port unlocked */ |
2405 | return KERN_SUCCESS; |
2406 | } |
2407 | |
2408 | /* |
2409 | * Routine: ipc_port_clear_watchport_elem_internal |
2410 | * Purpose: |
2411 | * Remove the turnstile boost of watchport. |
2412 | * Condition: |
2413 | * Port locked on entry. |
2414 | * Port locked on return. |
2415 | * Returns: |
2416 | * Old task_watchport_elem returned. |
2417 | */ |
2418 | struct task_watchport_elem * |
2419 | ipc_port_clear_watchport_elem_internal( |
2420 | ipc_port_t port) |
2421 | { |
2422 | ip_mq_lock_held(port); |
2423 | |
2424 | if (!port->ip_has_watchport) { |
2425 | return NULL; |
2426 | } |
2427 | |
2428 | return ipc_port_update_watchport_elem(port, NULL); |
2429 | } |
2430 | |
2431 | /* |
2432 | * Routine: ipc_port_send_turnstile_recompute_push_locked |
2433 | * Purpose: |
2434 | * Update send turnstile inheritor of port and recompute the push. |
2435 | * Condition: |
2436 | * Port locked on entry. |
2437 | * Port unlocked on return. |
2438 | * Returns: |
2439 | * None. |
2440 | */ |
2441 | static void |
2442 | ipc_port_send_turnstile_recompute_push_locked( |
2443 | ipc_port_t port) |
2444 | { |
2445 | struct turnstile *send_turnstile = port_send_turnstile(port); |
2446 | if (send_turnstile) { |
		turnstile_reference(send_turnstile);
		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);
2450 | } |
2451 | ip_mq_unlock(port); |
2452 | |
2453 | if (send_turnstile) { |
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate_safe(send_turnstile);
2457 | } |
2458 | } |
2459 | |
2460 | /* |
2461 | * Routine: ipc_port_get_watchport_inheritor |
2462 | * Purpose: |
2463 | * Returns inheritor for watchport. |
2464 | * |
2465 | * Conditions: |
2466 | * mqueue locked. |
2467 | * Returns: |
2468 | * watchport inheritor. |
2469 | */ |
2470 | static thread_t |
2471 | ipc_port_get_watchport_inheritor( |
2472 | ipc_port_t port) |
2473 | { |
2474 | ip_mq_lock_held(port); |
2475 | return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread; |
2476 | } |
2477 | |
2478 | /* |
 * Routine:	ipc_port_get_receiver_task_locked
2480 | * Purpose: |
2481 | * Returns receiver task pointer and its pid (if any) for port. |
2482 | * |
2483 | * Conditions: |
2484 | * Assumes the port is locked. |
2485 | */ |
2486 | pid_t |
2487 | ipc_port_get_receiver_task_locked(ipc_port_t port, uintptr_t *task) |
2488 | { |
2489 | task_t receiver = TASK_NULL; |
2490 | pid_t pid = -1; |
2491 | |
2492 | if (!port) { |
2493 | goto out; |
2494 | } |
2495 | |
2496 | if (ip_in_a_space(port) && |
	    !ip_in_space(port, ipc_space_kernel) &&
	    !ip_in_space(port, ipc_space_reply)) {
2499 | receiver = port->ip_receiver->is_task; |
		pid = task_pid(receiver);
2501 | } |
2502 | |
2503 | out: |
2504 | if (task) { |
2505 | *task = (uintptr_t)receiver; |
2506 | } |
2507 | return pid; |
2508 | } |
2509 | |
2510 | /* |
2511 | * Routine: ipc_port_get_receiver_task |
2512 | * Purpose: |
2513 | * Returns receiver task pointer and its pid (if any) for port. |
2514 | * |
2515 | * Conditions: |
2516 | * Nothing locked. The routine takes port lock. |
2517 | */ |
2518 | pid_t |
2519 | ipc_port_get_receiver_task(ipc_port_t port, uintptr_t *task) |
2520 | { |
2521 | pid_t pid = -1; |
2522 | |
2523 | if (!port) { |
2524 | if (task) { |
2525 | *task = (uintptr_t)TASK_NULL; |
2526 | } |
2527 | return pid; |
2528 | } |
2529 | |
2530 | ip_mq_lock(port); |
2531 | pid = ipc_port_get_receiver_task_locked(port, task); |
2532 | ip_mq_unlock(port); |
2533 | |
2534 | return pid; |
2535 | } |
2536 | |
2537 | /* |
2538 | * Routine: ipc_port_impcount_delta |
2539 | * Purpose: |
2540 | * Adjust only the importance count associated with a port. |
2541 | * If there are any adjustments to be made to receiver task, |
2542 | * those are handled elsewhere. |
2543 | * |
2544 | * For now, be defensive during deductions to make sure the |
2545 | * impcount for the port doesn't underflow zero. This will |
2546 | * go away when the port boost addition is made atomic (see |
2547 | * note in ipc_port_importance_delta()). |
2548 | * Conditions: |
2549 | * The port is referenced and locked. |
2550 | * Nothing else is locked. |
2551 | */ |
2552 | mach_port_delta_t |
2553 | ipc_port_impcount_delta( |
2554 | ipc_port_t port, |
2555 | mach_port_delta_t delta, |
2556 | ipc_port_t __unused base) |
2557 | { |
2558 | mach_port_delta_t absdelta; |
2559 | |
2560 | if (!ip_active(port)) { |
2561 | return 0; |
2562 | } |
2563 | |
2564 | /* adding/doing nothing is easy */ |
2565 | if (delta >= 0) { |
2566 | port->ip_impcount += delta; |
2567 | return delta; |
2568 | } |
2569 | |
2570 | absdelta = 0 - delta; |
2571 | if (port->ip_impcount >= absdelta) { |
2572 | port->ip_impcount -= absdelta; |
2573 | return delta; |
2574 | } |
2575 | |
2576 | #if (DEVELOPMENT || DEBUG) |
2577 | if (ip_in_a_space(port)) { |
2578 | task_t target_task = port->ip_receiver->is_task; |
2579 | ipc_importance_task_t target_imp = target_task->task_imp_base; |
2580 | const char *target_procname; |
2581 | int target_pid; |
2582 | |
2583 | if (target_imp != IIT_NULL) { |
2584 | target_procname = target_imp->iit_procname; |
2585 | target_pid = target_imp->iit_bsd_pid; |
2586 | } else { |
2587 | target_procname = "unknown" ; |
2588 | target_pid = -1; |
2589 | } |
2590 | printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), " |
2591 | "dropping %d assertion(s) but port only has %d remaining.\n" , |
2592 | ip_get_receiver_name(port), |
2593 | target_pid, target_procname, |
2594 | absdelta, port->ip_impcount); |
2595 | } else if (base != IP_NULL) { |
2596 | assert(ip_in_a_space(base)); |
2597 | task_t target_task = base->ip_receiver->is_task; |
2598 | ipc_importance_task_t target_imp = target_task->task_imp_base; |
2599 | const char *target_procname; |
2600 | int target_pid; |
2601 | |
2602 | if (target_imp != IIT_NULL) { |
2603 | target_procname = target_imp->iit_procname; |
2604 | target_pid = target_imp->iit_bsd_pid; |
2605 | } else { |
2606 | target_procname = "unknown" ; |
2607 | target_pid = -1; |
2608 | } |
2609 | printf("Over-release of importance assertions for port 0x%lx " |
2610 | "enqueued on port 0x%x with receiver pid %d (%s), " |
2611 | "dropping %d assertion(s) but port only has %d remaining.\n" , |
2612 | (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port), |
2613 | ip_get_receiver_name(base), |
2614 | target_pid, target_procname, |
2615 | absdelta, port->ip_impcount); |
2616 | } |
2617 | #endif |
2618 | |
2619 | delta = 0 - port->ip_impcount; |
2620 | port->ip_impcount = 0; |
2621 | return delta; |
2622 | } |
2623 | |
2624 | /* |
2625 | * Routine: ipc_port_importance_delta_internal |
2626 | * Purpose: |
2627 | * Adjust the importance count through the given port. |
2628 | * If the port is in transit, apply the delta throughout |
 *	the chain. Determine if there is a task at the
2630 | * base of the chain that wants/needs to be adjusted, |
2631 | * and if so, apply the delta. |
2632 | * Conditions: |
2633 | * The port is referenced and locked on entry. |
2634 | * Importance may be locked. |
2635 | * Nothing else is locked. |
2636 | * The lock may be dropped on exit. |
2637 | * Returns TRUE if lock was dropped. |
2638 | */ |
2639 | #if IMPORTANCE_INHERITANCE |
2640 | |
2641 | boolean_t |
2642 | ipc_port_importance_delta_internal( |
2643 | ipc_port_t port, |
2644 | natural_t options, |
2645 | mach_port_delta_t *deltap, |
2646 | ipc_importance_task_t *imp_task) |
2647 | { |
2648 | ipc_port_t next, base; |
2649 | bool dropped = false; |
2650 | bool took_base_ref = false; |
2651 | |
2652 | *imp_task = IIT_NULL; |
2653 | |
2654 | if (*deltap == 0) { |
2655 | return FALSE; |
2656 | } |
2657 | |
2658 | assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE); |
2659 | |
2660 | base = port; |
2661 | |
2662 | /* if port is in transit, have to search for end of chain */ |
2663 | if (ip_in_transit(port)) { |
		dropped = true;

		ip_mq_unlock(port);
2668 | ipc_port_multiple_lock(); /* massive serialization */ |
2669 | |
2670 | took_base_ref = ipc_port_destination_chain_lock(port, base: &base); |
2671 | /* all ports in chain from port to base, inclusive, are locked */ |
2672 | |
2673 | ipc_port_multiple_unlock(); |
2674 | } |
2675 | |
2676 | /* |
2677 | * If the port lock is dropped b/c the port is in transit, there is a |
2678 | * race window where another thread can drain messages and/or fire a |
2679 | * send possible notification before we get here. |
2680 | * |
2681 | * We solve this race by checking to see if our caller armed the send |
2682 | * possible notification, whether or not it's been fired yet, and |
2683 | * whether or not we've already set the port's ip_spimportant bit. If |
2684 | * we don't need a send-possible boost, then we'll just apply a |
2685 | * harmless 0-boost to the port. |
2686 | */ |
2687 | if (options & IPID_OPTION_SENDPOSSIBLE) { |
2688 | assert(*deltap == 1); |
2689 | if (port->ip_sprequests && port->ip_spimportant == 0) { |
2690 | port->ip_spimportant = 1; |
2691 | } else { |
2692 | *deltap = 0; |
2693 | } |
2694 | } |
2695 | |
2696 | /* unlock down to the base, adjusting boost(s) at each level */ |
2697 | for (;;) { |
		*deltap = ipc_port_impcount_delta(port, *deltap, base);
2699 | |
2700 | if (port == base) { |
2701 | break; |
2702 | } |
2703 | |
2704 | /* port is in transit */ |
2705 | assert(port->ip_tempowner == 0); |
2706 | assert(ip_in_transit(port)); |
2707 | next = ip_get_destination(port); |
2708 | ip_mq_unlock(port); |
2709 | port = next; |
2710 | } |
2711 | |
2712 | /* find the task (if any) to boost according to the base */ |
2713 | if (ip_active(base)) { |
2714 | if (base->ip_tempowner != 0) { |
			if (IIT_NULL != ip_get_imp_task(base)) {
				*imp_task = ip_get_imp_task(base);
2717 | } |
2718 | /* otherwise don't boost */ |
		} else if (ip_in_a_space(base)) {
			ipc_space_t space = ip_get_receiver(base);
2721 | |
2722 | /* only spaces with boost-accepting tasks */ |
2723 | if (space->is_task != TASK_NULL && |
		    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
2725 | *imp_task = space->is_task->task_imp_base; |
2726 | } |
2727 | } |
2728 | } |
2729 | |
2730 | /* |
2731 | * Only the base is locked. If we have to hold or drop task |
2732 | * importance assertions, we'll have to drop that lock as well. |
2733 | */ |
2734 | if (*imp_task != IIT_NULL) { |
2735 | /* take a reference before unlocking base */ |
		ipc_importance_task_reference(*imp_task);
2737 | } |
2738 | |
2739 | if (dropped) { |
2740 | ip_mq_unlock(base); |
2741 | if (took_base_ref) { |
2742 | /* importance lock might be held */ |
2743 | ip_release_safe(base); |
2744 | } |
2745 | } |
2746 | |
2747 | return dropped; |
2748 | } |
2749 | #endif /* IMPORTANCE_INHERITANCE */ |
2750 | |
2751 | /* |
2752 | * Routine: ipc_port_importance_delta |
2753 | * Purpose: |
2754 | * Adjust the importance count through the given port. |
2755 | * If the port is in transit, apply the delta throughout |
2756 | * the chain. |
2757 | * |
2758 | * If there is a task at the base of the chain that wants/needs |
2759 | * to be adjusted, apply the delta. |
2760 | * Conditions: |
2761 | * The port is referenced and locked on entry. |
2762 | * Nothing else is locked. |
2763 | * The lock may be dropped on exit. |
2764 | * Returns TRUE if lock was dropped. |
2765 | */ |
2766 | #if IMPORTANCE_INHERITANCE |
2767 | |
2768 | boolean_t |
2769 | ipc_port_importance_delta( |
2770 | ipc_port_t port, |
2771 | natural_t options, |
2772 | mach_port_delta_t delta) |
2773 | { |
2774 | ipc_importance_task_t imp_task = IIT_NULL; |
2775 | boolean_t dropped; |
2776 | |
	dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);
2778 | |
2779 | if (IIT_NULL == imp_task || delta == 0) { |
2780 | if (imp_task) { |
			ipc_importance_task_release(imp_task);
2782 | } |
2783 | return dropped; |
2784 | } |
2785 | |
2786 | if (!dropped) { |
2787 | ip_mq_unlock(port); |
2788 | } |
2789 | |
2790 | assert(ipc_importance_task_is_any_receiver_type(imp_task)); |
2791 | |
2792 | if (delta > 0) { |
		ipc_importance_task_hold_internal_assertion(imp_task, delta);
2794 | } else { |
		ipc_importance_task_drop_internal_assertion(imp_task, -delta);
2796 | } |
2797 | |
	ipc_importance_task_release(imp_task);
2799 | return TRUE; |
2800 | } |
2801 | #endif /* IMPORTANCE_INHERITANCE */ |
2802 | |
2803 | ipc_port_t |
2804 | ipc_port_make_send_any_locked( |
2805 | ipc_port_t port) |
2806 | { |
2807 | require_ip_active(port); |
2808 | port->ip_mscount++; |
2809 | ip_srights_inc(port); |
2810 | ip_reference(port); |
2811 | return port; |
2812 | } |
2813 | |
2814 | ipc_port_t |
2815 | ipc_port_make_send_any( |
2816 | ipc_port_t port) |
2817 | { |
2818 | ipc_port_t sright = port; |
2819 | |
2820 | if (IP_VALID(port)) { |
2821 | ip_mq_lock(port); |
2822 | if (ip_active(port)) { |
2823 | ipc_port_make_send_any_locked(port); |
2824 | } else { |
2825 | sright = IP_DEAD; |
2826 | } |
2827 | ip_mq_unlock(port); |
2828 | } |
2829 | |
2830 | return sright; |
2831 | } |
2832 | |
2833 | ipc_port_t |
2834 | ipc_port_make_send_mqueue( |
2835 | ipc_port_t port) |
2836 | { |
2837 | ipc_port_t sright = port; |
2838 | ipc_kobject_type_t kotype; |
2839 | |
2840 | if (IP_VALID(port)) { |
2841 | kotype = ip_kotype(port); |
2842 | |
2843 | ip_mq_lock(port); |
2844 | if (__improbable(!ip_active(port))) { |
2845 | sright = IP_DEAD; |
2846 | } else if (kotype == IKOT_NONE) { |
2847 | ipc_port_make_send_any_locked(port); |
2848 | } else if (kotype == IKOT_TIMER) { |
2849 | ipc_kobject_mktimer_require_locked(port); |
2850 | ipc_port_make_send_any_locked(port); |
2851 | } else { |
2852 | sright = IP_NULL; |
2853 | } |
2854 | ip_mq_unlock(port); |
2855 | } |
2856 | |
2857 | return sright; |
2858 | } |
2859 | |
2860 | void |
2861 | ipc_port_copy_send_any_locked( |
2862 | ipc_port_t port) |
2863 | { |
2864 | assert(port->ip_srights > 0); |
2865 | ip_srights_inc(port); |
2866 | ip_reference(port); |
2867 | } |
2868 | |
2869 | ipc_port_t |
2870 | ipc_port_copy_send_any( |
2871 | ipc_port_t port) |
2872 | { |
2873 | ipc_port_t sright = port; |
2874 | |
2875 | if (IP_VALID(port)) { |
2876 | ip_mq_lock(port); |
2877 | if (ip_active(port)) { |
2878 | ipc_port_copy_send_any_locked(port); |
2879 | } else { |
2880 | sright = IP_DEAD; |
2881 | } |
2882 | ip_mq_unlock(port); |
2883 | } |
2884 | |
2885 | return sright; |
2886 | } |
2887 | |
2888 | ipc_port_t |
2889 | ipc_port_copy_send_mqueue( |
2890 | ipc_port_t port) |
2891 | { |
2892 | ipc_port_t sright = port; |
2893 | ipc_kobject_type_t kotype; |
2894 | |
2895 | if (IP_VALID(port)) { |
2896 | kotype = ip_kotype(port); |
2897 | |
2898 | ip_mq_lock(port); |
2899 | if (__improbable(!ip_active(port))) { |
2900 | sright = IP_DEAD; |
2901 | } else if (kotype == IKOT_NONE) { |
2902 | ipc_port_copy_send_any_locked(port); |
2903 | } else if (kotype == IKOT_TIMER) { |
2904 | ipc_kobject_mktimer_require_locked(port); |
2905 | ipc_port_copy_send_any_locked(port); |
2906 | } else { |
2907 | sright = IP_NULL; |
2908 | } |
2909 | ip_mq_unlock(port); |
2910 | } |
2911 | |
2912 | return sright; |
2913 | } |
2914 | |
2915 | /* |
2916 | * Routine: ipc_port_copyout_send |
2917 | * Purpose: |
2918 | * Copyout a naked send right (possibly null/dead), |
2919 | * or if that fails, destroy the right. |
2920 | * Conditions: |
2921 | * Nothing locked. |
2922 | */ |
2923 | |
2924 | static mach_port_name_t |
2925 | ipc_port_copyout_send_internal( |
2926 | ipc_port_t sright, |
2927 | ipc_space_t space, |
2928 | ipc_object_copyout_flags_t flags) |
2929 | { |
2930 | mach_port_name_t name; |
2931 | |
2932 | if (IP_VALID(sright)) { |
2933 | kern_return_t kr; |
2934 | |
2935 | kr = ipc_object_copyout(space, ip_to_object(sright), |
2936 | MACH_MSG_TYPE_PORT_SEND, flags, NULL, NULL, namep: &name); |
2937 | if (kr != KERN_SUCCESS) { |
2938 | if (kr == KERN_INVALID_CAPABILITY) { |
2939 | name = MACH_PORT_DEAD; |
2940 | } else { |
2941 | name = MACH_PORT_NULL; |
2942 | } |
2943 | } |
2944 | } else { |
2945 | name = CAST_MACH_PORT_TO_NAME(sright); |
2946 | } |
2947 | |
2948 | return name; |
2949 | } |
2950 | |
2951 | mach_port_name_t |
2952 | ipc_port_copyout_send( |
2953 | ipc_port_t sright, /* can be invalid */ |
2954 | ipc_space_t space) |
2955 | { |
	return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
2957 | } |
2958 | |
2959 | /* Used by pthread kext to copyout thread port only */ |
2960 | mach_port_name_t |
2961 | ipc_port_copyout_send_pinned( |
2962 | ipc_port_t sright, /* can be invalid */ |
2963 | ipc_space_t space) |
2964 | { |
2965 | assert(space->is_task != TASK_NULL); |
2966 | |
2967 | if (IP_VALID(sright)) { |
2968 | assert(ip_kotype(sright) == IKOT_THREAD_CONTROL); |
2969 | } |
2970 | |
2971 | if (task_is_pinned(space->is_task)) { |
		return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_PINNED);
2973 | } else { |
		return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
2975 | } |
2976 | } |
2977 | |
2978 | /* |
2979 | * Routine: ipc_port_release_send_and_unlock |
2980 | * Purpose: |
2981 | * Release a naked send right. |
2982 | * Consumes a ref for the port. |
2983 | * Conditions: |
2984 | * Port is valid and locked on entry |
2985 | * Port is unlocked on exit. |
2986 | */ |
2987 | void |
2988 | ipc_port_release_send_and_unlock( |
2989 | ipc_port_t port) |
2990 | { |
2991 | ipc_notify_nsenders_t nsrequest = { }; |
2992 | |
2993 | ip_srights_dec(port); |
2994 | |
2995 | if (ip_active(port) && port->ip_srights == 0) { |
2996 | nsrequest = ipc_notify_no_senders_prepare(port); |
2997 | } |
2998 | |
2999 | ip_mq_unlock(port); |
3000 | ip_release(port); |
3001 | |
3002 | ipc_notify_no_senders_emit(nsrequest); |
3003 | } |
3004 | |
3005 | /* |
3006 | * Routine: ipc_port_release_send |
3007 | * Purpose: |
3008 | * Release a naked send right. |
3009 | * Consumes a ref for the port. |
3010 | * Conditions: |
3011 | * Nothing locked. |
3012 | */ |
3013 | |
3014 | __attribute__((flatten, noinline)) |
3015 | void |
3016 | ipc_port_release_send( |
3017 | ipc_port_t port) |
3018 | { |
3019 | if (IP_VALID(port)) { |
3020 | ip_mq_lock(port); |
3021 | ipc_port_release_send_and_unlock(port); |
3022 | } |
3023 | } |
3024 | |
3025 | /* |
3026 | * Routine: ipc_port_make_sonce_locked |
3027 | * Purpose: |
3028 | * Make a naked send-once right from a receive right. |
3029 | * Conditions: |
3030 | * The port is locked and active. |
3031 | */ |
3032 | |
3033 | ipc_port_t |
3034 | ipc_port_make_sonce_locked( |
3035 | ipc_port_t port) |
3036 | { |
3037 | require_ip_active(port); |
3038 | ip_sorights_inc(port); |
3039 | ip_reference(port); |
3040 | return port; |
3041 | } |
3042 | |
3043 | /* |
3044 | * Routine: ipc_port_make_sonce |
3045 | * Purpose: |
3046 | * Make a naked send-once right from a receive right. |
3047 | * Conditions: |
3048 | * The port is not locked. |
3049 | */ |
3050 | |
3051 | ipc_port_t |
3052 | ipc_port_make_sonce( |
3053 | ipc_port_t port) |
3054 | { |
3055 | if (!IP_VALID(port)) { |
3056 | return port; |
3057 | } |
3058 | |
3059 | ip_mq_lock(port); |
3060 | if (ip_active(port)) { |
3061 | ipc_port_make_sonce_locked(port); |
3062 | ip_mq_unlock(port); |
3063 | return port; |
3064 | } |
3065 | ip_mq_unlock(port); |
3066 | return IP_DEAD; |
3067 | } |
3068 | |
3069 | /* |
 * Routine:	ipc_port_release_sonce_and_unlock
3071 | * Purpose: |
3072 | * Release a naked send-once right. |
3073 | * Consumes a ref for the port. |
3074 | * |
3075 | * In normal situations, this is never used. |
3076 | * Send-once rights are only consumed when |
3077 | * a message (possibly a send-once notification) |
3078 | * is sent to them. |
3079 | * Conditions: |
3080 | * The port is locked, possibly a space too. |
3081 | */ |
3082 | void |
3083 | ipc_port_release_sonce_and_unlock( |
3084 | ipc_port_t port) |
3085 | { |
3086 | ip_mq_lock_held(port); |
3087 | |
3088 | ip_sorights_dec(port); |
3089 | |
3090 | if (port->ip_specialreply) { |
		ipc_port_adjust_special_reply_port_locked(port, NULL,
		    IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN, FALSE);
3093 | } else { |
3094 | ip_mq_unlock(port); |
3095 | } |
3096 | |
3097 | ip_release(port); |
3098 | } |
3099 | |
3100 | /* |
3101 | * Routine: ipc_port_release_sonce |
3102 | * Purpose: |
3103 | * Release a naked send-once right. |
3104 | * Consumes a ref for the port. |
3105 | * |
3106 | * In normal situations, this is never used. |
3107 | * Send-once rights are only consumed when |
3108 | * a message (possibly a send-once notification) |
3109 | * is sent to them. |
3110 | * Conditions: |
3111 | * Nothing locked except possibly a space. |
3112 | */ |
3113 | void |
3114 | ipc_port_release_sonce( |
3115 | ipc_port_t port) |
3116 | { |
3117 | if (IP_VALID(port)) { |
3118 | ip_mq_lock(port); |
3119 | ipc_port_release_sonce_and_unlock(port); |
3120 | } |
3121 | } |
3122 | |
3123 | /* |
3124 | * Routine: ipc_port_release_receive |
3125 | * Purpose: |
3126 | * Release a naked (in limbo or in transit) receive right. |
3127 | * Consumes a ref for the port; destroys the port. |
3128 | * Conditions: |
3129 | * Nothing locked. |
3130 | */ |
3131 | |
3132 | void |
3133 | ipc_port_release_receive( |
3134 | ipc_port_t port) |
3135 | { |
3136 | ipc_port_t dest; |
3137 | |
3138 | if (!IP_VALID(port)) { |
3139 | return; |
3140 | } |
3141 | |
3142 | ip_mq_lock(port); |
3143 | require_ip_active(port); |
3144 | assert(!ip_in_a_space(port)); |
3145 | dest = ip_get_destination(port); |
3146 | |
3147 | ipc_port_destroy(port); /* consumes ref, unlocks */ |
3148 | |
3149 | if (dest != IP_NULL) { |
		ipc_port_send_turnstile_complete(dest);
3151 | ip_release(dest); |
3152 | } |
3153 | } |
3154 | |
3155 | /* |
3156 | * Routine: ipc_port_alloc_special |
3157 | * Purpose: |
3158 | * Allocate a port in a special space. |
3159 | * The new port is returned with one ref. |
3160 | * If unsuccessful, IP_NULL is returned. |
3161 | * Conditions: |
3162 | * Nothing locked. |
3163 | */ |
3164 | |
3165 | ipc_port_t |
3166 | ipc_port_alloc_special( |
3167 | ipc_space_t space, |
3168 | ipc_port_init_flags_t flags) |
3169 | { |
3170 | ipc_port_t port; |
3171 | |
3172 | kern_return_t kr = ipc_port_init_validate_flags(flags); |
3173 | if (kr != KERN_SUCCESS) { |
3174 | return IP_NULL; |
3175 | } |
3176 | |
3177 | port = ip_object_to_port(io_alloc(IOT_PORT, Z_WAITOK | Z_ZERO)); |
3178 | if (port == IP_NULL) { |
3179 | return IP_NULL; |
3180 | } |
3181 | |
3182 | os_atomic_init(&port->ip_object.io_bits, io_makebits(IOT_PORT)); |
3183 | os_atomic_init(&port->ip_object.io_references, 1); |
3184 | |
3185 | ipc_port_init(port, space, flags, MACH_PORT_SPECIAL_DEFAULT); |
3186 | return port; |
3187 | } |
3188 | |
3189 | /* |
3190 | * Routine: ipc_port_dealloc_special_and_unlock |
3191 | * Purpose: |
3192 | * Deallocate a port in a special space. |
3193 | * Consumes one ref for the port. |
3194 | * Conditions: |
3195 | * Port is locked. |
3196 | */ |
3197 | |
3198 | void |
3199 | ipc_port_dealloc_special_and_unlock( |
3200 | ipc_port_t port, |
3201 | __assert_only ipc_space_t space) |
3202 | { |
3203 | require_ip_active(port); |
3204 | // assert(port->ip_receiver_name != MACH_PORT_NULL); |
3205 | assert(ip_in_space(port, space)); |
3206 | |
3207 | /* |
3208 | * We clear ip_receiver_name and ip_receiver to simplify |
3209 | * the ipc_space_kernel check in ipc_mqueue_send. |
3210 | */ |
3211 | |
	/* port transitions to IN-LIMBO state */
3213 | port->ip_receiver_name = MACH_PORT_NULL; |
3214 | port->ip_receiver = IS_NULL; |
3215 | |
3216 | /* relevant part of ipc_port_clear_receiver */ |
3217 | port->ip_mscount = 0; |
3218 | port->ip_messages.imq_seqno = 0; |
3219 | |
3220 | ipc_port_destroy(port); |
3221 | } |
3222 | |
3223 | /* |
3224 | * Routine: ipc_port_dealloc_special |
3225 | * Purpose: |
3226 | * Deallocate a port in a special space. |
3227 | * Consumes one ref for the port. |
3228 | * Conditions: |
3229 | * Nothing locked. |
3230 | */ |
3231 | |
3232 | void |
3233 | ipc_port_dealloc_special( |
3234 | ipc_port_t port, |
3235 | ipc_space_t space) |
3236 | { |
3237 | ip_mq_lock(port); |
3238 | ipc_port_dealloc_special_and_unlock(port, space); |
3239 | } |
3240 | |
3241 | /* |
3242 | * Routine: ipc_port_finalize |
3243 | * Purpose: |
3244 | * Called on last reference deallocate to |
3245 | * free any remaining data associated with the |
3246 | * port. |
3247 | * Conditions: |
3248 | * Nothing locked. |
3249 | */ |
3250 | void |
3251 | ipc_port_finalize( |
3252 | ipc_port_t port) |
3253 | { |
3254 | ipc_port_request_table_t requests = port->ip_requests; |
3255 | |
3256 | assert(port_send_turnstile(port) == TURNSTILE_NULL); |
3257 | |
3258 | if (waitq_type(wq: &port->ip_waitq) == WQT_PORT) { |
3259 | assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL); |
3260 | } |
3261 | |
3262 | if (ip_active(port)) { |
3263 | panic("Trying to free an active port. port %p" , port); |
3264 | } |
3265 | |
3266 | if (requests) { |
3267 | port->ip_requests = NULL; |
		ipc_port_request_table_free_noclear(requests);
3269 | } |
3270 | |
3271 | /* |
3272 | * (81997111) now it is safe to deallocate the prealloc message. |
3273 | * Keep the IP_BIT_PREALLOC bit, it has to be sticky as the turnstile |
3274 | * code looks at it without holding locks. |
3275 | */ |
3276 | if (IP_PREALLOC(port)) { |
3277 | ipc_kmsg_t kmsg = port->ip_premsg; |
3278 | |
3279 | if (kmsg == IKM_NULL || ikm_prealloc_inuse_port(kmsg)) { |
3280 | panic("port(%p, %p): prealloc message in an invalid state" , |
3281 | port, kmsg); |
3282 | } |
3283 | |
3284 | port->ip_premsg = IKM_NULL; |
3285 | ipc_kmsg_free(kmsg); |
3286 | } |
3287 | |
3288 | waitq_deinit(waitq: &port->ip_waitq); |
3289 | #if MACH_ASSERT |
3290 | if (port->ip_made_bt) { |
3291 | btref_put(port->ip_made_bt); |
3292 | } |
3293 | #endif |
3294 | } |
3295 | |
3296 | /* |
3297 | * Routine: kdp_mqueue_send_find_owner |
3298 | * Purpose: |
3299 | * Discover the owner of the ipc object that contains the input |
3300 | * waitq object. The thread blocked on the waitq should be |
3301 | * waiting for an IPC_MQUEUE_FULL event. |
3302 | * Conditions: |
3303 | * The 'waitinfo->wait_type' value should already be set to |
3304 | * kThreadWaitPortSend. |
3305 | * Note: |
3306 | * If we find out that the containing port is actually in |
3307 | * transit, we reset the wait_type field to reflect this. |
3308 | */ |
3309 | void |
3310 | kdp_mqueue_send_find_owner( |
3311 | struct waitq *waitq, |
3312 | __assert_only event64_t event, |
3313 | thread_waitinfo_v2_t *waitinfo, |
3314 | struct ipc_service_port_label **isplp) |
3315 | { |
3316 | struct turnstile *turnstile; |
3317 | assert(waitinfo->wait_type == kThreadWaitPortSend); |
3318 | assert(event == IPC_MQUEUE_FULL); |
3319 | assert(waitq_type(waitq) == WQT_TURNSTILE); |
3320 | |
3321 | turnstile = waitq_to_turnstile(waitq); |
3322 | ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */ |
3323 | |
	zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3325 | |
3326 | waitinfo->owner = 0; |
3327 | waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port); |
3328 | if (ip_mq_lock_held_kdp(port)) { |
3329 | /* |
3330 | * someone has the port locked: it may be in an |
3331 | * inconsistent state: bail |
3332 | */ |
3333 | waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED; |
3334 | return; |
3335 | } |
3336 | |
3337 | /* now we are the only one accessing the port */ |
3338 | if (ip_active(port)) { |
3339 | /* |
3340 | * In kdp context, port must be left unlocked throughout. |
3341 | * Therefore can't use union field accessor helpers, manually strip PAC |
3342 | * and compare raw pointer. |
3343 | */ |
3344 | void *raw_ptr = ip_get_receiver_ptr_noauth(port); |
3345 | |
3346 | if (port->ip_tempowner) { |
3347 | ipc_importance_task_t imp_task = ip_get_imp_task(port); |
3348 | if (imp_task != IIT_NULL && imp_task->iit_task != NULL) { |
3349 | /* port is held by a tempowner */ |
			waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
3351 | } else { |
3352 | waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT; |
3353 | } |
3354 | } else if (ip_in_a_space(port)) { /* no port lock needed */ |
3355 | if ((ipc_space_t)raw_ptr == ipc_space_kernel) { /* access union field as ip_receiver */ |
3356 | /* |
3357 | * The kernel pid is 0, make this |
3358 | * distinguishable from no-owner and |
3359 | * inconsistent port state. |
3360 | */ |
3361 | waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL; |
3362 | } else { |
				waitinfo->owner = pid_from_task(((ipc_space_t)raw_ptr)->is_task);
3364 | } |
3365 | } else if ((ipc_port_t)raw_ptr != IP_NULL) { /* access union field as ip_destination */ |
3366 | waitinfo->wait_type = kThreadWaitPortSendInTransit; |
3367 | waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM((ipc_port_t)raw_ptr); |
3368 | } |
3369 | if (port->ip_service_port && port->ip_splabel != NULL) { |
3370 | *isplp = (struct ipc_service_port_label *)port->ip_splabel; |
3371 | } |
3372 | } |
3373 | } |
3374 | |
3375 | /* |
3376 | * Routine: kdp_mqueue_recv_find_owner |
3377 | * Purpose: |
3378 | * Discover the "owner" of the ipc object that contains the input |
3379 | * waitq object. The thread blocked on the waitq is trying to |
3380 | * receive on the mqueue. |
3381 | * Conditions: |
3382 | * The 'waitinfo->wait_type' value should already be set to |
3383 | * kThreadWaitPortReceive. |
3384 | * Note: |
 *	If we find that we are actually waiting on a port set, we reset
3386 | * the wait_type field to reflect this. |
3387 | */ |
3388 | void |
3389 | kdp_mqueue_recv_find_owner( |
3390 | struct waitq *waitq, |
3391 | __assert_only event64_t event, |
3392 | thread_waitinfo_v2_t *waitinfo, |
3393 | struct ipc_service_port_label **isplp) |
3394 | { |
3395 | assert(waitinfo->wait_type == kThreadWaitPortReceive); |
3396 | assert(event == IPC_MQUEUE_RECEIVE); |
3397 | |
3398 | waitinfo->owner = 0; |
3399 | |
	if (waitq_type(waitq) == WQT_PORT_SET) {
		ipc_pset_t set = ips_from_waitq(waitq);

		zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);
3404 | |
3405 | /* Reset wait type to specify waiting on port set receive */ |
3406 | waitinfo->wait_type = kThreadWaitPortSetReceive; |
3407 | waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set); |
3408 | if (ips_mq_lock_held_kdp(set)) { |
3409 | waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED; |
3410 | } |
3411 | /* There is no specific owner "at the other end" of a port set, so leave unset. */ |
	} else if (waitq_type(waitq) == WQT_PORT) {
		ipc_port_t port = ip_from_waitq(waitq);

		zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3416 | |
3417 | waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port); |
3418 | if (ip_mq_lock_held_kdp(port)) { |
3419 | waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED; |
3420 | return; |
3421 | } |
3422 | |
3423 | if (ip_active(port)) { |
3424 | if (ip_in_a_space(port)) { /* no port lock needed */ |
3425 | waitinfo->owner = ip_get_receiver_name(port); |
3426 | } else { |
3427 | waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT; |
3428 | } |
3429 | if (port->ip_specialreply) { |
3430 | waitinfo->wait_flags |= STACKSHOT_WAITINFO_FLAGS_SPECIALREPLY; |
3431 | } |
3432 | if (port->ip_splabel != NULL) { |
3433 | *isplp = (struct ipc_service_port_label *)port->ip_splabel; |
3434 | } |
3435 | } |
3436 | } |
3437 | } |
3438 | |
3439 | void |
3440 | ipc_port_set_label( |
3441 | ipc_port_t port, |
3442 | ipc_label_t label) |
3443 | { |
3444 | ipc_kobject_label_t labelp; |
3445 | |
3446 | assert(!ip_is_kolabeled(port)); |
3447 | |
3448 | labelp = zalloc_flags(ipc_kobject_label_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL); |
3449 | labelp->ikol_label = label; |
3450 | |
3451 | port->ip_kolabel = labelp; |
3452 | io_bits_or(ip_to_object(port), IO_BITS_KOLABEL); |
3453 | } |
3454 | |
3455 | kern_return_t |
3456 | ipc_port_reset_thread_attr( |
3457 | ipc_port_t port) |
3458 | { |
3459 | uint8_t iotier = THROTTLE_LEVEL_END; |
3460 | uint8_t qos = THREAD_QOS_UNSPECIFIED; |
3461 | |
3462 | return ipc_port_update_qos_n_iotier(port, qos, iotier); |
3463 | } |
3464 | |
3465 | kern_return_t |
3466 | ipc_port_propagate_thread_attr( |
3467 | ipc_port_t port, |
3468 | struct thread_attr_for_ipc_propagation attr) |
3469 | { |
3470 | uint8_t iotier = attr.tafip_iotier; |
3471 | uint8_t qos = attr.tafip_qos; |
3472 | |
3473 | return ipc_port_update_qos_n_iotier(port, qos, iotier); |
3474 | } |
3475 | |
3476 | static kern_return_t |
3477 | ipc_port_update_qos_n_iotier( |
3478 | ipc_port_t port, |
3479 | uint8_t qos, |
3480 | uint8_t iotier) |
3481 | { |
3482 | if (port == IPC_PORT_NULL) { |
3483 | return KERN_INVALID_ARGUMENT; |
3484 | } |
3485 | |
3486 | ip_mq_lock(port); |
3487 | |
3488 | if (!ip_active(port)) { |
3489 | ip_mq_unlock(port); |
3490 | return KERN_TERMINATED; |
3491 | } |
3492 | |
3493 | if (port->ip_specialreply) { |
3494 | ip_mq_unlock(port); |
3495 | return KERN_INVALID_ARGUMENT; |
3496 | } |
3497 | |
3498 | port->ip_kernel_iotier_override = iotier; |
3499 | port->ip_kernel_qos_override = qos; |
3500 | |
3501 | if (ip_in_a_space(port) && |
3502 | is_active(ip_get_receiver(port)) && |
3503 | ipc_port_has_klist(port)) { |
3504 | KNOTE(&port->ip_klist, 0); |
3505 | } |
3506 | |
3507 | ip_mq_unlock(port); |
3508 | return KERN_SUCCESS; |
3509 | } |
3510 | |
3511 | /* Returns true if a rigid reply port violation should be enforced (by killing the process) */ |
3512 | static bool |
3513 | __ip_rigid_reply_port_semantics_violation(ipc_port_t reply_port, int *reply_port_semantics_violation) |
3514 | { |
	bool hardened_runtime = csproc_hardened_runtime(current_proc());
3516 | |
3517 | if (proc_is_simulated(current_proc()) |
3518 | #if CONFIG_ROSETTA |
3519 | || task_is_translated(current_task()) |
3520 | #endif |
3521 | ) { |
3522 | return FALSE; |
3523 | } |
3524 | |
	if (task_is_hardened_binary(current_task())) {
3526 | return TRUE; |
3527 | } |
3528 | if (!ip_is_provisional_reply_port(reply_port)) { |
3529 | /* record telemetry for when third party fails to use a provisional reply port */ |
3530 | *reply_port_semantics_violation = hardened_runtime ? RRP_HARDENED_RUNTIME_VIOLATOR : RRP_3P_VIOLATOR; |
3531 | } |
3532 | return FALSE; |
3533 | } |
3534 | |
3535 | bool |
3536 | ip_violates_reply_port_semantics(ipc_port_t dest_port, ipc_port_t reply_port, |
3537 | int *reply_port_semantics_violation) |
3538 | { |
3539 | if (ip_require_reply_port_semantics(dest_port) |
3540 | && !ip_is_reply_port(reply_port) |
3541 | && !ip_is_provisional_reply_port(reply_port)) { |
3542 | *reply_port_semantics_violation = REPLY_PORT_SEMANTICS_VIOLATOR; |
3543 | return TRUE; |
3544 | } |
3545 | return FALSE; |
3546 | } |
3547 | |
3548 | /* Rigid reply port semantics don't allow for provisional reply ports */ |
3549 | bool |
3550 | ip_violates_rigid_reply_port_semantics(ipc_port_t dest_port, ipc_port_t reply_port, int *violates_3p) |
3551 | { |
3552 | return ip_require_rigid_reply_port_semantics(dest_port) |
3553 | && !ip_is_reply_port(reply_port) |
3554 | && __ip_rigid_reply_port_semantics_violation(reply_port, reply_port_semantics_violation: violates_3p); |
3555 | } |
3556 | |
3557 | #if MACH_ASSERT |
3558 | #include <kern/machine.h> |
3559 | |
3560 | unsigned long port_count = 0; |
3561 | unsigned long port_count_warning = 20000; |
3562 | unsigned long port_timestamp = 0; |
3563 | |
3564 | void db_port_stack_trace( |
3565 | ipc_port_t port); |
3566 | void db_ref( |
3567 | int refs); |
3568 | int db_port_walk( |
3569 | unsigned int verbose, |
3570 | unsigned int display, |
3571 | unsigned int ref_search, |
3572 | unsigned int ref_target); |
3573 | |
3574 | #ifdef MACH_BSD |
3575 | extern int proc_pid(struct proc*); |
3576 | #endif /* MACH_BSD */ |
3577 | |
3578 | /* |
3579 | * Initialize all of the debugging state in a port. |
3580 | * Insert the port into a global list of all allocated ports. |
3581 | */ |
3582 | void |
3583 | ipc_port_init_debug(ipc_port_t port, void *fp) |
3584 | { |
3585 | port->ip_timetrack = port_timestamp++; |
3586 | |
3587 | if (ipc_portbt) { |
3588 | port->ip_made_bt = btref_get(fp, 0); |
3589 | } |
3590 | |
3591 | #ifdef MACH_BSD |
3592 | task_t task = current_task_early(); |
3593 | if (task != TASK_NULL) { |
3594 | struct proc *proc = get_bsdtask_info(task); |
3595 | if (proc) { |
3596 | port->ip_made_pid = proc_pid(proc); |
3597 | } |
3598 | } |
3599 | #endif /* MACH_BSD */ |
3600 | } |
3601 | |
3602 | #endif /* MACH_ASSERT */ |
3603 | |