1 | /* |
2 | * Copyright (c) 2000-2007 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_FREE_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | /* |
57 | * NOTICE: This file was modified by McAfee Research in 2004 to introduce |
58 | * support for mandatory and extensible security protections. This notice |
59 | * is included in support of clause 2.2 (b) of the Apple Public License, |
60 | * Version 2.0. |
61 | */ |
62 | /* |
63 | */ |
64 | /* |
65 | * File: ipc/ipc_port.c |
66 | * Author: Rich Draves |
67 | * Date: 1989 |
68 | * |
69 | * Functions to manipulate IPC ports. |
70 | */ |
71 | |
72 | #include <zone_debug.h> |
73 | #include <mach_assert.h> |
74 | |
75 | #include <mach/port.h> |
76 | #include <mach/kern_return.h> |
77 | #include <kern/ipc_kobject.h> |
78 | #include <kern/thread.h> |
79 | #include <kern/misc_protos.h> |
80 | #include <kern/waitq.h> |
81 | #include <kern/policy_internal.h> |
82 | #include <kern/debug.h> |
83 | #include <kern/kcdata.h> |
84 | #include <ipc/ipc_entry.h> |
85 | #include <ipc/ipc_space.h> |
86 | #include <ipc/ipc_object.h> |
87 | #include <ipc/ipc_port.h> |
88 | #include <ipc/ipc_pset.h> |
89 | #include <ipc/ipc_kmsg.h> |
90 | #include <ipc/ipc_mqueue.h> |
91 | #include <ipc/ipc_notify.h> |
92 | #include <ipc/ipc_table.h> |
93 | #include <ipc/ipc_importance.h> |
94 | #include <machine/machlimits.h> |
95 | #include <kern/turnstile.h> |
96 | |
97 | #include <security/mac_mach_internal.h> |
98 | |
99 | #include <string.h> |
100 | |
/* Spin lock serializing two-port ("multiple") lock acquisitions. */
decl_lck_spin_data(, ipc_port_multiple_lock_data)
/* Monotonic counter backing ipc_port_timestamp(); stamps port death. */
ipc_port_timestamp_t	ipc_port_timestamp_data;
/* Boot-arg controlled flag: record allocation callstacks for debugging. */
int ipc_portbt;

#if	MACH_ASSERT
/* Record the captured allocation callstack in the port (debug builds). */
void	ipc_port_init_debug(
		ipc_port_t	port,
		uintptr_t	*callstack,
		unsigned int	callstack_max);

/* Capture the current thread's callstack into the supplied buffer. */
void	ipc_port_callstack_init_debug(
		uintptr_t	*callstack,
		unsigned int	callstack_max);

#endif	/* MACH_ASSERT */
116 | |
/* Drop one reference on the port (out-of-line wrapper for ip_release). */
void
ipc_port_release(ipc_port_t port)
{
	ip_release(port);
}
122 | |
/* Take one reference on the port (out-of-line wrapper for ip_reference). */
void
ipc_port_reference(ipc_port_t port)
{
	ip_reference(port);
}
128 | |
129 | /* |
130 | * Routine: ipc_port_timestamp |
131 | * Purpose: |
132 | * Retrieve a timestamp value. |
133 | */ |
134 | |
ipc_port_timestamp_t
ipc_port_timestamp(void)
{
	/* atomically post-increment the global counter; each caller gets a unique stamp */
	return OSIncrementAtomic(&ipc_port_timestamp_data);
}
140 | |
141 | /* |
142 | * Routine: ipc_port_request_alloc |
143 | * Purpose: |
144 | * Try to allocate a request slot. |
145 | * If successful, returns the request index. |
146 | * Otherwise returns zero. |
147 | * Conditions: |
148 | * The port is locked and active. |
149 | * Returns: |
150 | * KERN_SUCCESS A request index was found. |
151 | * KERN_NO_SPACE No index allocated. |
152 | */ |
153 | |
#if IMPORTANCE_INHERITANCE
kern_return_t
ipc_port_request_alloc(
	ipc_port_t			port,
	mach_port_name_t		name,
	ipc_port_t			soright,
	boolean_t			send_possible,
	boolean_t			immediate,
	ipc_port_request_index_t	*indexp,
	boolean_t			*importantp)
#else
kern_return_t
ipc_port_request_alloc(
	ipc_port_t			port,
	mach_port_name_t		name,
	ipc_port_t			soright,
	boolean_t			send_possible,
	boolean_t			immediate,
	ipc_port_request_index_t	*indexp)
#endif /* IMPORTANCE_INHERITANCE */
{
	ipc_port_request_t ipr, table;
	ipc_port_request_index_t index;
	uintptr_t mask = 0;

#if IMPORTANCE_INHERITANCE
	*importantp = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

	assert(ip_active(port));
	assert(name != MACH_PORT_NULL);
	assert(soright != IP_NULL);

	table = port->ip_requests;

	if (table == IPR_NULL)
		return KERN_NO_SPACE;

	/* entry 0 is the table header; ipr_next chains the free list (0 == empty) */
	index = table->ipr_next;
	if (index == 0)
		return KERN_NO_SPACE;

	ipr = &table[index];
	assert(ipr->ipr_name == MACH_PORT_NULL);

	/* unlink the slot from the free list and claim it for this name */
	table->ipr_next = ipr->ipr_next;
	ipr->ipr_name = name;

	if (send_possible) {
		mask |= IPR_SOR_SPREQ_MASK;
		if (immediate) {
			mask |= IPR_SOR_SPARM_MASK;
			if (port->ip_sprequests == 0) {
				port->ip_sprequests = 1;
#if IMPORTANCE_INHERITANCE
				/* TODO: Live importance support in send-possible */
				if (port->ip_impdonation != 0 &&
				    port->ip_spimportant == 0 &&
				    (task_is_importance_donor(current_task()))) {
					/* tell the caller to take an importance boost */
					*importantp = TRUE;
				}
#endif /* IMPORTANCE_INHERITANCE */
			}
		}
	}
	/* pack the request/armed flags into the low bits of the soright pointer */
	ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);

	*indexp = index;

	return KERN_SUCCESS;
}
225 | |
226 | /* |
227 | * Routine: ipc_port_request_grow |
228 | * Purpose: |
229 | * Grow a port's table of requests. |
230 | * Conditions: |
231 | * The port must be locked and active. |
232 | * Nothing else locked; will allocate memory. |
233 | * Upon return the port is unlocked. |
234 | * Returns: |
235 | * KERN_SUCCESS Grew the table. |
236 | * KERN_SUCCESS Somebody else grew the table. |
237 | * KERN_SUCCESS The port died. |
238 | * KERN_RESOURCE_SHORTAGE Couldn't allocate new table. |
239 | * KERN_NO_SPACE Couldn't grow to desired size |
240 | */ |
241 | |
kern_return_t
ipc_port_request_grow(
	ipc_port_t		port,
	ipc_table_elems_t	target_size)
{
	ipc_table_size_t its;
	ipc_port_request_t otable, ntable;

	assert(ip_active(port));

	otable = port->ip_requests;
	if (otable == IPR_NULL)
		its = &ipc_table_requests[0];
	else
		/* candidate: the next entry in the table growth schedule */
		its = otable->ipr_size + 1;

	if (target_size != ITS_SIZE_NONE) {
		if ((otable != IPR_NULL) &&
		    (target_size <= otable->ipr_size->its_size)) {
			/* table is already big enough - nothing to do */
			ip_unlock(port);
			return KERN_SUCCESS;
		}
		/* advance through the schedule until it covers target_size */
		while ((its->its_size) && (its->its_size < target_size)) {
			its++;
		}
		if (its->its_size == 0) {
			/* ran off the end of the schedule */
			ip_unlock(port);
			return KERN_NO_SPACE;
		}
	}

	/* keep the port alive (but unlocked) while we allocate */
	ip_reference(port);
	ip_unlock(port);

	if ((its->its_size == 0) ||
	    ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
		ip_release(port);
		return KERN_RESOURCE_SHORTAGE;
	}

	ip_lock(port);

	/*
	 * Check that port is still active and that nobody else
	 * has slipped in and grown the table on us.  Note that
	 * just checking if the current table pointer == otable
	 * isn't sufficient; must check ipr_size.
	 */

	if (ip_active(port) && (port->ip_requests == otable) &&
	    ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) {
		ipc_table_size_t oits;
		ipc_table_elems_t osize, nsize;
		ipc_port_request_index_t free, i;

		/* copy old table to new table */

		if (otable != IPR_NULL) {
			oits = otable->ipr_size;
			osize = oits->its_size;
			free = otable->ipr_next;

			/* slot 0 is the header; copy only the request entries */
			(void) memcpy((void *)(ntable + 1),
			    (const void *)(otable + 1),
			    (osize - 1) * sizeof(struct ipc_port_request));
		} else {
			osize = 1;
			oits = 0;
			free = 0;
		}

		nsize = its->its_size;
		assert(nsize > osize);

		/* add new elements to the new table's free list */

		for (i = osize; i < nsize; i++) {
			ipc_port_request_t ipr = &ntable[i];

			ipr->ipr_name = MACH_PORT_NULL;
			ipr->ipr_next = free;
			free = i;
		}

		ntable->ipr_next = free;
		ntable->ipr_size = its;
		port->ip_requests = ntable;
		ip_unlock(port);
		ip_release(port);

		if (otable != IPR_NULL) {
			it_requests_free(oits, otable);
		}
	} else {
		/* lost the race (or the port died): discard our new table */
		ip_unlock(port);
		ip_release(port);
		it_requests_free(its, ntable);
	}

	return KERN_SUCCESS;
}
343 | |
344 | /* |
345 | * Routine: ipc_port_request_sparm |
346 | * Purpose: |
347 | * Arm delayed send-possible request. |
348 | * Conditions: |
349 | * The port must be locked and active. |
350 | * |
351 | * Returns TRUE if the request was armed |
352 | * (or armed with importance in that version). |
353 | */ |
354 | |
boolean_t
ipc_port_request_sparm(
	ipc_port_t			port,
	__assert_only mach_port_name_t	name,
	ipc_port_request_index_t	index,
	mach_msg_option_t		option,
	mach_msg_priority_t		override)
{
	if (index != IE_REQ_NONE) {
		ipc_port_request_t ipr, table;

		assert(ip_active(port));

		table = port->ip_requests;
		assert(table != IPR_NULL);

		ipr = &table[index];
		assert(ipr->ipr_name == name);

		/* Is there a valid destination? */
		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
			/* set the armed bit and flag the port as having armed requests */
			ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
			port->ip_sprequests = 1;

			if (option & MACH_SEND_OVERRIDE) {
				/* apply override to message queue */
				ipc_mqueue_override_send(&port->ip_messages, override);
			}

#if IMPORTANCE_INHERITANCE
			/*
			 * Report TRUE only when the caller should also take an
			 * importance boost: importance not suppressed for this
			 * send, donation enabled on the port, no boost already
			 * outstanding, and the sender either forces importance
			 * or is an importance donor task.
			 */
			if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
			    (port->ip_impdonation != 0) &&
			    (port->ip_spimportant == 0) &&
			    (((option & MACH_SEND_IMPORTANCE) != 0) ||
			     (task_is_importance_donor(current_task())))) {
				return TRUE;
			}
#else
			return TRUE;
#endif /* IMPORTANCE_INHERITANCE */
		}
	}
	return FALSE;
}
399 | |
400 | /* |
401 | * Routine: ipc_port_request_type |
402 | * Purpose: |
403 | * Determine the type(s) of port requests enabled for a name. |
404 | * Conditions: |
405 | * The port must be locked or inactive (to avoid table growth). |
406 | * The index must not be IE_REQ_NONE and for the name in question. |
407 | */ |
408 | mach_port_type_t |
409 | ipc_port_request_type( |
410 | ipc_port_t port, |
411 | __assert_only mach_port_name_t name, |
412 | ipc_port_request_index_t index) |
413 | { |
414 | ipc_port_request_t ipr, table; |
415 | mach_port_type_t type = 0; |
416 | |
417 | table = port->ip_requests; |
418 | assert (table != IPR_NULL); |
419 | |
420 | assert(index != IE_REQ_NONE); |
421 | ipr = &table[index]; |
422 | assert(ipr->ipr_name == name); |
423 | |
424 | if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) { |
425 | type |= MACH_PORT_TYPE_DNREQUEST; |
426 | |
427 | if (IPR_SOR_SPREQ(ipr->ipr_soright)) { |
428 | type |= MACH_PORT_TYPE_SPREQUEST; |
429 | |
430 | if (!IPR_SOR_SPARMED(ipr->ipr_soright)) { |
431 | type |= MACH_PORT_TYPE_SPREQUEST_DELAYED; |
432 | } |
433 | } |
434 | } |
435 | return type; |
436 | } |
437 | |
438 | /* |
439 | * Routine: ipc_port_request_cancel |
440 | * Purpose: |
441 | * Cancel a dead-name/send-possible request and return the send-once right. |
442 | * Conditions: |
443 | * The port must be locked and active. |
444 | * The index must not be IPR_REQ_NONE and must correspond with name. |
445 | */ |
446 | |
447 | ipc_port_t |
448 | ipc_port_request_cancel( |
449 | ipc_port_t port, |
450 | __assert_only mach_port_name_t name, |
451 | ipc_port_request_index_t index) |
452 | { |
453 | ipc_port_request_t ipr, table; |
454 | ipc_port_t request = IP_NULL; |
455 | |
456 | assert(ip_active(port)); |
457 | table = port->ip_requests; |
458 | assert(table != IPR_NULL); |
459 | |
460 | assert (index != IE_REQ_NONE); |
461 | ipr = &table[index]; |
462 | assert(ipr->ipr_name == name); |
463 | request = IPR_SOR_PORT(ipr->ipr_soright); |
464 | |
465 | /* return ipr to the free list inside the table */ |
466 | ipr->ipr_name = MACH_PORT_NULL; |
467 | ipr->ipr_next = table->ipr_next; |
468 | table->ipr_next = index; |
469 | |
470 | return request; |
471 | } |
472 | |
473 | /* |
474 | * Routine: ipc_port_pdrequest |
475 | * Purpose: |
476 | * Make a port-deleted request, returning the |
477 | * previously registered send-once right. |
478 | * Just cancels the previous request if notify is IP_NULL. |
479 | * Conditions: |
480 | * The port is locked and active. It is unlocked. |
481 | * Consumes a ref for notify (if non-null), and |
482 | * returns previous with a ref (if non-null). |
483 | */ |
484 | |
485 | void |
486 | ipc_port_pdrequest( |
487 | ipc_port_t port, |
488 | ipc_port_t notify, |
489 | ipc_port_t *previousp) |
490 | { |
491 | ipc_port_t previous; |
492 | |
493 | assert(ip_active(port)); |
494 | |
495 | previous = port->ip_pdrequest; |
496 | port->ip_pdrequest = notify; |
497 | ip_unlock(port); |
498 | |
499 | *previousp = previous; |
500 | } |
501 | |
502 | /* |
503 | * Routine: ipc_port_nsrequest |
504 | * Purpose: |
505 | * Make a no-senders request, returning the |
506 | * previously registered send-once right. |
507 | * Just cancels the previous request if notify is IP_NULL. |
508 | * Conditions: |
509 | * The port is locked and active. It is unlocked. |
510 | * Consumes a ref for notify (if non-null), and |
511 | * returns previous with a ref (if non-null). |
512 | */ |
513 | |
void
ipc_port_nsrequest(
	ipc_port_t		port,
	mach_port_mscount_t	sync,
	ipc_port_t		notify,
	ipc_port_t		*previousp)
{
	ipc_port_t previous;
	mach_port_mscount_t mscount;

	assert(ip_active(port));

	previous = port->ip_nsrequest;
	mscount = port->ip_mscount;

	/*
	 * If there are no outstanding send rights and the caller's sync
	 * value shows it has already observed all make-sends to date,
	 * fire the notification immediately (after dropping the port
	 * lock) rather than registering it.
	 */
	if ((port->ip_srights == 0) && (sync <= mscount) &&
	    (notify != IP_NULL)) {
		port->ip_nsrequest = IP_NULL;
		ip_unlock(port);
		ipc_notify_no_senders(notify, mscount);	/* consumes notify ref */
	} else {
		port->ip_nsrequest = notify;
		ip_unlock(port);
	}

	*previousp = previous;
}
541 | |
542 | |
543 | /* |
544 | * Routine: ipc_port_clear_receiver |
545 | * Purpose: |
546 | * Prepares a receive right for transmission/destruction, |
547 | * optionally performs mqueue destruction (with port lock held) |
548 | * |
549 | * Conditions: |
550 | * The port is locked and active. |
551 | * Returns: |
552 | * If should_destroy is TRUE, then the return value indicates |
553 | * whether the caller needs to reap kmsg structures that should |
554 | * be destroyed (by calling ipc_kmsg_reap_delayed) |
555 | * |
556 | * If should_destroy is FALSE, this always returns FALSE |
557 | */ |
558 | |
boolean_t
ipc_port_clear_receiver(
	ipc_port_t		port,
	boolean_t		should_destroy)
{
	ipc_mqueue_t	mqueue = &port->ip_messages;
	boolean_t	reap_messages = FALSE;

	/*
	 * Pull ourselves out of any sets to which we belong.
	 * We hold the port locked, so even though this acquires and releases
	 * the mqueue lock, we know we won't be added to any other sets.
	 */
	if (port->ip_in_pset != 0) {
		ipc_pset_remove_from_all(port);
		assert(port->ip_in_pset == 0);
	}

	/*
	 * Send anyone waiting on the port's queue directly away.
	 * Also clear the mscount and seqno.
	 */
	imq_lock(mqueue);
	ipc_mqueue_changed(mqueue);
	port->ip_mscount = 0;
	mqueue->imq_seqno = 0;
	/* drop any context value and guard state the receiver had configured */
	port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;

	if (should_destroy) {
		/*
		 * Mark the mqueue invalid, preventing further send/receive
		 * operations from succeeding. It's important for this to be
		 * done under the same lock hold as the ipc_mqueue_changed
		 * call to avoid additional threads blocking on an mqueue
		 * that's being destroyed.
		 */
		reap_messages = ipc_mqueue_destroy_locked(mqueue);
	}

	imq_unlock(&port->ip_messages);

	/* TRUE only when destruction queued kmsgs for delayed reaping */
	return reap_messages;
}
602 | |
603 | /* |
604 | * Routine: ipc_port_init |
605 | * Purpose: |
606 | * Initializes a newly-allocated port. |
607 | * Doesn't touch the ip_object fields. |
608 | */ |
609 | |
610 | void |
611 | ipc_port_init( |
612 | ipc_port_t port, |
613 | ipc_space_t space, |
614 | mach_port_name_t name) |
615 | { |
616 | /* port->ip_kobject doesn't have to be initialized */ |
617 | |
618 | port->ip_receiver = space; |
619 | port->ip_receiver_name = name; |
620 | |
621 | port->ip_mscount = 0; |
622 | port->ip_srights = 0; |
623 | port->ip_sorights = 0; |
624 | |
625 | port->ip_nsrequest = IP_NULL; |
626 | port->ip_pdrequest = IP_NULL; |
627 | port->ip_requests = IPR_NULL; |
628 | |
629 | port->ip_premsg = IKM_NULL; |
630 | port->ip_context = 0; |
631 | |
632 | port->ip_sprequests = 0; |
633 | port->ip_spimportant = 0; |
634 | port->ip_impdonation = 0; |
635 | port->ip_tempowner = 0; |
636 | |
637 | port->ip_guarded = 0; |
638 | port->ip_strict_guard = 0; |
639 | port->ip_impcount = 0; |
640 | |
641 | port->ip_specialreply = 0; |
642 | port->ip_sync_link_state = PORT_SYNC_LINK_ANY; |
643 | |
644 | reset_ip_srp_bits(port); |
645 | |
646 | port->ip_send_turnstile = TURNSTILE_NULL; |
647 | |
648 | ipc_mqueue_init(&port->ip_messages, |
649 | FALSE /* !set */); |
650 | } |
651 | |
652 | /* |
653 | * Routine: ipc_port_alloc |
654 | * Purpose: |
655 | * Allocate a port. |
656 | * Conditions: |
657 | * Nothing locked. If successful, the port is returned |
658 | * locked. (The caller doesn't have a reference.) |
659 | * Returns: |
660 | * KERN_SUCCESS The port is allocated. |
661 | * KERN_INVALID_TASK The space is dead. |
662 | * KERN_NO_SPACE No room for an entry in the space. |
663 | * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. |
664 | */ |
665 | |
kern_return_t
ipc_port_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_port_t		*portp)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;

#if MACH_ASSERT
	uintptr_t buf[IP_CALLSTACK_MAX];
	/* capture the allocation callstack before taking any locks */
	ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

	kr = ipc_object_alloc(space, IOT_PORT,
	    MACH_PORT_TYPE_RECEIVE, 0,
	    &name, (ipc_object_t *) &port);
	if (kr != KERN_SUCCESS)
		return kr;

	/* port and space are locked */
	ipc_port_init(port, space, name);

#if MACH_ASSERT
	ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

	/* unlock space after init; the port stays locked for the caller */
	is_write_unlock(space);

	*namep = name;
	*portp = port;

	return KERN_SUCCESS;
}
702 | |
703 | /* |
704 | * Routine: ipc_port_alloc_name |
705 | * Purpose: |
706 | * Allocate a port, with a specific name. |
707 | * Conditions: |
708 | * Nothing locked. If successful, the port is returned |
709 | * locked. (The caller doesn't have a reference.) |
710 | * Returns: |
711 | * KERN_SUCCESS The port is allocated. |
712 | * KERN_INVALID_TASK The space is dead. |
713 | * KERN_NAME_EXISTS The name already denotes a right. |
714 | * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. |
715 | */ |
716 | |
kern_return_t
ipc_port_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_port_t		*portp)
{
	ipc_port_t port;
	kern_return_t kr;

#if MACH_ASSERT
	uintptr_t buf[IP_CALLSTACK_MAX];
	/* capture the allocation callstack before taking any locks */
	ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

	kr = ipc_object_alloc_name(space, IOT_PORT,
	    MACH_PORT_TYPE_RECEIVE, 0,
	    name, (ipc_object_t *) &port);
	if (kr != KERN_SUCCESS)
		return kr;

	/* port is locked */

	ipc_port_init(port, space, name);

#if MACH_ASSERT
	ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

	/* port remains locked for the caller (no reference held) */
	*portp = port;

	return KERN_SUCCESS;
}
749 | |
750 | /* |
751 | * Routine: ipc_port_spnotify |
752 | * Purpose: |
753 | * Generate send-possible port notifications. |
754 | * Conditions: |
755 | * Nothing locked, reference held on port. |
756 | */ |
void
ipc_port_spnotify(
	ipc_port_t	port)
{
	ipc_port_request_index_t index = 0;
	ipc_table_elems_t size = 0;

	/*
	 * If the port has no send-possible request
	 * armed, don't bother to lock the port.
	 */
	if (port->ip_sprequests == 0)
		return;

	ip_lock(port);

#if IMPORTANCE_INHERITANCE
	if (port->ip_spimportant != 0) {
		port->ip_spimportant = 0;
		/* a TRUE return means the delta call dropped the port lock */
		if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
			ip_lock(port);
		}
	}
#endif /* IMPORTANCE_INHERITANCE */

	/* recheck under the lock: another thread may have consumed them */
	if (port->ip_sprequests == 0) {
		ip_unlock(port);
		return;
	}
	port->ip_sprequests = 0;

revalidate:
	if (ip_active(port)) {
		ipc_port_request_t requests;

		/* table may change each time port unlocked (reload) */
		requests = port->ip_requests;
		assert(requests != IPR_NULL);

		/*
		 * no need to go beyond table size when first
		 * we entered - those are future notifications.
		 */
		if (size == 0)
			size = requests->ipr_size->its_size;

		/* no need to backtrack either */
		while (++index < size) {
			ipc_port_request_t ipr = &requests[index];
			mach_port_name_t name = ipr->ipr_name;
			ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
			boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

			if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
				/* claim send-once right - slot still inuse */
				ipr->ipr_soright = IP_NULL;
				ip_unlock(port);

				/* deliver the notification without the port lock held */
				ipc_notify_send_possible(soright, name);

				ip_lock(port);
				goto revalidate;
			}
		}
	}
	ip_unlock(port);
	return;
}
825 | |
826 | /* |
827 | * Routine: ipc_port_dnnotify |
828 | * Purpose: |
829 | * Generate dead name notifications for |
830 | * all outstanding dead-name and send- |
831 | * possible requests. |
832 | * Conditions: |
833 | * Nothing locked. |
834 | * Port must be inactive. |
835 | * Reference held on port. |
836 | */ |
837 | void |
838 | ipc_port_dnnotify( |
839 | ipc_port_t port) |
840 | { |
841 | ipc_port_request_t requests = port->ip_requests; |
842 | |
843 | assert(!ip_active(port)); |
844 | if (requests != IPR_NULL) { |
845 | ipc_table_size_t its = requests->ipr_size; |
846 | ipc_table_elems_t size = its->its_size; |
847 | ipc_port_request_index_t index; |
848 | for (index = 1; index < size; index++) { |
849 | ipc_port_request_t ipr = &requests[index]; |
850 | mach_port_name_t name = ipr->ipr_name; |
851 | ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright); |
852 | |
853 | if (MACH_PORT_VALID(name) && IP_VALID(soright)) { |
854 | ipc_notify_dead_name(soright, name); |
855 | } |
856 | } |
857 | } |
858 | } |
859 | |
860 | |
861 | /* |
862 | * Routine: ipc_port_destroy |
863 | * Purpose: |
864 | * Destroys a port. Cleans up queued messages. |
865 | * |
866 | * If the port has a backup, it doesn't get destroyed, |
867 | * but is sent in a port-destroyed notification to the backup. |
868 | * Conditions: |
869 | * The port is locked and alive; nothing else locked. |
870 | * The caller has a reference, which is consumed. |
871 | * Afterwards, the port is unlocked and dead. |
872 | */ |
873 | |
void
ipc_port_destroy(ipc_port_t port)
{
	ipc_port_t pdrequest, nsrequest;
	ipc_mqueue_t mqueue;
	ipc_kmsg_t kmsg;
	boolean_t special_reply = port->ip_specialreply;

#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t release_imp_task = IIT_NULL;
	thread_t self = current_thread();
	/* "top" means this thread isn't already unwinding nested assertions */
	boolean_t top = (self->ith_assertions == 0);
	natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

	assert(ip_active(port));
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */

	/* check for a backup port */
	pdrequest = port->ip_pdrequest;

#if IMPORTANCE_INHERITANCE
	/* determine how many assertions to drop and from whom */
	if (port->ip_tempowner != 0) {
		assert(top);
		release_imp_task = port->ip_imp_task;
		if (IIT_NULL != release_imp_task) {
			port->ip_imp_task = IIT_NULL;
			assertcnt = port->ip_impcount;
		}
		/* Otherwise, nothing to drop */
	} else {
		assertcnt = port->ip_impcount;
		if (pdrequest != IP_NULL)
			/* mark in limbo for the journey */
			port->ip_tempowner = 1;
	}

	if (top)
		self->ith_assertions = assertcnt;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * With a backup registered, the port survives: it is stripped of
	 * its receiver and sent to the backup in a port-destroyed
	 * notification instead of being torn down.
	 */
	if (pdrequest != IP_NULL) {
		/* clear receiver, don't destroy the port */
		(void)ipc_port_clear_receiver(port, FALSE);
		assert(port->ip_in_pset == 0);
		assert(port->ip_mscount == 0);

		/* we assume the ref for pdrequest */
		port->ip_pdrequest = IP_NULL;

		/* make port be in limbo */
		imq_lock(&port->ip_messages);
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_destination = IP_NULL;
		imq_unlock(&port->ip_messages);
		ip_unlock(port);

		if (special_reply) {
			ipc_port_adjust_special_reply_port(port,
			    IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE);
		}
		/* consumes our refs for port and pdrequest */
		ipc_notify_port_destroyed(pdrequest, port);

		goto drop_assertions;
	}

	/* port active bit needs to be guarded under mqueue lock for turnstiles */
	imq_lock(&port->ip_messages);
	port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
	/*
	 * Record the time of death.  NOTE(review): presumably consulted by
	 * racing right-transfer paths to detect that the receive right died
	 * first - confirm against the timestamp's consumers.
	 */
	port->ip_timestamp = ipc_port_timestamp();
	imq_unlock(&port->ip_messages);
	nsrequest = port->ip_nsrequest;

	/*
	 * The mach_msg_* paths don't hold a port lock, they only hold a
	 * reference to the port object. If a thread raced us and is now
	 * blocked waiting for message reception on this mqueue (or waiting
	 * for ipc_mqueue_full), it will never be woken up. We call
	 * ipc_port_clear_receiver() here, _after_ the port has been marked
	 * inactive, to wakeup any threads which may be blocked and ensure
	 * that no other thread can get lost waiting for a wake up on a
	 * port/mqueue that's been destroyed.
	 */
	boolean_t reap_msgs = FALSE;
	reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks mqueue inactive */
	assert(port->ip_in_pset == 0);
	assert(port->ip_mscount == 0);

	/*
	 * If the port has a preallocated message buffer and that buffer
	 * is not inuse, free it.  If it has an inuse one, then the kmsg
	 * free will detect that we freed the association and it can free it
	 * like a normal buffer.
	 *
	 * Once the port is marked inactive we don't need to keep it locked.
	 */
	if (IP_PREALLOC(port)) {
		ipc_port_t inuse_port;

		kmsg = port->ip_premsg;
		assert(kmsg != IKM_NULL);
		inuse_port = ikm_prealloc_inuse_port(kmsg);
		ipc_kmsg_clear_prealloc(kmsg, port);
		ip_unlock(port);
		if (inuse_port != IP_NULL) {
			/* in use: the eventual kmsg free reclaims the buffer */
			assert(inuse_port == port);
		} else {
			ipc_kmsg_free(kmsg);
		}
	} else {
		ip_unlock(port);
	}

	/* unlink the kmsg from special reply port */
	if (special_reply) {
		ipc_port_adjust_special_reply_port(port,
		    IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE);
	}

	/* throw away no-senders request */
	if (nsrequest != IP_NULL)
		ipc_notify_send_once(nsrequest); /* consumes ref */

	/*
	 * Reap any kmsg objects waiting to be destroyed.
	 * This must be done after we've released the port lock.
	 */
	if (reap_msgs)
		ipc_kmsg_reap_delayed();

	mqueue = &port->ip_messages;

	/* cleanup waitq related resources */
	ipc_mqueue_deinit(mqueue);

	/* generate dead-name notifications */
	ipc_port_dnnotify(port);

	ipc_kobject_destroy(port);

	ip_release(port); /* consume caller's ref */

 drop_assertions:
#if IMPORTANCE_INHERITANCE
	if (release_imp_task != IIT_NULL) {
		if (assertcnt > 0) {
			assert(top);
			self->ith_assertions = 0;
			assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
			ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
		}
		ipc_importance_task_release(release_imp_task);

	} else if (assertcnt > 0) {
		if (top) {
			/* no tempowner task recorded: drop against the current task */
			self->ith_assertions = 0;
			release_imp_task = current_task()->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
				ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
			}
		}
	}
#endif /* IMPORTANCE_INHERITANCE */
}
1041 | |
1042 | /* |
1043 | * Routine: ipc_port_check_circularity |
1044 | * Purpose: |
1045 | * Check if queueing "port" in a message for "dest" |
1046 | * would create a circular group of ports and messages. |
1047 | * |
1048 | * If no circularity (FALSE returned), then "port" |
1049 | * is changed from "in limbo" to "in transit". |
1050 | * |
1051 | * That is, we want to set port->ip_destination == dest, |
1052 | * but guaranteeing that this doesn't create a circle |
1053 | * port->ip_destination->ip_destination->... == port |
1054 | * |
1055 | * Conditions: |
1056 | * No ports locked. References held for "port" and "dest". |
1057 | */ |
1058 | |
boolean_t
ipc_port_check_circularity(
	ipc_port_t	port,
	ipc_port_t	dest)
{
#if IMPORTANCE_INHERITANCE
	/* adjust importance counts at the same time */
	return ipc_importance_check_circularity(port, dest);
#else
	ipc_port_t base;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	/* enqueueing a port on itself is trivially circular */
	if (port == dest)
		return TRUE;
	base = dest;

	/* Check if destination needs a turnstile (takes a turnstile ref) */
	ipc_port_send_turnstile_prepare(dest);

	/*
	 * First try a quick check that can run in parallel.
	 * No circularity if dest is not in transit.
	 */
	ip_lock(port);
	if (ip_lock_try(dest)) {
		if (!ip_active(dest) ||
		    (dest->ip_receiver_name != MACH_PORT_NULL) ||
		    (dest->ip_destination == IP_NULL))
			goto not_circular;	/* NB: port AND dest remain locked */

		/* dest is in transit; further checking necessary */

		ip_unlock(dest);
	}
	ip_unlock(port);

	ipc_port_multiple_lock(); /* massive serialization */

	/*
	 * Search for the end of the chain (a port not in transit),
	 * acquiring locks along the way.
	 */

	for (;;) {
		ip_lock(base);

		/* chain ends at an inactive, receiver-held, or limbo port */
		if (!ip_active(base) ||
		    (base->ip_receiver_name != MACH_PORT_NULL) ||
		    (base->ip_destination == IP_NULL))
			break;

		base = base->ip_destination;
	}

	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */

		assert(ip_active(port));
		assert(port->ip_receiver_name == MACH_PORT_NULL);
		assert(port->ip_destination == IP_NULL);

		/* unlock the whole chain before bailing out */
		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			/* dest is in transit or in limbo */

			assert(ip_active(base));
			assert(base->ip_receiver_name == MACH_PORT_NULL);

			next = base->ip_destination;
			ip_unlock(base);
			base = next;
		}

		/* undo the turnstile ref taken above; no linkage was made */
		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 * The guarantee: lock port while the entire chain is locked.
	 * Once port is locked, we can take a reference to dest,
	 * add port to the chain, and unlock everything.
	 */

	ip_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	imq_lock(&port->ip_messages);

	/* port is in limbo */

	assert(ip_active(port));
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	assert(port->ip_destination == IP_NULL);

	/* move port from "in limbo" to "in transit": it now holds a dest ref */
	ip_reference(dest);
	port->ip_destination = dest;

	/* Setup linkage for source port if it has sync ipc push */
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
			port_send_turnstile_address(port),
			TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		/* propagate the push from port's senders onto dest's turnstile */
		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
			(TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	imq_unlock(&port->ip_messages);

	/* now unlock chain */

	ip_unlock(port);

	for (;;) {
		ipc_port_t next;

		/* fast path (goto not_circular) arrives here with dest == base */
		if (dest == base)
			break;

		/* port is in transit */

		assert(ip_active(dest));
		assert(dest->ip_receiver_name == MACH_PORT_NULL);
		assert(dest->ip_destination != IP_NULL);

		next = dest->ip_destination;
		ip_unlock(dest);
		dest = next;
	}

	/* base is not in transit */
	assert(!ip_active(base) ||
	    (base->ip_receiver_name != MACH_PORT_NULL) ||
	    (base->ip_destination == IP_NULL));

	ip_unlock(base);

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the mq lock to call turnstile complete */
		imq_lock(&port->ip_messages);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL);
		send_turnstile = TURNSTILE_NULL;
		imq_unlock(&port->ip_messages);
		turnstile_cleanup();
	}

	return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
1224 | |
/*
 * Returns the turnstile that should inherit the push from this port's
 * senders: either the stashed inheritor turnstile, or the turnstile of
 * the first dispatch-style MACH_RCV_MSG knote attached to the port.
 * Conditions: mqueue locked.
 */
struct turnstile *
ipc_port_get_inheritor(ipc_port_t port)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	struct knote *kn;

	assert(imq_held(mqueue));

	/* klist slot repurposed to hold an inheritor turnstile directly */
	if (!IMQ_KLIST_VALID(mqueue)) {
		return IMQ_INHERITOR(mqueue);
	}

	/* otherwise scan the attached knotes for a dispatch receiver */
	SLIST_FOREACH(kn, &port->ip_messages.imq_klist, kn_selnext) {
		if ((kn->kn_sfflags & MACH_RCV_MSG) && (kn->kn_status & KN_DISPATCH)) {
			return filt_machport_kqueue_turnstile(kn);
		}
	}

	return TURNSTILE_NULL;
}
1245 | |
1246 | /* |
1247 | * Routine: ipc_port_send_turnstile_prepare |
1248 | * Purpose: |
1249 | * Get a reference on port's send turnstile, if |
1250 | * port does not have a send turnstile then allocate one. |
1251 | * |
1252 | * Conditions: |
1253 | * Nothing is locked. |
1254 | */ |
void
ipc_port_send_turnstile_prepare(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;
	struct turnstile *inheritor = TURNSTILE_NULL;
	struct turnstile *send_turnstile = TURNSTILE_NULL;

retry_alloc:
	imq_lock(&port->ip_messages);

	/*
	 * A new send turnstile is needed if none is attached, or the
	 * attached one has no port refs left (it is being torn down).
	 */
	if (port_send_turnstile(port) == NULL ||
	    port_send_turnstile(port)->ts_port_ref == 0) {

		/*
		 * Allocation can block: drop the mqueue lock, allocate,
		 * then retry — another thread may have installed a
		 * turnstile in the window, in which case ours is freed below.
		 */
		if (turnstile == TURNSTILE_NULL) {
			imq_unlock(&port->ip_messages);
			turnstile = turnstile_alloc();
			goto retry_alloc;
		}

		send_turnstile = turnstile_prepare((uintptr_t)port,
			port_send_turnstile_address(port),
			turnstile, TURNSTILE_SYNC_IPC);
		turnstile = TURNSTILE_NULL;	/* consumed by turnstile_prepare */

		/*
		 * If the port is in transit, chain this turnstile to the
		 * destination port's send turnstile; otherwise link it to
		 * the port's current inheritor (e.g. a workloop knote).
		 */
		if (ip_active(port) &&
		    port->ip_receiver_name == MACH_PORT_NULL &&
		    port->ip_destination != IP_NULL) {
			assert(port->ip_receiver_name == MACH_PORT_NULL);
			assert(port->ip_destination != IP_NULL);

			inheritor = port_send_turnstile(port->ip_destination);
		} else {
			inheritor = ipc_port_get_inheritor(port);
		}
		turnstile_update_inheritor(send_turnstile, inheritor,
			TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE);
		/* turnstile complete will be called in ipc_port_send_turnstile_complete */
	}

	/* Increment turnstile counter */
	port_send_turnstile(port)->ts_port_ref++;
	imq_unlock(&port->ip_messages);

	/* finish the inheritor update with no interlocks held */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile,
			TURNSTILE_INTERLOCK_NOT_HELD);
	}
	/* lost the allocation race: free the spare turnstile */
	if (turnstile != TURNSTILE_NULL) {
		turnstile_deallocate(turnstile);
	}
}
1310 | |
1311 | |
1312 | /* |
1313 | * Routine: ipc_port_send_turnstile_complete |
1314 | * Purpose: |
1315 | * Drop a ref on the port's send turnstile, if the |
1316 | * ref becomes zero, deallocate the turnstile. |
1317 | * |
1318 | * Conditions: |
1319 | * The space might be locked, use safe deallocate. |
1320 | */ |
void
ipc_port_send_turnstile_complete(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;

	/* Drop turnstile count on dest port */
	imq_lock(&port->ip_messages);

	port_send_turnstile(port)->ts_port_ref--;
	if (port_send_turnstile(port)->ts_port_ref == 0) {
		/* last ref gone: detach the turnstile from the port */
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
			&turnstile);
		assert(turnstile != TURNSTILE_NULL);
	}
	imq_unlock(&port->ip_messages);
	turnstile_cleanup();

	/*
	 * Use the safe (deferred) deallocation since the caller may be
	 * holding the space lock (see routine header).
	 */
	if (turnstile != TURNSTILE_NULL) {
		turnstile_deallocate_safe(turnstile);
		turnstile = TURNSTILE_NULL;
	}
}
1343 | |
1344 | |
1345 | /* |
1346 | * Routine: ipc_port_rcv_turnstile_waitq |
1347 | * Purpose: |
1348 | * Given the mqueue's waitq, find the port's |
1349 | * rcv turnstile and return its waitq. |
1350 | * |
1351 | * Conditions: |
1352 | * mqueue locked or thread waiting on turnstile is locked. |
1353 | */ |
1354 | struct waitq * |
1355 | ipc_port_rcv_turnstile_waitq(struct waitq *waitq) |
1356 | { |
1357 | struct waitq *safeq; |
1358 | |
1359 | ipc_mqueue_t mqueue = imq_from_waitq(waitq); |
1360 | ipc_port_t port = ip_from_mq(mqueue); |
1361 | struct turnstile *rcv_turnstile = ipc_port_rcv_turnstile(port); |
1362 | |
1363 | /* Check if the port has a rcv turnstile */ |
1364 | if (rcv_turnstile != TURNSTILE_NULL) { |
1365 | safeq = &rcv_turnstile->ts_waitq; |
1366 | } else { |
1367 | safeq = global_eventq(waitq); |
1368 | } |
1369 | return safeq; |
1370 | } |
1371 | |
1372 | |
1373 | /* |
1374 | * Routine: ipc_port_rcv_turnstile |
1375 | * Purpose: |
1376 | * Get the port's receive turnstile |
1377 | * |
1378 | * Conditions: |
1379 | * mqueue locked or thread waiting on turnstile is locked. |
1380 | */ |
1381 | struct turnstile * |
1382 | ipc_port_rcv_turnstile(ipc_port_t port) |
1383 | { |
1384 | return turnstile_lookup_by_proprietor((uintptr_t)port); |
1385 | } |
1386 | |
1387 | |
1388 | /* |
1389 | * Routine: ipc_port_link_special_reply_port |
1390 | * Purpose: |
1391 | * Link the special reply port with the destination port. |
1392 | * Allocates turnstile to dest port. |
1393 | * |
1394 | * Conditions: |
1395 | * Nothing is locked. |
1396 | */ |
void
ipc_port_link_special_reply_port(
	ipc_port_t special_reply_port,
	ipc_port_t dest_port)
{
	boolean_t drop_turnstile_ref = FALSE;

	/* Check if dest_port needs a turnstile (takes a turnstile ref) */
	ipc_port_send_turnstile_prepare(dest_port);

	/* Lock the special reply port and establish the linkage */
	ip_lock(special_reply_port);
	imq_lock(&special_reply_port->ip_messages);

	/*
	 * Linkage is only legal on a genuine special reply port that is
	 * still in PORT_SYNC_LINK_ANY state with no inheritor port set;
	 * otherwise remember to undo the turnstile ref taken above.
	 */
	if (!special_reply_port->ip_specialreply ||
	    special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
	    special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
		drop_turnstile_ref = TRUE;
	} else {
		/* take a reference on dest_port */
		ip_reference(dest_port);
		special_reply_port->ip_sync_inheritor_port = dest_port;
		special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
	}

	imq_unlock(&special_reply_port->ip_messages);
	ip_unlock(special_reply_port);

	/* drop the ref only after all locks are released */
	if (drop_turnstile_ref) {
		ipc_port_send_turnstile_complete(dest_port);
	}

	return;
}
1432 | |
1433 | #if DEVELOPMENT || DEBUG |
1434 | inline void |
1435 | reset_ip_srp_bits(ipc_port_t special_reply_port) |
1436 | { |
1437 | special_reply_port->ip_srp_lost_link = 0; |
1438 | special_reply_port->ip_srp_msg_sent = 0; |
1439 | } |
1440 | |
1441 | inline void |
1442 | reset_ip_srp_msg_sent(ipc_port_t special_reply_port) |
1443 | { |
1444 | if (special_reply_port->ip_specialreply == 1) { |
1445 | special_reply_port->ip_srp_msg_sent = 0; |
1446 | } |
1447 | } |
1448 | |
1449 | inline void |
1450 | set_ip_srp_msg_sent(ipc_port_t special_reply_port) |
1451 | { |
1452 | if (special_reply_port->ip_specialreply == 1) { |
1453 | special_reply_port->ip_srp_msg_sent = 1; |
1454 | } |
1455 | } |
1456 | |
1457 | inline void |
1458 | set_ip_srp_lost_link(ipc_port_t special_reply_port) |
1459 | { |
1460 | if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) { |
1461 | special_reply_port->ip_srp_lost_link = 1; |
1462 | } |
1463 | } |
1464 | |
1465 | #else /* DEVELOPMENT || DEBUG */ |
1466 | inline void |
1467 | reset_ip_srp_bits(__unused ipc_port_t special_reply_port) |
1468 | { |
1469 | return; |
1470 | } |
1471 | |
1472 | inline void |
1473 | reset_ip_srp_msg_sent(__unused ipc_port_t special_reply_port) |
1474 | { |
1475 | return; |
1476 | } |
1477 | |
1478 | inline void |
1479 | set_ip_srp_msg_sent(__unused ipc_port_t special_reply_port) |
1480 | { |
1481 | return; |
1482 | } |
1483 | |
1484 | inline void |
1485 | set_ip_srp_lost_link(__unused ipc_port_t special_reply_port) |
1486 | { |
1487 | return; |
1488 | } |
1489 | #endif /* DEVELOPMENT || DEBUG */ |
1490 | |
1491 | /* |
1492 | * Routine: ipc_port_adjust_special_reply_port_locked |
1493 | * Purpose: |
 *	If the special port has a turnstile, update its inheritor.
1495 | * Condition: |
1496 | * Special reply port locked on entry. |
1497 | * Special reply port unlocked on return. |
1498 | * Returns: |
1499 | * None. |
1500 | */ |
void
ipc_port_adjust_special_reply_port_locked(
	ipc_port_t special_reply_port,
	struct knote *kn,
	uint8_t flags,
	boolean_t get_turnstile)
{
	ipc_port_t dest_port = IPC_PORT_NULL;
	int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct turnstile *dest_ts = TURNSTILE_NULL, *ts = TURNSTILE_NULL;

	imq_lock(&special_reply_port->ip_messages);

	/* a message was received on the port: clear the srp debug bit */
	if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
		reset_ip_srp_msg_sent(special_reply_port);
	}

	/* Check if the special reply port is marked non-special */
	if (special_reply_port->ip_specialreply == 0 ||
	    special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
		if (get_turnstile) {
			/* give back the thread-donated rcv turnstile before bailing */
			turnstile_complete((uintptr_t)special_reply_port,
				port_rcv_turnstile_address(special_reply_port),
				NULL);
		}
		imq_unlock(&special_reply_port->ip_messages);
		ip_unlock(special_reply_port);
		if (get_turnstile) {
			/* cleanup must run with no locks held */
			turnstile_cleanup();
		}
		return;
	}

	/* Clear thread's special reply port and clear linkage */
	if (flags & IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY) {
		/* This option should only be specified by a non blocking thread */
		assert(get_turnstile == FALSE);
		special_reply_port->ip_specialreply = 0;

		reset_ip_srp_bits(special_reply_port);

		/* Check if need to break linkage */
		if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
			imq_unlock(&special_reply_port->ip_messages);
			ip_unlock(special_reply_port);
			return;
		}
	} else if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
		if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY ||
		    special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT) {
			if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
				/* stash the port on the knote; may choose a new link state */
				inheritor = filt_machport_stash_port(kn, special_reply_port,
					&sync_link_state);
			}
		}
	} else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
		sync_link_state = PORT_SYNC_LINK_ANY;
	}

	/* tear down the old linkage; saved refs are released after unlocking */
	switch (special_reply_port->ip_sync_link_state) {
	case PORT_SYNC_LINK_PORT:
		dest_port = special_reply_port->ip_sync_inheritor_port;
		special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
		break;
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		special_reply_port->ip_sync_inheritor_knote = NULL;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		dest_ts = special_reply_port->ip_sync_inheritor_ts;
		special_reply_port->ip_sync_inheritor_ts = NULL;
		break;
	}

	special_reply_port->ip_sync_link_state = sync_link_state;

	/* establish the new linkage */
	switch (sync_link_state) {
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		special_reply_port->ip_sync_inheritor_knote = kn;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		turnstile_reference(inheritor);
		special_reply_port->ip_sync_inheritor_ts = inheritor;
		break;
	case PORT_SYNC_LINK_NO_LINKAGE:
		if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
			/* debug: remember the link was lost before a msg was sent */
			set_ip_srp_lost_link(special_reply_port);
		}
		break;
	}

	/* Get thread's turnstile donated to special reply port */
	if (get_turnstile) {
		turnstile_complete((uintptr_t)special_reply_port,
			port_rcv_turnstile_address(special_reply_port),
			NULL);
	} else {
		ts = ipc_port_rcv_turnstile(special_reply_port);
		if (ts) {
			/* hold a ref so ts survives until the update completes below */
			turnstile_reference(ts);
			turnstile_update_inheritor(ts, inheritor,
				(TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
		}
	}

	imq_unlock(&special_reply_port->ip_messages);
	ip_unlock(special_reply_port);

	if (get_turnstile) {
		turnstile_cleanup();
	} else if (ts) {
		/* Call turnstile cleanup after dropping the interlock */
		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate_safe(ts);
	}

	/* Release the ref on the dest port and its turnstile */
	if (dest_port) {
		ipc_port_send_turnstile_complete(dest_port);
		/* release the reference on the dest port */
		ip_release(dest_port);
	}

	if (dest_ts) {
		turnstile_deallocate_safe(dest_ts);
	}
}
1628 | |
1629 | /* |
1630 | * Routine: ipc_port_adjust_special_reply_port |
1631 | * Purpose: |
 *	If the special port has a turnstile, update its inheritor.
1633 | * Condition: |
1634 | * Nothing locked. |
1635 | * Returns: |
1636 | * None. |
1637 | */ |
1638 | void |
1639 | ipc_port_adjust_special_reply_port( |
1640 | ipc_port_t special_reply_port, |
1641 | uint8_t flags, |
1642 | boolean_t get_turnstile) |
1643 | { |
1644 | ip_lock(special_reply_port); |
1645 | ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL, flags, get_turnstile); |
1646 | /* special_reply_port unlocked */ |
1647 | } |
1648 | |
1649 | /* |
1650 | * Routine: ipc_port_get_special_reply_port_inheritor |
1651 | * Purpose: |
1652 | * Returns the current inheritor of the special reply port |
1653 | * Condition: |
1654 | * mqueue is locked, port is a special reply port |
1655 | * Returns: |
1656 | * the current inheritor |
1657 | */ |
1658 | turnstile_inheritor_t |
1659 | ipc_port_get_special_reply_port_inheritor( |
1660 | ipc_port_t port) |
1661 | { |
1662 | assert(port->ip_specialreply); |
1663 | imq_held(&port->ip_messages); |
1664 | |
1665 | switch (port->ip_sync_link_state) { |
1666 | case PORT_SYNC_LINK_PORT: |
1667 | if (port->ip_sync_inheritor_port != NULL) { |
1668 | return port_send_turnstile(port->ip_sync_inheritor_port); |
1669 | } |
1670 | break; |
1671 | case PORT_SYNC_LINK_WORKLOOP_KNOTE: |
1672 | return filt_machport_stashed_special_reply_port_turnstile(port); |
1673 | case PORT_SYNC_LINK_WORKLOOP_STASH: |
1674 | return port->ip_sync_inheritor_ts; |
1675 | } |
1676 | return TURNSTILE_INHERITOR_NULL; |
1677 | } |
1678 | |
1679 | /* |
1680 | * Routine: ipc_port_impcount_delta |
1681 | * Purpose: |
1682 | * Adjust only the importance count associated with a port. |
1683 | * If there are any adjustments to be made to receiver task, |
1684 | * those are handled elsewhere. |
1685 | * |
1686 | * For now, be defensive during deductions to make sure the |
1687 | * impcount for the port doesn't underflow zero. This will |
1688 | * go away when the port boost addition is made atomic (see |
1689 | * note in ipc_port_importance_delta()). |
1690 | * Conditions: |
1691 | * The port is referenced and locked. |
1692 | * Nothing else is locked. |
1693 | */ |
mach_port_delta_t
ipc_port_impcount_delta(
	ipc_port_t port,
	mach_port_delta_t delta,
	ipc_port_t __unused base)	/* NB: base IS read on DEVELOPMENT/DEBUG builds below */
{
	mach_port_delta_t absdelta;

	/* inactive ports carry no importance */
	if (!ip_active(port)) {
		return 0;
	}

	/* adding/doing nothing is easy */
	if (delta >= 0) {
		port->ip_impcount += delta;
		return delta;
	}

	absdelta = 0 - delta;
	/* common deduction case: the count fully covers the drop */
	if (port->ip_impcount >= absdelta) {
		port->ip_impcount -= absdelta;
		return delta;
	}

#if (DEVELOPMENT || DEBUG)
	/* underflow: log which receiver task over-released assertions */
	if (port->ip_receiver_name != MACH_PORT_NULL) {
		task_t target_task = port->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown" ;
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
		       "dropping %d assertion(s) but port only has %d remaining.\n" ,
		       port->ip_receiver_name,
		       target_pid, target_procname,
		       absdelta, port->ip_impcount);

	} else if (base != IP_NULL) {
		/* port is in transit: report against the base port's receiver */
		task_t target_task = base->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown" ;
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%lx "
		       "enqueued on port 0x%x with receiver pid %d (%s), "
		       "dropping %d assertion(s) but port only has %d remaining.\n" ,
		       (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
		       base->ip_receiver_name,
		       target_pid, target_procname,
		       absdelta, port->ip_impcount);
	}
#endif

	/* clamp at zero and report how much was actually dropped */
	delta = 0 - port->ip_impcount;
	port->ip_impcount = 0;
	return delta;
}
1765 | |
1766 | /* |
1767 | * Routine: ipc_port_importance_delta_internal |
1768 | * Purpose: |
1769 | * Adjust the importance count through the given port. |
1770 | * If the port is in transit, apply the delta throughout |
 *	the chain.  Determine if there is a task at the
1772 | * base of the chain that wants/needs to be adjusted, |
1773 | * and if so, apply the delta. |
1774 | * Conditions: |
1775 | * The port is referenced and locked on entry. |
1776 | * Importance may be locked. |
1777 | * Nothing else is locked. |
1778 | * The lock may be dropped on exit. |
1779 | * Returns TRUE if lock was dropped. |
1780 | */ |
1781 | #if IMPORTANCE_INHERITANCE |
1782 | |
boolean_t
ipc_port_importance_delta_internal(
	ipc_port_t port,
	natural_t options,
	mach_port_delta_t *deltap,
	ipc_importance_task_t *imp_task)
{
	ipc_port_t next, base;
	boolean_t dropped = FALSE;

	*imp_task = IIT_NULL;

	/* a zero delta is a no-op */
	if (*deltap == 0)
		return FALSE;

	assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_active(port) &&
	    port->ip_destination != IP_NULL &&
	    port->ip_receiver_name == MACH_PORT_NULL) {

		dropped = TRUE;

		/* lock ordering requires dropping the port lock first */
		ip_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */
		ip_lock(base);

		/* walk and lock the chain until a port not in transit */
		while(ip_active(base) &&
		      base->ip_destination != IP_NULL &&
		      base->ip_receiver_name == MACH_PORT_NULL) {

			base = base->ip_destination;
			ip_lock(base);
		}
		ipc_port_multiple_unlock();
	}

	/*
	 * If the port lock is dropped b/c the port is in transit, there is a
	 * race window where another thread can drain messages and/or fire a
	 * send possible notification before we get here.
	 *
	 * We solve this race by checking to see if our caller armed the send
	 * possible notification, whether or not it's been fired yet, and
	 * whether or not we've already set the port's ip_spimportant bit. If
	 * we don't need a send-possible boost, then we'll just apply a
	 * harmless 0-boost to the port.
	 */
	if (options & IPID_OPTION_SENDPOSSIBLE) {
		assert(*deltap == 1);
		if (port->ip_sprequests && port->ip_spimportant == 0)
			port->ip_spimportant = 1;
		else
			*deltap = 0;
	}

	/* unlock down to the base, adjusting boost(s) at each level */
	for (;;) {
		*deltap = ipc_port_impcount_delta(port, *deltap, base);

		if (port == base) {
			break;
		}

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		next = port->ip_destination;
		ip_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != base->ip_imp_task)
				*imp_task = base->ip_imp_task;
			/* otherwise don't boost */

		} else if (base->ip_receiver_name != MACH_PORT_NULL) {
			ipc_space_t space = base->ip_receiver;

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				*imp_task = space->is_task->task_imp_base;
			}
		}
	}

	/*
	 * Only the base is locked. If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (*imp_task != IIT_NULL) {
		/* take a reference before unlocking base */
		ipc_importance_task_reference(*imp_task);
	}

	if (dropped == TRUE) {
		ip_unlock(base);
	}

	return dropped;
}
1890 | #endif /* IMPORTANCE_INHERITANCE */ |
1891 | |
1892 | /* |
1893 | * Routine: ipc_port_importance_delta |
1894 | * Purpose: |
1895 | * Adjust the importance count through the given port. |
1896 | * If the port is in transit, apply the delta throughout |
1897 | * the chain. |
1898 | * |
1899 | * If there is a task at the base of the chain that wants/needs |
1900 | * to be adjusted, apply the delta. |
1901 | * Conditions: |
1902 | * The port is referenced and locked on entry. |
1903 | * Nothing else is locked. |
1904 | * The lock may be dropped on exit. |
1905 | * Returns TRUE if lock was dropped. |
1906 | */ |
1907 | #if IMPORTANCE_INHERITANCE |
1908 | |
1909 | boolean_t |
1910 | ipc_port_importance_delta( |
1911 | ipc_port_t port, |
1912 | natural_t options, |
1913 | mach_port_delta_t delta) |
1914 | { |
1915 | ipc_importance_task_t imp_task = IIT_NULL; |
1916 | boolean_t dropped; |
1917 | |
1918 | dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task); |
1919 | |
1920 | if (IIT_NULL == imp_task || delta == 0) |
1921 | return dropped; |
1922 | |
1923 | if (!dropped) |
1924 | ip_unlock(port); |
1925 | |
1926 | assert(ipc_importance_task_is_any_receiver_type(imp_task)); |
1927 | |
1928 | if (delta > 0) |
1929 | ipc_importance_task_hold_internal_assertion(imp_task, delta); |
1930 | else |
1931 | ipc_importance_task_drop_internal_assertion(imp_task, -delta); |
1932 | |
1933 | ipc_importance_task_release(imp_task); |
1934 | return TRUE; |
1935 | } |
1936 | #endif /* IMPORTANCE_INHERITANCE */ |
1937 | |
1938 | /* |
1939 | * Routine: ipc_port_lookup_notify |
1940 | * Purpose: |
1941 | * Make a send-once notify port from a receive right. |
1942 | * Returns IP_NULL if name doesn't denote a receive right. |
1943 | * Conditions: |
1944 | * The space must be locked (read or write) and active. |
1945 | * Being the active space, we can rely on thread server_id |
1946 | * context to give us the proper server level sub-order |
1947 | * within the space. |
1948 | */ |
1949 | |
1950 | ipc_port_t |
1951 | ipc_port_lookup_notify( |
1952 | ipc_space_t space, |
1953 | mach_port_name_t name) |
1954 | { |
1955 | ipc_port_t port; |
1956 | ipc_entry_t entry; |
1957 | |
1958 | assert(is_active(space)); |
1959 | |
1960 | entry = ipc_entry_lookup(space, name); |
1961 | if (entry == IE_NULL) |
1962 | return IP_NULL; |
1963 | if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) |
1964 | return IP_NULL; |
1965 | |
1966 | __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object); |
1967 | assert(port != IP_NULL); |
1968 | |
1969 | ip_lock(port); |
1970 | assert(ip_active(port)); |
1971 | assert(port->ip_receiver_name == name); |
1972 | assert(port->ip_receiver == space); |
1973 | |
1974 | ip_reference(port); |
1975 | port->ip_sorights++; |
1976 | ip_unlock(port); |
1977 | |
1978 | return port; |
1979 | } |
1980 | |
1981 | /* |
1982 | * Routine: ipc_port_make_send_locked |
1983 | * Purpose: |
1984 | * Make a naked send right from a receive right. |
1985 | * |
1986 | * Conditions: |
1987 | * port locked and active. |
1988 | */ |
1989 | ipc_port_t |
1990 | ipc_port_make_send_locked( |
1991 | ipc_port_t port) |
1992 | { |
1993 | assert(ip_active(port)); |
1994 | port->ip_mscount++; |
1995 | port->ip_srights++; |
1996 | ip_reference(port); |
1997 | return port; |
1998 | } |
1999 | |
2000 | /* |
2001 | * Routine: ipc_port_make_send |
2002 | * Purpose: |
2003 | * Make a naked send right from a receive right. |
2004 | */ |
2005 | |
2006 | ipc_port_t |
2007 | ipc_port_make_send( |
2008 | ipc_port_t port) |
2009 | { |
2010 | |
2011 | if (!IP_VALID(port)) |
2012 | return port; |
2013 | |
2014 | ip_lock(port); |
2015 | if (ip_active(port)) { |
2016 | port->ip_mscount++; |
2017 | port->ip_srights++; |
2018 | ip_reference(port); |
2019 | ip_unlock(port); |
2020 | return port; |
2021 | } |
2022 | ip_unlock(port); |
2023 | return IP_DEAD; |
2024 | } |
2025 | |
2026 | /* |
2027 | * Routine: ipc_port_copy_send |
2028 | * Purpose: |
2029 | * Make a naked send right from another naked send right. |
2030 | * IP_NULL -> IP_NULL |
2031 | * IP_DEAD -> IP_DEAD |
2032 | * dead port -> IP_DEAD |
2033 | * live port -> port + ref |
2034 | * Conditions: |
2035 | * Nothing locked except possibly a space. |
2036 | */ |
2037 | |
2038 | ipc_port_t |
2039 | ipc_port_copy_send( |
2040 | ipc_port_t port) |
2041 | { |
2042 | ipc_port_t sright; |
2043 | |
2044 | if (!IP_VALID(port)) |
2045 | return port; |
2046 | |
2047 | ip_lock(port); |
2048 | if (ip_active(port)) { |
2049 | assert(port->ip_srights > 0); |
2050 | |
2051 | ip_reference(port); |
2052 | port->ip_srights++; |
2053 | sright = port; |
2054 | } else |
2055 | sright = IP_DEAD; |
2056 | ip_unlock(port); |
2057 | |
2058 | return sright; |
2059 | } |
2060 | |
2061 | /* |
2062 | * Routine: ipc_port_copyout_send |
2063 | * Purpose: |
2064 | * Copyout a naked send right (possibly null/dead), |
2065 | * or if that fails, destroy the right. |
2066 | * Conditions: |
2067 | * Nothing locked. |
2068 | */ |
2069 | |
2070 | mach_port_name_t |
2071 | ipc_port_copyout_send( |
2072 | ipc_port_t sright, |
2073 | ipc_space_t space) |
2074 | { |
2075 | mach_port_name_t name; |
2076 | |
2077 | if (IP_VALID(sright)) { |
2078 | kern_return_t kr; |
2079 | |
2080 | kr = ipc_object_copyout(space, (ipc_object_t) sright, |
2081 | MACH_MSG_TYPE_PORT_SEND, TRUE, &name); |
2082 | if (kr != KERN_SUCCESS) { |
2083 | ipc_port_release_send(sright); |
2084 | |
2085 | if (kr == KERN_INVALID_CAPABILITY) |
2086 | name = MACH_PORT_DEAD; |
2087 | else |
2088 | name = MACH_PORT_NULL; |
2089 | } |
2090 | } else |
2091 | name = CAST_MACH_PORT_TO_NAME(sright); |
2092 | |
2093 | return name; |
2094 | } |
2095 | |
2096 | /* |
2097 | * Routine: ipc_port_copyout_name_send |
2098 | * Purpose: |
2099 | * Copyout a naked send right (possibly null/dead) to given name, |
2100 | * or if that fails, destroy the right. |
2101 | * Conditions: |
2102 | * Nothing locked. |
2103 | */ |
2104 | |
2105 | mach_port_name_t |
2106 | ipc_port_copyout_name_send( |
2107 | ipc_port_t sright, |
2108 | ipc_space_t space, |
2109 | mach_port_name_t name) |
2110 | { |
2111 | if (IP_VALID(sright)) { |
2112 | kern_return_t kr; |
2113 | |
2114 | kr = ipc_object_copyout_name(space, (ipc_object_t) sright, |
2115 | MACH_MSG_TYPE_PORT_SEND, TRUE, name); |
2116 | if (kr != KERN_SUCCESS) { |
2117 | ipc_port_release_send(sright); |
2118 | |
2119 | if (kr == KERN_INVALID_CAPABILITY) |
2120 | name = MACH_PORT_DEAD; |
2121 | else |
2122 | name = MACH_PORT_NULL; |
2123 | } |
2124 | } else |
2125 | name = CAST_MACH_PORT_TO_NAME(sright); |
2126 | |
2127 | return name; |
2128 | } |
2129 | |
2130 | /* |
2131 | * Routine: ipc_port_release_send |
2132 | * Purpose: |
2133 | * Release a naked send right. |
2134 | * Consumes a ref for the port. |
2135 | * Conditions: |
2136 | * Nothing locked. |
2137 | */ |
2138 | |
2139 | void |
2140 | ipc_port_release_send( |
2141 | ipc_port_t port) |
2142 | { |
2143 | ipc_port_t nsrequest = IP_NULL; |
2144 | mach_port_mscount_t mscount; |
2145 | |
2146 | if (!IP_VALID(port)) |
2147 | return; |
2148 | |
2149 | ip_lock(port); |
2150 | |
2151 | assert(port->ip_srights > 0); |
2152 | if (port->ip_srights == 0) { |
2153 | panic("Over-release of port %p send right!" , port); |
2154 | } |
2155 | |
2156 | port->ip_srights--; |
2157 | |
2158 | if (!ip_active(port)) { |
2159 | ip_unlock(port); |
2160 | ip_release(port); |
2161 | return; |
2162 | } |
2163 | |
2164 | if (port->ip_srights == 0 && |
2165 | port->ip_nsrequest != IP_NULL) { |
2166 | nsrequest = port->ip_nsrequest; |
2167 | port->ip_nsrequest = IP_NULL; |
2168 | mscount = port->ip_mscount; |
2169 | ip_unlock(port); |
2170 | ip_release(port); |
2171 | ipc_notify_no_senders(nsrequest, mscount); |
2172 | } else { |
2173 | ip_unlock(port); |
2174 | ip_release(port); |
2175 | } |
2176 | } |
2177 | |
2178 | /* |
2179 | * Routine: ipc_port_make_sonce_locked |
2180 | * Purpose: |
2181 | * Make a naked send-once right from a receive right. |
2182 | * Conditions: |
2183 | * The port is locked and active. |
2184 | */ |
2185 | |
2186 | ipc_port_t |
2187 | ipc_port_make_sonce_locked( |
2188 | ipc_port_t port) |
2189 | { |
2190 | assert(ip_active(port)); |
2191 | port->ip_sorights++; |
2192 | ip_reference(port); |
2193 | return port; |
2194 | } |
2195 | |
2196 | /* |
2197 | * Routine: ipc_port_make_sonce |
2198 | * Purpose: |
2199 | * Make a naked send-once right from a receive right. |
2200 | * Conditions: |
2201 | * The port is not locked. |
2202 | */ |
2203 | |
2204 | ipc_port_t |
2205 | ipc_port_make_sonce( |
2206 | ipc_port_t port) |
2207 | { |
2208 | if (!IP_VALID(port)) |
2209 | return port; |
2210 | |
2211 | ip_lock(port); |
2212 | if (ip_active(port)) { |
2213 | port->ip_sorights++; |
2214 | ip_reference(port); |
2215 | ip_unlock(port); |
2216 | return port; |
2217 | } |
2218 | ip_unlock(port); |
2219 | return IP_DEAD; |
2220 | } |
2221 | |
2222 | /* |
2223 | * Routine: ipc_port_release_sonce |
2224 | * Purpose: |
2225 | * Release a naked send-once right. |
2226 | * Consumes a ref for the port. |
2227 | * |
2228 | * In normal situations, this is never used. |
2229 | * Send-once rights are only consumed when |
2230 | * a message (possibly a send-once notification) |
2231 | * is sent to them. |
2232 | * Conditions: |
2233 | * Nothing locked except possibly a space. |
2234 | */ |
2235 | |
2236 | void |
2237 | ipc_port_release_sonce( |
2238 | ipc_port_t port) |
2239 | { |
2240 | if (!IP_VALID(port)) |
2241 | return; |
2242 | |
2243 | ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_SR_NONE, FALSE); |
2244 | |
2245 | ip_lock(port); |
2246 | |
2247 | assert(port->ip_sorights > 0); |
2248 | if (port->ip_sorights == 0) { |
2249 | panic("Over-release of port %p send-once right!" , port); |
2250 | } |
2251 | |
2252 | port->ip_sorights--; |
2253 | |
2254 | ip_unlock(port); |
2255 | ip_release(port); |
2256 | } |
2257 | |
2258 | /* |
2259 | * Routine: ipc_port_release_receive |
2260 | * Purpose: |
2261 | * Release a naked (in limbo or in transit) receive right. |
2262 | * Consumes a ref for the port; destroys the port. |
2263 | * Conditions: |
2264 | * Nothing locked. |
2265 | */ |
2266 | |
2267 | void |
2268 | ipc_port_release_receive( |
2269 | ipc_port_t port) |
2270 | { |
2271 | ipc_port_t dest; |
2272 | |
2273 | if (!IP_VALID(port)) |
2274 | return; |
2275 | |
2276 | ip_lock(port); |
2277 | assert(ip_active(port)); |
2278 | assert(port->ip_receiver_name == MACH_PORT_NULL); |
2279 | dest = port->ip_destination; |
2280 | |
2281 | ipc_port_destroy(port); /* consumes ref, unlocks */ |
2282 | |
2283 | if (dest != IP_NULL) { |
2284 | ipc_port_send_turnstile_complete(dest); |
2285 | ip_release(dest); |
2286 | } |
2287 | } |
2288 | |
2289 | /* |
2290 | * Routine: ipc_port_alloc_special |
2291 | * Purpose: |
2292 | * Allocate a port in a special space. |
2293 | * The new port is returned with one ref. |
2294 | * If unsuccessful, IP_NULL is returned. |
2295 | * Conditions: |
2296 | * Nothing locked. |
2297 | */ |
2298 | |
2299 | ipc_port_t |
2300 | ipc_port_alloc_special( |
2301 | ipc_space_t space) |
2302 | { |
2303 | ipc_port_t port; |
2304 | |
2305 | __IGNORE_WCASTALIGN(port = (ipc_port_t) io_alloc(IOT_PORT)); |
2306 | if (port == IP_NULL) |
2307 | return IP_NULL; |
2308 | |
2309 | #if MACH_ASSERT |
2310 | uintptr_t buf[IP_CALLSTACK_MAX]; |
2311 | ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX); |
2312 | #endif /* MACH_ASSERT */ |
2313 | |
2314 | bzero((char *)port, sizeof(*port)); |
2315 | io_lock_init(&port->ip_object); |
2316 | port->ip_references = 1; |
2317 | port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0); |
2318 | |
2319 | ipc_port_init(port, space, 1); |
2320 | |
2321 | #if MACH_ASSERT |
2322 | ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX); |
2323 | #endif /* MACH_ASSERT */ |
2324 | |
2325 | return port; |
2326 | } |
2327 | |
2328 | /* |
2329 | * Routine: ipc_port_dealloc_special |
2330 | * Purpose: |
2331 | * Deallocate a port in a special space. |
2332 | * Consumes one ref for the port. |
2333 | * Conditions: |
2334 | * Nothing locked. |
2335 | */ |
2336 | |
2337 | void |
2338 | ipc_port_dealloc_special( |
2339 | ipc_port_t port, |
2340 | __assert_only ipc_space_t space) |
2341 | { |
2342 | ip_lock(port); |
2343 | assert(ip_active(port)); |
2344 | // assert(port->ip_receiver_name != MACH_PORT_NULL); |
2345 | assert(port->ip_receiver == space); |
2346 | |
2347 | /* |
2348 | * We clear ip_receiver_name and ip_receiver to simplify |
2349 | * the ipc_space_kernel check in ipc_mqueue_send. |
2350 | */ |
2351 | |
2352 | imq_lock(&port->ip_messages); |
2353 | port->ip_receiver_name = MACH_PORT_NULL; |
2354 | port->ip_receiver = IS_NULL; |
2355 | imq_unlock(&port->ip_messages); |
2356 | |
2357 | /* relevant part of ipc_port_clear_receiver */ |
2358 | ipc_port_set_mscount(port, 0); |
2359 | port->ip_messages.imq_seqno = 0; |
2360 | |
2361 | ipc_port_destroy(port); |
2362 | } |
2363 | |
2364 | /* |
2365 | * Routine: ipc_port_finalize |
2366 | * Purpose: |
2367 | * Called on last reference deallocate to |
2368 | * free any remaining data associated with the |
2369 | * port. |
2370 | * Conditions: |
2371 | * Nothing locked. |
2372 | */ |
2373 | void |
2374 | ipc_port_finalize( |
2375 | ipc_port_t port) |
2376 | { |
2377 | ipc_port_request_t requests = port->ip_requests; |
2378 | |
2379 | assert(port_send_turnstile(port) == TURNSTILE_NULL); |
2380 | assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL); |
2381 | |
2382 | if (ip_active(port)) { |
2383 | panic("Trying to free an active port. port %p" , port); |
2384 | } |
2385 | |
2386 | if (requests != IPR_NULL) { |
2387 | ipc_table_size_t its = requests->ipr_size; |
2388 | it_requests_free(its, requests); |
2389 | port->ip_requests = IPR_NULL; |
2390 | } |
2391 | |
2392 | ipc_mqueue_deinit(&port->ip_messages); |
2393 | |
2394 | #if MACH_ASSERT |
2395 | ipc_port_track_dealloc(port); |
2396 | #endif /* MACH_ASSERT */ |
2397 | } |
2398 | |
2399 | /* |
2400 | * Routine: kdp_mqueue_send_find_owner |
2401 | * Purpose: |
2402 | * Discover the owner of the ipc_mqueue that contains the input |
2403 | * waitq object. The thread blocked on the waitq should be |
2404 | * waiting for an IPC_MQUEUE_FULL event. |
2405 | * Conditions: |
2406 | * The 'waitinfo->wait_type' value should already be set to |
2407 | * kThreadWaitPortSend. |
2408 | * Note: |
2409 | * If we find out that the containing port is actually in |
2410 | * transit, we reset the wait_type field to reflect this. |
2411 | */ |
2412 | void |
2413 | kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo) |
2414 | { |
2415 | struct turnstile *turnstile; |
2416 | assert(waitinfo->wait_type == kThreadWaitPortSend); |
2417 | assert(event == IPC_MQUEUE_FULL); |
2418 | assert(waitq_is_turnstile_queue(waitq)); |
2419 | |
2420 | turnstile = waitq_to_turnstile(waitq); |
2421 | ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */ |
2422 | assert(kdp_is_in_zone(port, "ipc ports" )); |
2423 | |
2424 | waitinfo->owner = 0; |
2425 | waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port); |
2426 | if (ip_lock_held_kdp(port)) { |
2427 | /* |
2428 | * someone has the port locked: it may be in an |
2429 | * inconsistent state: bail |
2430 | */ |
2431 | waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED; |
2432 | return; |
2433 | } |
2434 | |
2435 | if (ip_active(port)) { |
2436 | if (port->ip_tempowner) { |
2437 | if (port->ip_imp_task != IIT_NULL && port->ip_imp_task->iit_task != NULL) { |
2438 | /* port is held by a tempowner */ |
2439 | waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task); |
2440 | } else { |
2441 | waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT; |
2442 | } |
2443 | } else if (port->ip_receiver_name) { |
2444 | /* port in a space */ |
2445 | if (port->ip_receiver == ipc_space_kernel) { |
2446 | /* |
2447 | * The kernel pid is 0, make this |
2448 | * distinguishable from no-owner and |
2449 | * inconsistent port state. |
2450 | */ |
2451 | waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL; |
2452 | } else { |
2453 | waitinfo->owner = pid_from_task(port->ip_receiver->is_task); |
2454 | } |
2455 | } else if (port->ip_destination != IP_NULL) { |
2456 | /* port in transit */ |
2457 | waitinfo->wait_type = kThreadWaitPortSendInTransit; |
2458 | waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination); |
2459 | } |
2460 | } |
2461 | } |
2462 | |
2463 | /* |
2464 | * Routine: kdp_mqueue_recv_find_owner |
2465 | * Purpose: |
2466 | * Discover the "owner" of the ipc_mqueue that contains the input |
2467 | * waitq object. The thread blocked on the waitq is trying to |
2468 | * receive on the mqueue. |
2469 | * Conditions: |
2470 | * The 'waitinfo->wait_type' value should already be set to |
2471 | * kThreadWaitPortReceive. |
2472 | * Note: |
2473 | * If we find that we are actualy waiting on a port set, we reset |
2474 | * the wait_type field to reflect this. |
2475 | */ |
2476 | void |
2477 | kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo) |
2478 | { |
2479 | assert(waitinfo->wait_type == kThreadWaitPortReceive); |
2480 | assert(event == IPC_MQUEUE_RECEIVE); |
2481 | |
2482 | ipc_mqueue_t mqueue = imq_from_waitq(waitq); |
2483 | waitinfo->owner = 0; |
2484 | if (imq_is_set(mqueue)) { /* we are waiting on a port set */ |
2485 | ipc_pset_t set = ips_from_mq(mqueue); |
2486 | assert(kdp_is_in_zone(set, "ipc port sets" )); |
2487 | |
2488 | /* Reset wait type to specify waiting on port set receive */ |
2489 | waitinfo->wait_type = kThreadWaitPortSetReceive; |
2490 | waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set); |
2491 | if (ips_lock_held_kdp(set)) { |
2492 | waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED; |
2493 | } |
2494 | /* There is no specific owner "at the other end" of a port set, so leave unset. */ |
2495 | } else { |
2496 | ipc_port_t port = ip_from_mq(mqueue); |
2497 | assert(kdp_is_in_zone(port, "ipc ports" )); |
2498 | |
2499 | waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port); |
2500 | if (ip_lock_held_kdp(port)) { |
2501 | waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED; |
2502 | return; |
2503 | } |
2504 | |
2505 | if (ip_active(port)) { |
2506 | if (port->ip_receiver_name != MACH_PORT_NULL) { |
2507 | waitinfo->owner = port->ip_receiver_name; |
2508 | } else { |
2509 | waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT; |
2510 | } |
2511 | } |
2512 | } |
2513 | } |
2514 | |
#if	MACH_ASSERT
#include <kern/machine.h>

/*
 *	Keep a list of all allocated ports.
 *	Allocation is intercepted via ipc_port_init;
 *	deallocation is intercepted via io_free.
 */
#if 0
queue_head_t	port_alloc_queue;
lck_spin_t	port_alloc_queue_lock;
#endif

/* count of tracked ports (only maintained by the disabled queue code) */
unsigned long	port_count = 0;
/* threshold at which ipc_port_init_debug would assert */
unsigned long	port_count_warning = 20000;
/* monotonically increasing allocation stamp stored in ip_timetrack */
unsigned long	port_timestamp = 0;

/* debugger helpers (definitions elsewhere) */
void	db_port_stack_trace(
		ipc_port_t	port);
void	db_ref(
		int		refs);
int	db_port_walk(
		unsigned int	verbose,
		unsigned int	display,
		unsigned int	ref_search,
		unsigned int	ref_target);
2541 | |
2542 | /* |
2543 | * Initialize global state needed for run-time |
2544 | * port debugging. |
2545 | */ |
2546 | void |
2547 | ipc_port_debug_init(void) |
2548 | { |
2549 | #if 0 |
2550 | queue_init(&port_alloc_queue); |
2551 | lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr); |
2552 | #endif |
2553 | |
2554 | if (!PE_parse_boot_argn("ipc_portbt" , &ipc_portbt, sizeof (ipc_portbt))) |
2555 | ipc_portbt = 0; |
2556 | } |
2557 | |
#ifdef MACH_BSD
extern int proc_pid(struct proc*);
#endif /* MACH_BSD */

/*
 *	Initialize all of the debugging state in a port.
 *	Insert the port into a global list of all allocated ports.
 */
void
ipc_port_init_debug(
	ipc_port_t	port,
	uintptr_t	*callstack,
	unsigned int	callstack_max)
{
	unsigned int	i;

	/* record who allocated the port and when */
	port->ip_thread = current_thread();
	port->ip_timetrack = port_timestamp++;
	/* copy the captured allocation backtrace into the port */
	for (i = 0; i < callstack_max; ++i)
		port->ip_callstack[i] = callstack[i];
	for (i = 0; i < IP_NSPARES; ++i)
		port->ip_spares[i] = 0;

#ifdef MACH_BSD
	task_t task = current_task();
	if (task != TASK_NULL) {
		struct proc* proc = (struct proc*) get_bsdtask_info(task);
		/* stash the allocating pid in the first spare slot */
		if (proc)
			port->ip_spares[0] = proc_pid(proc);
	}
#endif /* MACH_BSD */

#if 0
	lck_spin_lock(&port_alloc_queue_lock);
	++port_count;
	if (port_count_warning > 0 && port_count >= port_count_warning)
		assert(port_count < port_count_warning);
	queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
#endif
}
2599 | |
2600 | /* |
2601 | * Routine: ipc_port_callstack_init_debug |
2602 | * Purpose: |
2603 | * Calls the machine-dependent routine to |
2604 | * fill in an array with up to IP_CALLSTACK_MAX |
2605 | * levels of return pc information |
2606 | * Conditions: |
2607 | * May block (via copyin) |
2608 | */ |
2609 | void |
2610 | ipc_port_callstack_init_debug( |
2611 | uintptr_t *callstack, |
2612 | unsigned int callstack_max) |
2613 | { |
2614 | unsigned int i; |
2615 | |
2616 | /* guarantee the callstack is initialized */ |
2617 | for (i=0; i < callstack_max; i++) |
2618 | callstack[i] = 0; |
2619 | |
2620 | if (ipc_portbt) |
2621 | machine_callstack(callstack, callstack_max); |
2622 | } |
2623 | |
2624 | /* |
2625 | * Remove a port from the queue of allocated ports. |
2626 | * This routine should be invoked JUST prior to |
2627 | * deallocating the actual memory occupied by the port. |
2628 | */ |
2629 | #if 1 |
2630 | void |
2631 | ipc_port_track_dealloc( |
2632 | __unused ipc_port_t port) |
2633 | { |
2634 | } |
2635 | #else |
2636 | void |
2637 | ipc_port_track_dealloc( |
2638 | ipc_port_t port) |
2639 | { |
2640 | lck_spin_lock(&port_alloc_queue_lock); |
2641 | assert(port_count > 0); |
2642 | --port_count; |
2643 | queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links); |
2644 | lck_spin_unlock(&port_alloc_queue_lock); |
2645 | } |
2646 | #endif |
2647 | |
2648 | |
2649 | #endif /* MACH_ASSERT */ |
2650 | |