1/*
2 * Copyright (c) 2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/mach_types.h>
30#include <mach/notify.h>
31#include <ipc/ipc_types.h>
32#include <ipc/ipc_importance.h>
33#include <ipc/ipc_port.h>
34#include <ipc/ipc_voucher.h>
35#include <kern/ipc_kobject.h>
36#include <kern/ipc_tt.h>
37#include <kern/mach_param.h>
38#include <kern/misc_protos.h>
39#include <kern/kalloc.h>
40#include <kern/zalloc.h>
41#include <kern/queue.h>
42#include <kern/task.h>
43#include <kern/policy_internal.h>
44
45#include <sys/kdebug.h>
46
47#include <mach/mach_voucher_attr_control.h>
48#include <mach/machine/sdt.h>
49
50extern int proc_pid(void *);
51extern int proc_selfpid(void);
52extern uint64_t proc_uniqueid(void *p);
53extern char *proc_name_address(void *p);
54
55/*
56 * Globals for delayed boost drop processing.
57 */
58static queue_head_t ipc_importance_delayed_drop_queue;
59static thread_call_t ipc_importance_delayed_drop_call;
60static uint64_t ipc_importance_delayed_drop_timestamp;
61static boolean_t ipc_importance_delayed_drop_call_requested = FALSE;
62
63#define DENAP_DROP_TARGET (1000 * NSEC_PER_MSEC) /* optimum denap delay */
64#define DENAP_DROP_SKEW (100 * NSEC_PER_MSEC) /* request skew for wakeup */
65#define DENAP_DROP_LEEWAY (2 * DENAP_DROP_SKEW) /* specified wakeup leeway */
66
67#define DENAP_DROP_DELAY (DENAP_DROP_TARGET + DENAP_DROP_SKEW)
68#define DENAP_DROP_FLAGS (THREAD_CALL_DELAY_SYS_NORMAL | THREAD_CALL_DELAY_LEEWAY)
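/*
 * With the values above, a queued de-nap boost drop is scheduled
 * DENAP_DROP_DELAY = 1000ms + 100ms = 1100ms after the drop request,
 * with DENAP_DROP_LEEWAY = 200ms of timer leeway, so a single
 * thread-call wakeup can batch all drops whose targets fall within
 * the skew window.
 */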
69
70/*
71 * Importance Voucher Attribute Manager
72 */
73
74static lck_spin_t ipc_importance_lock_data; /* single lock for now */
75
76
77#define ipc_importance_lock_init() \
78 lck_spin_init(&ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr)
79#define ipc_importance_lock_destroy() \
80 lck_spin_destroy(&ipc_importance_lock_data, &ipc_lck_grp)
81#define ipc_importance_lock() \
82 lck_spin_lock(&ipc_importance_lock_data)
83#define ipc_importance_lock_try() \
84 lck_spin_try_lock(&ipc_importance_lock_data)
85#define ipc_importance_unlock() \
86 lck_spin_unlock(&ipc_importance_lock_data)
87#define ipc_importance_assert_held() \
88 lck_spin_assert(&ipc_importance_lock_data, LCK_ASSERT_OWNED)
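/*
 * Illustrative sketch (not part of the original source, compiled out):
 * the importance lock is a spin lock, so holders must not block while
 * it is held, and the *_release_locked() routines below are entered
 * with the lock held but return with it dropped.
 */
#if 0
static void
ipc_importance_locking_example(ipc_importance_elem_t elem)
{
	ipc_importance_lock();
	/* ... short, non-blocking examination of importance state ... */
	ipc_importance_release_locked(elem);	/* consumes a reference, returns unlocked */
}
#endif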
89
90#if IIE_REF_DEBUG
91#define incr_ref_counter(x) (hw_atomic_add(&(x), 1))
92
93static inline
94uint32_t ipc_importance_reference_internal(ipc_importance_elem_t elem)
95{
96 incr_ref_counter(elem->iie_refs_added);
97 return (hw_atomic_add(&elem->iie_bits, 1) & IIE_REFS_MASK);
98}
99
100static inline
101uint32_t ipc_importance_release_internal(ipc_importance_elem_t elem)
102{
103 incr_ref_counter(elem->iie_refs_dropped);
104 return (hw_atomic_sub(&elem->iie_bits, 1) & IIE_REFS_MASK);
105}
106
107static inline
108uint32_t ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)
109{
110 uint32_t out;
111 out = ipc_importance_reference_internal(&task_imp->iit_elem);
112 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added);
113 return out;
114}
115
116static inline
117uint32_t ipc_importance_task_release_internal(ipc_importance_task_t task_imp)
118{
119 uint32_t out;
120
121 assert(1 < IIT_REFS(task_imp));
122 incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped);
123 out = ipc_importance_release_internal(&task_imp->iit_elem);
124 return out;
125}
126
127static inline
128void ipc_importance_counter_init(ipc_importance_elem_t elem)
129{
130
131 elem->iie_refs_added = 0;
132 elem->iie_refs_dropped = 0;
133 elem->iie_kmsg_refs_added = 0;
134 elem->iie_kmsg_refs_inherited = 0;
135 elem->iie_kmsg_refs_coalesced = 0;
136 elem->iie_kmsg_refs_dropped = 0;
137 elem->iie_task_refs_added = 0;
138 elem->iie_task_refs_added_inherit_from = 0;
139 elem->iie_task_refs_added_transition = 0;
140 elem->iie_task_refs_self_added = 0;
141 elem->iie_task_refs_inherited = 0;
142 elem->iie_task_refs_coalesced = 0;
143 elem->iie_task_refs_dropped = 0;
144}
145#else
146#define incr_ref_counter(x)
147#endif
148
149#if DEVELOPMENT || DEBUG
150static queue_head_t global_iit_alloc_queue;
151#endif
152
/* TODO: remove this variable when interactive daemon audit is complete */
154boolean_t ipc_importance_interactive_receiver = FALSE;
155
156static zone_t ipc_importance_task_zone;
157static zone_t ipc_importance_inherit_zone;
158
159static ipc_voucher_attr_control_t ipc_importance_control;
160
161static boolean_t ipc_importance_task_check_transition(ipc_importance_task_t task_imp,
162 iit_update_type_t type, uint32_t delta);
163
164static void ipc_importance_task_propagate_assertion_locked(ipc_importance_task_t task_imp,
165 iit_update_type_t type, boolean_t update_task_imp);
166
167static ipc_importance_inherit_t ipc_importance_inherit_from_task(task_t from_task, task_t to_task);
168
169/*
170 * Routine: ipc_importance_kmsg_link
171 * Purpose:
172 * Link the kmsg onto the appropriate propagation chain.
173 * If the element is a task importance, we link directly
174 * on its propagation chain. Otherwise, we link onto the
175 * destination task of the inherit.
176 * Conditions:
177 * Importance lock held.
178 * Caller is donating an importance elem reference to the kmsg.
179 */
180static void
181ipc_importance_kmsg_link(
182 ipc_kmsg_t kmsg,
183 ipc_importance_elem_t elem)
184{
185 ipc_importance_elem_t link_elem;
186
187 assert(IIE_NULL == kmsg->ikm_importance);
188
189 link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
190 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
191 elem;
192
193 queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
194 kmsg->ikm_importance = elem;
195}
196
197/*
198 * Routine: ipc_importance_kmsg_unlink
199 * Purpose:
200 * Unlink the kmsg from its current propagation chain.
201 * If the element is a task importance, we unlink directly
202 * from its propagation chain. Otherwise, we unlink from the
203 * destination task of the inherit.
204 * Returns:
205 * The reference to the importance element it was linked on.
206 * Conditions:
207 * Importance lock held.
208 * Caller is responsible for dropping reference on returned elem.
209 */
210static ipc_importance_elem_t
211ipc_importance_kmsg_unlink(
212 ipc_kmsg_t kmsg)
213{
214 ipc_importance_elem_t elem = kmsg->ikm_importance;
215
216 if (IIE_NULL != elem) {
217 ipc_importance_elem_t unlink_elem;
218
219 unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
220 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
221 elem;
222
223 queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
224 kmsg->ikm_importance = IIE_NULL;
225 }
226 return elem;
227}
228
229/*
230 * Routine: ipc_importance_inherit_link
231 * Purpose:
232 * Link the inherit onto the appropriate propagation chain.
233 * If the element is a task importance, we link directly
234 * on its propagation chain. Otherwise, we link onto the
235 * destination task of the inherit.
236 * Conditions:
237 * Importance lock held.
238 * Caller is donating an elem importance reference to the inherit.
239 */
240static void
241ipc_importance_inherit_link(
242 ipc_importance_inherit_t inherit,
243 ipc_importance_elem_t elem)
244{
245 ipc_importance_task_t link_task;
246
247 assert(IIE_NULL == inherit->iii_from_elem);
248 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
249 ((ipc_importance_inherit_t)elem)->iii_to_task :
250 (ipc_importance_task_t)elem;
251
252 queue_enter(&link_task->iit_inherits, inherit,
253 ipc_importance_inherit_t, iii_inheritance);
254 inherit->iii_from_elem = elem;
255}
256
257/*
258 * Routine: ipc_importance_inherit_find
259 * Purpose:
260 * Find an existing inherit that links the from element to the
261 * to_task at a given nesting depth. As inherits from other
262 * inherits are actually linked off the original inherit's donation
263 * receiving task, we have to conduct our search from there if
264 * the from element is an inherit.
265 * Returns:
266 * A pointer (not a reference) to the matching inherit.
267 * Conditions:
268 * Importance lock held.
269 */
270static ipc_importance_inherit_t
271ipc_importance_inherit_find(
272 ipc_importance_elem_t from,
273 ipc_importance_task_t to_task,
274 unsigned int depth)
275{
276 ipc_importance_task_t link_task;
277 ipc_importance_inherit_t inherit;
278
279 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(from)) ?
280 ((ipc_importance_inherit_t)from)->iii_to_task :
281 (ipc_importance_task_t)from;
282
283 queue_iterate(&link_task->iit_inherits, inherit,
284 ipc_importance_inherit_t, iii_inheritance) {
285 if (inherit->iii_to_task == to_task && inherit->iii_depth == depth) {
286 return inherit;
287 }
288 }
289 return III_NULL;
290}
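/*
 * Illustrative note (not part of the original source): an inherit made
 * from another inherit is linked on the original inherit's donation-
 * receiving task.  So an inherit whose source is the A->B inherit and
 * whose destination is task C lives on B's iit_inherits list, alongside
 * any direct B->C inherit; the iii_depth field is what distinguishes
 * the two when searching that list.
 */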
291
292/*
293 * Routine: ipc_importance_inherit_unlink
294 * Purpose:
295 * Unlink the inherit from its current propagation chain.
296 * If the element is a task importance, we unlink directly
297 * from its propagation chain. Otherwise, we unlink from the
298 * destination task of the inherit.
299 * Returns:
300 * The reference to the importance element it was linked on.
301 * Conditions:
302 * Importance lock held.
303 * Caller is responsible for dropping reference on returned elem.
304 */
305static ipc_importance_elem_t
306ipc_importance_inherit_unlink(
307 ipc_importance_inherit_t inherit)
308{
309 ipc_importance_elem_t elem = inherit->iii_from_elem;
310
311 if (IIE_NULL != elem) {
312 ipc_importance_task_t unlink_task;
313
314 unlink_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
315 ((ipc_importance_inherit_t)elem)->iii_to_task :
316 (ipc_importance_task_t)elem;
317
318 queue_remove(&unlink_task->iit_inherits, inherit,
319 ipc_importance_inherit_t, iii_inheritance);
320 inherit->iii_from_elem = IIE_NULL;
321 }
322 return elem;
323}
324
325/*
326 * Routine: ipc_importance_reference
327 * Purpose:
328 * Add a reference to the importance element.
329 * Conditions:
330 * Caller must hold a reference on the element.
331 */
332void
333ipc_importance_reference(ipc_importance_elem_t elem)
334{
335 assert(0 < IIE_REFS(elem));
336 ipc_importance_reference_internal(elem);
337}
338
339/*
340 * Routine: ipc_importance_release_locked
341 * Purpose:
342 * Release a reference on an importance attribute value,
343 * unlinking and deallocating the attribute if the last reference.
344 * Conditions:
345 * Entered with importance lock held, leaves with it unlocked.
346 */
347static void
348ipc_importance_release_locked(ipc_importance_elem_t elem)
349{
350 assert(0 < IIE_REFS(elem));
351
352#if IMPORTANCE_DEBUG
353 ipc_importance_inherit_t temp_inherit;
354 ipc_importance_task_t link_task;
355 ipc_kmsg_t temp_kmsg;
356 uint32_t expected = 0;
357
358 if (0 < elem->iie_made)
359 expected++;
360
361 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
362 ((ipc_importance_inherit_t)elem)->iii_to_task :
363 (ipc_importance_task_t)elem;
364
365 queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance)
366 if (temp_kmsg->ikm_importance == elem)
367 expected++;
368 queue_iterate(&link_task->iit_inherits, temp_inherit,
369 ipc_importance_inherit_t, iii_inheritance)
370 if (temp_inherit->iii_from_elem == elem)
371 expected++;
372 if (IIE_REFS(elem) < expected + 1)
373 panic("ipc_importance_release_locked (%p)", elem);
374#endif /* IMPORTANCE_DEBUG */
375
376 if (0 < ipc_importance_release_internal(elem)) {
377 ipc_importance_unlock();
378 return;
379 }
380
381 /* last ref */
382
383 switch (IIE_TYPE(elem)) {
384
385 /* just a "from" task reference to drop */
386 case IIE_TYPE_TASK:
387 {
388 ipc_importance_task_t task_elem;
389
390 task_elem = (ipc_importance_task_t)elem;
391
392 /* the task can't still hold a reference on the task importance */
393 assert(TASK_NULL == task_elem->iit_task);
394
395#if DEVELOPMENT || DEBUG
396 queue_remove(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
397#endif
398
399 ipc_importance_unlock();
400
401 zfree(ipc_importance_task_zone, task_elem);
402 break;
403 }
404
405 /* dropping an inherit element */
406 case IIE_TYPE_INHERIT:
407 {
408 ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
409 ipc_importance_task_t to_task = inherit->iii_to_task;
410 ipc_importance_elem_t from_elem;
411
412 assert(IIT_NULL != to_task);
413 assert(ipc_importance_task_is_any_receiver_type(to_task));
414
415 /* unlink the inherit from its source element */
416 from_elem = ipc_importance_inherit_unlink(inherit);
417 assert(IIE_NULL != from_elem);
418
419 /*
 * The attribute might have pending external boosts if the attribute
 * was given out during exec; drop them from the appropriate
 * destination task.
 *
 * The attribute will not have any pending external boosts if it was
 * given out to the voucher system, since they would have been dropped
 * by ipc_importance_release_value.  But there is no way to detect
 * that, so if the attribute has pending external boosts, drop them
 * from the appropriate destination task.
 *
 * Inherit attributes from exec and from the voucher system are never
 * deduped with each other, so dropping the external boost from the
 * destination task in two different places has no unintended side
 * effects.
434 */
435 assert(inherit->iii_externcnt >= inherit->iii_externdrop);
436 if (inherit->iii_donating) {
437 uint32_t assertcnt = III_EXTERN(inherit);
438
439 assert(ipc_importance_task_is_any_receiver_type(to_task));
440 assert(to_task->iit_externcnt >= inherit->iii_externcnt);
441 assert(to_task->iit_externdrop >= inherit->iii_externdrop);
442 to_task->iit_externcnt -= inherit->iii_externcnt;
443 to_task->iit_externdrop -= inherit->iii_externdrop;
444 inherit->iii_externcnt = 0;
445 inherit->iii_externdrop = 0;
446 inherit->iii_donating = FALSE;
447
448 /* adjust the internal assertions - and propagate as needed */
449 if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, assertcnt)) {
450 ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
451 }
452 } else {
453 inherit->iii_externcnt = 0;
454 inherit->iii_externdrop = 0;
455 }
456
457 /* release the reference on the source element */
458 ipc_importance_release_locked(from_elem);
459 /* unlocked on return */
460
461 /* release the reference on the destination task */
462 ipc_importance_task_release(to_task);
463
464 /* free the inherit */
465 zfree(ipc_importance_inherit_zone, inherit);
466 break;
467 }
468 }
469}
470
471/*
472 * Routine: ipc_importance_release
473 * Purpose:
474 * Release a reference on an importance attribute value,
475 * unlinking and deallocating the attribute if the last reference.
476 * Conditions:
477 * nothing locked on entrance, nothing locked on exit.
478 * May block.
479 */
480void
481ipc_importance_release(ipc_importance_elem_t elem)
482{
483 if (IIE_NULL == elem)
484 return;
485
486 ipc_importance_lock();
487 ipc_importance_release_locked(elem);
488 /* unlocked */
489}
490
491/*
492 * Routine: ipc_importance_task_reference
495 * Purpose:
496 * Retain a reference on a task importance attribute value.
497 * Conditions:
498 * nothing locked on entrance, nothing locked on exit.
499 * caller holds a reference already.
500 */
501void
502ipc_importance_task_reference(ipc_importance_task_t task_elem)
503{
504 if (IIT_NULL == task_elem)
505 return;
506#if IIE_REF_DEBUG
507 incr_ref_counter(task_elem->iit_elem.iie_task_refs_added);
508#endif
509 ipc_importance_reference(&task_elem->iit_elem);
510}
511
512/*
513 * Routine: ipc_importance_task_release
514 * Purpose:
515 * Release a reference on a task importance attribute value,
516 * unlinking and deallocating the attribute if the last reference.
517 * Conditions:
518 * nothing locked on entrance, nothing locked on exit.
519 * May block.
520 */
521void
522ipc_importance_task_release(ipc_importance_task_t task_elem)
523{
524 if (IIT_NULL == task_elem)
525 return;
526
527 ipc_importance_lock();
528#if IIE_REF_DEBUG
529 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
530#endif
531 ipc_importance_release_locked(&task_elem->iit_elem);
532 /* unlocked */
533}
534
535/*
536 * Routine: ipc_importance_task_release_locked
537 * Purpose:
538 * Release a reference on a task importance attribute value,
539 * unlinking and deallocating the attribute if the last reference.
540 * Conditions:
541 * importance lock held on entry, nothing locked on exit.
542 * May block.
543 */
544static void
545ipc_importance_task_release_locked(ipc_importance_task_t task_elem)
546{
547 if (IIT_NULL == task_elem) {
548 ipc_importance_unlock();
549 return;
550 }
551#if IIE_REF_DEBUG
552 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
553#endif
554 ipc_importance_release_locked(&task_elem->iit_elem);
555 /* unlocked */
556}
557
558/*
559 * Routines for importance donation/inheritance/boosting
560 */
561
562
563/*
 * External importance assertions are managed by the process in userspace.
 * Internal importance assertions are the responsibility of the kernel.
 * Assertions are changed from internal to external via task_importance_externalize_assertion().
567 */
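/*
 * Illustrative sketch (not part of the original source, compiled out):
 * a typical assertion lifecycle.  The kernel takes an internal
 * assertion on behalf of a receiver, may later hand responsibility for
 * it to userspace by externalizing it as a legacy external assertion,
 * and the process eventually drops that external assertion, which is
 * reflected back onto the internal count.
 */
#if 0
static void
ipc_importance_assertion_lifecycle_example(ipc_importance_task_t task_imp)
{
	/* kernel-held boost (e.g. taken when a boosting message arrives) */
	ipc_importance_task_hold_internal_assertion(task_imp, 1);

	/* hand it to userspace; it is now counted as a legacy external assertion */
	ipc_importance_task_externalize_legacy_assertion(task_imp, 1, 0 /* sender pid, tracing only */);

	/* userspace later releases it; the internal count drops with it */
	ipc_importance_task_drop_legacy_external_assertion(task_imp, 1);
}
#endif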
568
569/*
570 * Routine: ipc_importance_task_check_transition
571 * Purpose:
 * Increment or decrement the internal task importance counter of the
 * specified task and determine whether propagation and a task policy
 * update are required.
575 *
576 * If it is already enqueued for a policy update, steal it from that queue
577 * (as we are reversing that update before it happens).
578 *
579 * Conditions:
580 * Called with the importance lock held.
581 * It is the caller's responsibility to perform the propagation of the
582 * transition and/or policy changes by checking the return value.
583 */
584static boolean_t
585ipc_importance_task_check_transition(
586 ipc_importance_task_t task_imp,
587 iit_update_type_t type,
588 uint32_t delta)
589{
590#if IMPORTANCE_TRACE
591 task_t target_task = task_imp->iit_task;
592#endif
593 boolean_t boost = (IIT_UPDATE_HOLD == type);
594 boolean_t before_boosted, after_boosted;
595
596 ipc_importance_assert_held();
597
598 if (!ipc_importance_task_is_any_receiver_type(task_imp))
599 return FALSE;
600
601#if IMPORTANCE_TRACE
602 int target_pid = task_pid(target_task);
603
604 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
605 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
606#endif
607
608 /* snapshot the effective boosting status before making any changes */
609 before_boosted = (task_imp->iit_assertcnt > 0);
610
611 /* Adjust the assertcnt appropriately */
612 if (boost) {
613 task_imp->iit_assertcnt += delta;
614#if IMPORTANCE_TRACE
615 DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
616 task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
617#endif
618 } else {
619 // assert(delta <= task_imp->iit_assertcnt);
620 if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) {
621 /* TODO: Turn this back into a panic <rdar://problem/12592649> */
622 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
623 } else {
624 task_imp->iit_assertcnt -= delta;
625 }
626#if IMPORTANCE_TRACE
 // This covers both legacy and voucher-based importance.
628 DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
629#endif
630 }
631
632#if IMPORTANCE_TRACE
633 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
634 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
635#endif
636
637 /* did the change result in an effective donor status change? */
638 after_boosted = (task_imp->iit_assertcnt > 0);
639
640 if (after_boosted != before_boosted) {
641
642 /*
643 * If the task importance is already on an update queue, we just reversed the need for a
 * pending policy update. If the queue is any queue other than the delayed-drop queue, pull it
 * off that queue and release the reference it got when going onto the update queue. If it is
 * the delayed-drop queue, we leave it in place in case it comes back into the drop state
647 * before its time delay is up.
648 *
649 * We still need to propagate the change downstream to reverse the assertcnt effects,
650 * but we no longer need to update this task's boost policy state.
651 *
652 * Otherwise, mark it as needing a policy update.
653 */
654 assert(0 == task_imp->iit_updatepolicy);
655 if (NULL != task_imp->iit_updateq) {
656 if (&ipc_importance_delayed_drop_queue != task_imp->iit_updateq) {
657 queue_remove(task_imp->iit_updateq, task_imp, ipc_importance_task_t, iit_updates);
658 task_imp->iit_updateq = NULL;
659 ipc_importance_task_release_internal(task_imp); /* can't be last ref */
660 }
661 } else {
662 task_imp->iit_updatepolicy = 1;
663 }
664 return TRUE;
665 }
666
667 return FALSE;
668}
669
670
671/*
672 * Routine: ipc_importance_task_propagate_helper
673 * Purpose:
 * Increment or decrement the internal task importance counter of all
675 * importance tasks inheriting from the specified one. If this causes
676 * that importance task to change state, add it to the list of tasks
677 * to do a policy update against.
678 * Conditions:
679 * Called with the importance lock held.
680 * It is the caller's responsibility to iterate down the generated list
681 * and propagate any subsequent assertion changes from there.
682 */
683static void
684ipc_importance_task_propagate_helper(
685 ipc_importance_task_t task_imp,
686 iit_update_type_t type,
687 queue_t propagation)
688{
689 ipc_importance_task_t temp_task_imp;
690
691 /*
692 * iterate the downstream kmsgs, adjust their boosts,
693 * and capture the next task to adjust for each message
694 */
695
696 ipc_kmsg_t temp_kmsg;
697
698 queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
699 mach_msg_header_t *hdr = temp_kmsg->ikm_header;
700 mach_port_delta_t delta;
701 ipc_port_t port;
702
703 /* toggle the kmsg importance bit as a barrier to parallel adjusts */
704 if (IIT_UPDATE_HOLD == type) {
705 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
706 continue;
707 }
708
709 /* mark the message as now carrying importance */
710 hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
711 delta = 1;
712 } else {
713 if (!MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
714 continue;
715 }
716
717 /* clear the message as now carrying importance */
718 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
719 delta = -1;
720 }
721
722 /* determine the task importance to adjust as result (if any) */
723 port = (ipc_port_t) hdr->msgh_remote_port;
724 assert(IP_VALID(port));
725 ip_lock(port);
726 temp_task_imp = IIT_NULL;
727 if (!ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &temp_task_imp)) {
728 ip_unlock(port);
729 }
730
731 /* no task importance to adjust associated with the port? */
732 if (IIT_NULL == temp_task_imp) {
733 continue;
734 }
735
736 /* hold a reference on temp_task_imp */
737
738 /* Adjust the task assertions and determine if an edge was crossed */
739 if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
740 incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
741 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
742 /* reference donated */
743 } else {
744 ipc_importance_task_release_internal(temp_task_imp);
745 }
746 }
747
748 /*
749 * iterate the downstream importance inherits
750 * and capture the next task importance to boost for each
751 */
752 ipc_importance_inherit_t temp_inherit;
753
754 queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
755 uint32_t assertcnt = III_EXTERN(temp_inherit);
756
757 temp_task_imp = temp_inherit->iii_to_task;
758 assert(IIT_NULL != temp_task_imp);
759
760 if (IIT_UPDATE_HOLD == type) {
761 /* if no undropped externcnts in the inherit, nothing to do */
762 if (0 == assertcnt) {
763 assert(temp_inherit->iii_donating == FALSE);
764 continue;
765 }
766
767 /* nothing to do if the inherit is already donating (forced donation) */
768 if (temp_inherit->iii_donating) {
769 continue;
770 }
771
772 /* mark it donating and contribute to the task externcnts */
773 temp_inherit->iii_donating = TRUE;
774 temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt;
775 temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop;
776
777 } else {
778 /* if no contributing assertions, move on */
779 if (0 == assertcnt) {
780 assert(temp_inherit->iii_donating == FALSE);
781 continue;
782 }
783
784 /* nothing to do if the inherit is not donating */
785 if (!temp_inherit->iii_donating) {
786 continue;
787 }
788
789 /* mark it no longer donating */
790 temp_inherit->iii_donating = FALSE;
791
792 /* remove the contribution the inherit made to the to-task */
793 assert(IIT_EXTERN(temp_task_imp) >= III_EXTERN(temp_inherit));
794 assert(temp_task_imp->iit_externcnt >= temp_inherit->iii_externcnt);
795 assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop);
796 temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt;
797 temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop;
798
799 }
800
801 /* Adjust the task assertions and determine if an edge was crossed */
802 assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
803 if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
804 ipc_importance_task_reference(temp_task_imp);
805 incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
806 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
807 }
808 }
809}
810
811/*
812 * Routine: ipc_importance_task_process_updates
813 * Purpose:
814 * Process the queue of task importances and apply the policy
815 * update called for. Only process tasks in the queue with an
816 * update timestamp less than the supplied max.
817 * Conditions:
818 * Called and returns with importance locked.
819 * May drop importance lock and block temporarily.
820 */
821static void
822ipc_importance_task_process_updates(
823 queue_t supplied_queue,
824 boolean_t boost,
825 uint64_t max_timestamp)
826{
827 ipc_importance_task_t task_imp;
828 queue_head_t second_chance;
829 queue_t queue = supplied_queue;
830
831 /*
 * This queue will hold the tasks we couldn't trylock on the first pass.
833 * By using a second (private) queue, we guarantee all tasks that get
834 * entered on this queue have a timestamp under the maximum.
835 */
836 queue_init(&second_chance);
837
838 /* process any resulting policy updates */
839 retry:
 while (!queue_empty(queue)) {
841 task_t target_task;
842 struct task_pend_token pend_token = {};
843
844 task_imp = (ipc_importance_task_t)queue_first(queue);
845 assert(0 == task_imp->iit_updatepolicy);
846 assert(queue == task_imp->iit_updateq);
847
848 /* if timestamp is too big, we're done */
849 if (task_imp->iit_updatetime > max_timestamp) {
850 break;
851 }
852
853 /* we were given a reference on each task in the queue */
854
855 /* remove it from the supplied queue */
856 queue_remove(queue, task_imp, ipc_importance_task_t, iit_updates);
857 task_imp->iit_updateq = NULL;
858
859 target_task = task_imp->iit_task;
860
861 /* Is it well on the way to exiting? */
862 if (TASK_NULL == target_task) {
863 ipc_importance_task_release_locked(task_imp);
864 /* importance unlocked */
865 ipc_importance_lock();
866 continue;
867 }
868
869 /* Has the update been reversed on the hysteresis queue? */
870 if (0 < task_imp->iit_assertcnt &&
871 queue == &ipc_importance_delayed_drop_queue) {
872 ipc_importance_task_release_locked(task_imp);
873 /* importance unlocked */
874 ipc_importance_lock();
875 continue;
876 }
877
878 /*
879 * Can we get the task lock out-of-order?
880 * If not, stick this back on the second-chance queue.
881 */
882 if (!task_lock_try(target_task)) {
883 boolean_t should_wait_lock = (queue == &second_chance);
884 task_imp->iit_updateq = &second_chance;
885
886 /*
887 * If we're already processing second-chances on
888 * tasks, keep this task on the front of the queue.
889 * We will wait for the task lock before coming
890 * back and trying again, and we have a better
891 * chance of re-acquiring the lock if we come back
892 * to it right away.
893 */
 if (should_wait_lock) {
895 task_reference(target_task);
896 queue_enter_first(&second_chance, task_imp,
897 ipc_importance_task_t, iit_updates);
898 } else {
899 queue_enter(&second_chance, task_imp,
900 ipc_importance_task_t, iit_updates);
901 }
902 ipc_importance_unlock();
903
904 if (should_wait_lock) {
905 task_lock(target_task);
906 task_unlock(target_task);
907 task_deallocate(target_task);
908 }
909
910 ipc_importance_lock();
911 continue;
912 }
913
914 /* is it going away? */
915 if (!target_task->active) {
916 task_unlock(target_task);
917 ipc_importance_task_release_locked(task_imp);
918 /* importance unlocked */
919 ipc_importance_lock();
920 continue;
921 }
922
923 /* take a task reference for while we don't have the importance lock */
924 task_reference(target_task);
925
926 /* count the transition */
927 if (boost)
928 task_imp->iit_transitions++;
929
930 ipc_importance_unlock();
931
932 /* apply the policy adjust to the target task (while it is still locked) */
933 task_update_boost_locked(target_task, boost, &pend_token);
934
935 /* complete the policy update with the task unlocked */
936 ipc_importance_task_release(task_imp);
937 task_unlock(target_task);
938 task_policy_update_complete_unlocked(target_task, &pend_token);
939 task_deallocate(target_task);
940
941 ipc_importance_lock();
942 }
943
944 /* If there are tasks we couldn't update the first time, try again */
945 if (!queue_empty(&second_chance)) {
946 queue = &second_chance;
947 goto retry;
948 }
949}
950
951
952/*
953 * Routine: ipc_importance_task_delayed_drop_scan
954 * Purpose:
955 * The thread call routine to scan the delayed drop queue,
956 * requesting all updates with a deadline up to the last target
 * for the thread-call (which is DENAP_DROP_SKEW beyond the first
 * queued request's optimum delay).
960 * Conditions:
961 * Nothing locked
962 */
963static void
964ipc_importance_task_delayed_drop_scan(
965 __unused void *arg1,
966 __unused void *arg2)
967{
968 ipc_importance_lock();
969
970 /* process all queued task drops with timestamps up to TARGET(first)+SKEW */
971 ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue,
972 FALSE,
973 ipc_importance_delayed_drop_timestamp);
974
975 /* importance lock may have been temporarily dropped */
976
977 /* If there are any entries left in the queue, re-arm the call here */
978 if (!queue_empty(&ipc_importance_delayed_drop_queue)) {
979 ipc_importance_task_t task_imp;
980 uint64_t deadline;
981 uint64_t leeway;
982
983 task_imp = (ipc_importance_task_t)queue_first(&ipc_importance_delayed_drop_queue);
984
985 nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
986 deadline += task_imp->iit_updatetime;
987 ipc_importance_delayed_drop_timestamp = deadline;
988
989 nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);
990
991 thread_call_enter_delayed_with_leeway(
992 ipc_importance_delayed_drop_call,
993 NULL,
994 deadline,
995 leeway,
996 DENAP_DROP_FLAGS);
997 } else {
998 ipc_importance_delayed_drop_call_requested = FALSE;
999 }
1000 ipc_importance_unlock();
1001}
1002
1003/*
1004 * Routine: ipc_importance_task_delayed_drop
1005 * Purpose:
1006 * Queue the specified task importance for delayed policy
1007 * update to drop its boost.
1008 * Conditions:
1009 * Called with the importance lock held.
1010 */
1011static void
1012ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp)
1013{
1014 uint64_t timestamp = mach_absolute_time(); /* no mach_approximate_time() in kernel */
1015
1016 assert(ipc_importance_delayed_drop_call != NULL);
1017
1018 /*
1019 * If still on an update queue from a previous change,
1020 * remove it first (and use that reference). Otherwise, take
1021 * a new reference for the delay drop update queue.
1022 */
1023 if (NULL != task_imp->iit_updateq) {
1024 queue_remove(task_imp->iit_updateq, task_imp,
1025 ipc_importance_task_t, iit_updates);
1026 } else {
1027 ipc_importance_task_reference_internal(task_imp);
1028 }
1029
1030 task_imp->iit_updateq = &ipc_importance_delayed_drop_queue;
1031 task_imp->iit_updatetime = timestamp;
1032
1033 queue_enter(&ipc_importance_delayed_drop_queue, task_imp,
1034 ipc_importance_task_t, iit_updates);
1035
1036 /* request the delayed thread-call if not already requested */
1037 if (!ipc_importance_delayed_drop_call_requested) {
1038 uint64_t deadline;
1039 uint64_t leeway;
1040
1041 nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
1042 deadline += task_imp->iit_updatetime;
1043 ipc_importance_delayed_drop_timestamp = deadline;
1044
1045 nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);
1046
1047 ipc_importance_delayed_drop_call_requested = TRUE;
1048 thread_call_enter_delayed_with_leeway(
1049 ipc_importance_delayed_drop_call,
1050 NULL,
1051 deadline,
1052 leeway,
1053 DENAP_DROP_FLAGS);
1054 }
1055}
1056
1057
1058/*
1059 * Routine: ipc_importance_task_propagate_assertion_locked
1060 * Purpose:
1061 * Propagate the importance transition type to every item
1062 * If this causes a boost to be applied, determine if that
1063 * boost should propagate downstream.
1064 * Conditions:
1065 * Called with the importance lock held.
1066 */
1067static void
1068ipc_importance_task_propagate_assertion_locked(
1069 ipc_importance_task_t task_imp,
1070 iit_update_type_t type,
1071 boolean_t update_task_imp)
1072{
1073 boolean_t boost = (IIT_UPDATE_HOLD == type);
1074 ipc_importance_task_t temp_task_imp;
1075 queue_head_t propagate;
1076 queue_head_t updates;
1077
1078 queue_init(&updates);
1079 queue_init(&propagate);
1080
1081 ipc_importance_assert_held();
1082
1083 /*
1084 * If we're going to update the policy for the provided task,
1085 * enqueue it on the propagate queue itself. Otherwise, only
1086 * enqueue downstream things.
1087 */
1088 if (update_task_imp) {
1089 ipc_importance_task_reference(task_imp);
1090 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
1091 queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
1092 } else {
1093 ipc_importance_task_propagate_helper(task_imp, type, &propagate);
1094 }
1095
1096 /*
1097 * for each item on the propagation list, propagate any change downstream,
 * adding new tasks to propagate further if they transitioned as well.
1099 */
1100 while (!queue_empty(&propagate)) {
1101 boolean_t need_update;
1102
1103 queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
1104 /* hold a reference on temp_task_imp */
1105
1106 assert(IIT_NULL != temp_task_imp);
1107
1108 /* only propagate for receivers not already marked as a donor */
1109 if (!ipc_importance_task_is_marked_donor(temp_task_imp) &&
1110 ipc_importance_task_is_marked_receiver(temp_task_imp)) {
1111 ipc_importance_task_propagate_helper(temp_task_imp, type, &propagate);
1112 }
1113
1114 /* if we have a policy update to apply, enqueue a reference for later processing */
1115 need_update = (0 != temp_task_imp->iit_updatepolicy);
1116 temp_task_imp->iit_updatepolicy = 0;
1117 if (need_update && TASK_NULL != temp_task_imp->iit_task) {
1118 if (NULL == temp_task_imp->iit_updateq) {
1119
1120 /*
 * If a downstream task that needs an update is subject to AppNap,
 * drop boosts according to the delay hysteresis. Otherwise,
 * update it immediately.
1124 */
1125 if (!boost && temp_task_imp != task_imp &&
1126 ipc_importance_delayed_drop_call != NULL &&
1127 ipc_importance_task_is_marked_denap_receiver(temp_task_imp)) {
1128 ipc_importance_task_delayed_drop(temp_task_imp);
1129 } else {
1130 temp_task_imp->iit_updatetime = 0;
1131 temp_task_imp->iit_updateq = &updates;
1132 ipc_importance_task_reference_internal(temp_task_imp);
1133 if (boost) {
1134 queue_enter(&updates, temp_task_imp,
1135 ipc_importance_task_t, iit_updates);
1136 } else {
1137 queue_enter_first(&updates, temp_task_imp,
1138 ipc_importance_task_t, iit_updates);
1139 }
1140 }
1141 } else {
1142 /* Must already be on the AppNap hysteresis queue */
1143 assert(ipc_importance_delayed_drop_call != NULL);
1144 assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
1145 }
1146 }
1147
1148 ipc_importance_task_release_internal(temp_task_imp);
1149 }
1150
1151 /* apply updates to task (may drop importance lock) */
1152 if (!queue_empty(&updates)) {
1153 ipc_importance_task_process_updates(&updates, boost, 0);
1154 }
1155}
1156
1157/*
1158 * Routine: ipc_importance_task_hold_internal_assertion_locked
1159 * Purpose:
1160 * Increment the assertion count on the task importance.
1161 * If this results in a boost state change in that task,
 * prepare to update task policy for this task AND, if
 * not just waking out of App Nap, all down-stream
1164 * tasks that have a similar transition through inheriting
1165 * this update.
1166 * Conditions:
1167 * importance locked on entry and exit.
1168 * May temporarily drop importance lock and block.
1169 */
1170static kern_return_t
1171ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1172{
1173 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
1174 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
1175 }
1176 return KERN_SUCCESS;
1177}
1178
1179/*
1180 * Routine: ipc_importance_task_drop_internal_assertion_locked
1181 * Purpose:
1182 * Decrement the assertion count on the task importance.
1183 * If this results in a boost state change in that task,
 * prepare to update task policy for this task AND, if
 * not just waking out of App Nap, all down-stream
1186 * tasks that have a similar transition through inheriting
1187 * this update.
1188 * Conditions:
1189 * importance locked on entry and exit.
1190 * May temporarily drop importance lock and block.
1191 */
1192static kern_return_t
1193ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1194{
1195 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1196 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1197 }
1198 return KERN_SUCCESS;
1199}
1200
1201/*
1202 * Routine: ipc_importance_task_hold_internal_assertion
1203 * Purpose:
1204 * Increment the assertion count on the task importance.
1205 * If this results in a 0->1 change in that count,
1206 * prepare to update task policy for this task AND
1207 * (potentially) all down-stream tasks that have a
1208 * similar transition through inheriting this update.
1209 * Conditions:
1210 * Nothing locked
1211 * May block after dropping importance lock.
1212 */
1213int
1214ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1215{
1216 int ret = KERN_SUCCESS;
1217
1218 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1219 ipc_importance_lock();
1220 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1221 ipc_importance_unlock();
1222 }
1223 return ret;
1224}
1225
1226/*
1227 * Routine: ipc_importance_task_drop_internal_assertion
1228 * Purpose:
1229 * Decrement the assertion count on the task importance.
1230 * If this results in a X->0 change in that count,
1231 * prepare to update task policy for this task AND
1232 * all down-stream tasks that have a similar transition
1233 * through inheriting this drop update.
1234 * Conditions:
1235 * Nothing locked on entry.
1236 * May block after dropping importance lock.
1237 */
1238kern_return_t
1239ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1240{
1241 kern_return_t ret = KERN_SUCCESS;
1242
1243 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1244 ipc_importance_lock();
1245 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1246 ipc_importance_unlock();
1247 }
1248 return ret;
1249}
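/*
 * Illustrative sketch (not part of the original source, compiled out):
 * internal assertions taken by the kernel are expected to be balanced
 * by a matching drop; both routines are no-ops returning KERN_SUCCESS
 * for tasks that are not marked as any kind of importance receiver.
 */
#if 0
static void
ipc_importance_internal_assertion_example(ipc_importance_task_t task_imp)
{
	if (KERN_SUCCESS == ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
		/* ... work performed while the task holds the boost ... */
		ipc_importance_task_drop_internal_assertion(task_imp, 1);
	}
}
#endif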
1250
1251/*
1252 * Routine: ipc_importance_task_hold_file_lock_assertion
1253 * Purpose:
1254 * Increment the file lock assertion count on the task importance.
1255 * If this results in a 0->1 change in that count,
1256 * prepare to update task policy for this task AND
1257 * (potentially) all down-stream tasks that have a
1258 * similar transition through inheriting this update.
1259 * Conditions:
1260 * Nothing locked
1261 * May block after dropping importance lock.
1262 */
1263kern_return_t
1264ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1265{
1266 kern_return_t ret = KERN_SUCCESS;
1267
1268 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1269 ipc_importance_lock();
1270 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1271 if (KERN_SUCCESS == ret) {
1272 task_imp->iit_filelocks += count;
1273 }
1274 ipc_importance_unlock();
1275 }
1276 return ret;
1277}
1278
1279/*
1280 * Routine: ipc_importance_task_drop_file_lock_assertion
1281 * Purpose:
1282 * Decrement the assertion count on the task importance.
1283 * If this results in a X->0 change in that count,
1284 * prepare to update task policy for this task AND
1285 * all down-stream tasks that have a similar transition
1286 * through inheriting this drop update.
1287 * Conditions:
1288 * Nothing locked on entry.
1289 * May block after dropping importance lock.
1290 */
1291kern_return_t
1292ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1293{
1294 kern_return_t ret = KERN_SUCCESS;
1295
1296 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1297 ipc_importance_lock();
1298 if (count <= task_imp->iit_filelocks) {
1299 task_imp->iit_filelocks -= count;
1300 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1301 } else {
1302 ret = KERN_INVALID_ARGUMENT;
1303 }
1304 ipc_importance_unlock();
1305 }
1306 return ret;
1307}
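/*
 * Illustrative sketch (not part of the original source, compiled out):
 * file-lock assertions must also be balanced; an over-release is
 * rejected with KERN_INVALID_ARGUMENT rather than driving the
 * iit_filelocks count negative.
 */
#if 0
static void
ipc_importance_file_lock_assertion_example(ipc_importance_task_t task_imp)
{
	kern_return_t kr;

	kr = ipc_importance_task_hold_file_lock_assertion(task_imp, 1);
	if (KERN_SUCCESS == kr) {
		/* ... the task stays boosted while it holds the file lock ... */
		kr = ipc_importance_task_drop_file_lock_assertion(task_imp, 1);
		assert(KERN_SUCCESS == kr);
	}
}
#endif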
1308
1309/*
1310 * Routine: ipc_importance_task_hold_legacy_external_assertion
1311 * Purpose:
1312 * Increment the external assertion count on the task importance.
 * This cannot result in a 0->1 transition, as the caller must
1314 * already hold an external boost.
1315 * Conditions:
1316 * Nothing locked on entry.
1317 * May block after dropping importance lock.
1318 * A queue of task importance structures is returned
1319 * by ipc_importance_task_hold_assertion_locked(). Each
1320 * needs to be updated (outside the importance lock hold).
1321 */
1322kern_return_t
1323ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1324{
1325 task_t target_task;
1326 uint32_t target_assertcnt;
1327 uint32_t target_externcnt;
1328 uint32_t target_legacycnt;
1329
1330 kern_return_t ret;
1331
1332 ipc_importance_lock();
1333 target_task = task_imp->iit_task;
1334
1335#if IMPORTANCE_TRACE
1336 int target_pid = task_pid(target_task);
1337
1338 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1339 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1340#endif
1341
1342 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1343 /* Only allowed to take a new boost assertion when holding an external boost */
1344 /* save data for diagnostic printf below */
1345 target_assertcnt = task_imp->iit_assertcnt;
1346 target_externcnt = IIT_EXTERN(task_imp);
1347 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1348 ret = KERN_FAILURE;
1349 count = 0;
1350 } else {
1351 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1352 assert(0 < task_imp->iit_assertcnt);
1353 assert(0 < IIT_EXTERN(task_imp));
1354 task_imp->iit_assertcnt += count;
1355 task_imp->iit_externcnt += count;
1356 task_imp->iit_legacy_externcnt += count;
1357 ret = KERN_SUCCESS;
1358 }
1359 ipc_importance_unlock();
1360
1361#if IMPORTANCE_TRACE
1362 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1363 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1364 // This covers the legacy case where a task takes an extra boost.
1365 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt);
1366#endif
1367
1368 if (KERN_FAILURE == ret && target_task != TASK_NULL) {
1369 printf("BUG in process %s[%d]: "
1370 "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. "
1371 "(%d total, %d external, %d legacy-external)\n",
1372 proc_name_address(target_task->bsd_info), task_pid(target_task),
1373 target_assertcnt, target_externcnt, target_legacycnt);
1374 }
1375
1376 return(ret);
1377}
1378
1379/*
1380 * Routine: ipc_importance_task_drop_legacy_external_assertion
1381 * Purpose:
1382 * Drop the legacy external assertion count on the task and
1383 * reflect that change to total external assertion count and
1384 * then onto the internal importance count.
1385 *
1386 * If this results in a X->0 change in the internal,
1387 * count, prepare to update task policy for this task AND
1388 * all down-stream tasks that have a similar transition
1389 * through inheriting this update.
1390 * Conditions:
1391 * Nothing locked on entry.
1392 */
1393kern_return_t
1394ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1395{
1396 int ret = KERN_SUCCESS;
1397 task_t target_task;
1398 uint32_t target_assertcnt;
1399 uint32_t target_externcnt;
1400 uint32_t target_legacycnt;
1401
1402 if (count > 1) {
1403 return KERN_INVALID_ARGUMENT;
1404 }
1405
1406 ipc_importance_lock();
1407 target_task = task_imp->iit_task;
1408
1409#if IMPORTANCE_TRACE
1410 int target_pid = task_pid(target_task);
1411
1412 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1413 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1414#endif
1415
1416 if (count > IIT_LEGACY_EXTERN(task_imp)) {
1417 /* Process over-released its boost count - save data for diagnostic printf */
1418 /* TODO: If count > 1, we should clear out as many external assertions as there are left. */
1419 target_assertcnt = task_imp->iit_assertcnt;
1420 target_externcnt = IIT_EXTERN(task_imp);
1421 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1422 ret = KERN_FAILURE;
1423 } else {
1424 /*
1425 * decrement legacy external count from the top level and reflect
1426 * into internal for this and all subsequent updates.
1427 */
1428 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1429 assert(IIT_EXTERN(task_imp) >= count);
1430
1431 task_imp->iit_legacy_externdrop += count;
1432 task_imp->iit_externdrop += count;
1433
1434 /* reset extern counters (if appropriate) */
1435 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1436 if (IIT_EXTERN(task_imp) != 0) {
1437 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
1438 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
1439 } else {
1440 task_imp->iit_externcnt = 0;
1441 task_imp->iit_externdrop = 0;
1442 }
1443 task_imp->iit_legacy_externcnt = 0;
1444 task_imp->iit_legacy_externdrop = 0;
1445 }
1446
1447 /* reflect the drop to the internal assertion count (and effect any importance change) */
1448 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1449 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1450 }
1451 ret = KERN_SUCCESS;
1452 }
1453
1454#if IMPORTANCE_TRACE
1455 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1456 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1457#endif
1458
1459 ipc_importance_unlock();
1460
1461 /* delayed printf for user-supplied data failures */
1462 if (KERN_FAILURE == ret && TASK_NULL != target_task) {
1463 printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n",
1464 proc_name_address(target_task->bsd_info), task_pid(target_task),
1465 target_assertcnt, target_externcnt, target_legacycnt);
1466 }
1467
1468 return(ret);
1469}
1470
1471
1472
1473/* Transfer an assertion to legacy userspace responsibility */
1474static kern_return_t
1475ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, uint32_t count, __unused int sender_pid)
1476{
1477 task_t target_task;
1478
1479 assert(IIT_NULL != task_imp);
1480 target_task = task_imp->iit_task;
1481
1482 if (TASK_NULL == target_task ||
1483 !ipc_importance_task_is_any_receiver_type(task_imp)) {
1484 return KERN_FAILURE;
1485 }
1486
1487#if IMPORTANCE_TRACE
1488 int target_pid = task_pid(target_task);
1489
1490 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
1491 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
1492#endif
1493
1494 ipc_importance_lock();
1495 /* assert(task_imp->iit_assertcnt >= IIT_EXTERN(task_imp) + count); */
1496 assert(IIT_EXTERN(task_imp) >= IIT_LEGACY_EXTERN(task_imp));
1497 task_imp->iit_legacy_externcnt += count;
1498 task_imp->iit_externcnt += count;
1499 ipc_importance_unlock();
1500
1501#if IMPORTANCE_TRACE
1502 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
1503 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1504 // This is the legacy boosting path
1505 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
1506#endif /* IMPORTANCE_TRACE */
1507
1508 return(KERN_SUCCESS);
1509}
1510
1511/*
1512 * Routine: ipc_importance_task_update_live_donor
1513 * Purpose:
1514 * Read the live donor status and update the live_donor bit/propagate the change in importance.
1515 * Conditions:
1516 * Nothing locked on entrance, nothing locked on exit.
1517 *
1518 * TODO: Need tracepoints around this function...
1519 */
1520void
1521ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp)
1522{
1523 uint32_t task_live_donor;
1524 boolean_t before_donor;
1525 boolean_t after_donor;
1526 task_t target_task;
1527
1528 assert(task_imp != NULL);
1529
1530 /*
1531 * Nothing to do if the task is not marked as expecting
1532 * live donor updates.
1533 */
1534 if (!ipc_importance_task_is_marked_live_donor(task_imp)) {
1535 return;
1536 }
1537
1538 ipc_importance_lock();
1539
 /* If the task got disconnected on the way here, there is no use (or ability) in adjusting live donor status */
1541 target_task = task_imp->iit_task;
1542 if (TASK_NULL == target_task) {
1543 ipc_importance_unlock();
1544 return;
1545 }
1546 before_donor = ipc_importance_task_is_marked_donor(task_imp);
1547
1548 /* snapshot task live donor status - may change, but another call will accompany the change */
1549 task_live_donor = target_task->effective_policy.tep_live_donor;
1550
1551#if IMPORTANCE_TRACE
1552 int target_pid = task_pid(target_task);
1553
1554 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1555 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START,
1556 target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0);
1557#endif
1558
1559 /* update the task importance live donor status based on the task's value */
1560 task_imp->iit_donor = task_live_donor;
1561
1562 after_donor = ipc_importance_task_is_marked_donor(task_imp);
1563
1564 /* Has the effectiveness of being a donor changed as a result of this update? */
1565 if (before_donor != after_donor) {
1566 iit_update_type_t type;
1567
1568 /* propagate assertions without updating the current task policy (already handled) */
1569 if (0 == before_donor) {
1570 task_imp->iit_transitions++;
1571 type = IIT_UPDATE_HOLD;
1572 } else {
1573 type = IIT_UPDATE_DROP;
1574 }
1575 ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
1576 }
1577
1578#if IMPORTANCE_TRACE
1579 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1580 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
1581 target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
1582#endif
1583
1584 ipc_importance_unlock();
1585}
1586
1587
1588/*
1589 * Routine: ipc_importance_task_mark_donor
1590 * Purpose:
1591 * Set the task importance donor flag.
1592 * Conditions:
1593 * Nothing locked on entrance, nothing locked on exit.
1594 *
1595 * This is only called while the task is being constructed,
1596 * so no need to update task policy or propagate downstream.
1597 */
1598void
1599ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donating)
1600{
1601 assert(task_imp != NULL);
1602
1603 ipc_importance_lock();
1604
1605 int old_donor = task_imp->iit_donor;
1606
1607 task_imp->iit_donor = (donating ? 1 : 0);
1608
1609 if (task_imp->iit_donor > 0 && old_donor == 0)
1610 task_imp->iit_transitions++;
1611
1612 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1613 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE,
1614 task_pid(task_imp->iit_task), donating,
1615 old_donor, task_imp->iit_donor, 0);
1616
1617 ipc_importance_unlock();
1618}
1619
1620/*
 * Routine: ipc_importance_task_is_marked_donor
1622 * Purpose:
1623 * Query the donor flag for the given task importance.
1624 * Conditions:
1625 * May be called without taking the importance lock.
1626 * In that case, donor status can change so you must
1627 * check only once for each donation event.
1628 */
1629boolean_t
1630ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)
1631{
1632 if (IIT_NULL == task_imp) {
1633 return FALSE;
1634 }
1635 return (0 != task_imp->iit_donor);
1636}
1637
1638/*
1639 * Routine: ipc_importance_task_mark_live_donor
1640 * Purpose:
1641 * Indicate that the task is eligible for live donor updates.
1642 * Conditions:
1643 * Nothing locked on entrance, nothing locked on exit.
1644 *
1645 * This is only called while the task is being constructed.
1646 */
1647void
1648ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp, boolean_t live_donating)
1649{
1650 assert(task_imp != NULL);
1651
1652 ipc_importance_lock();
1653 task_imp->iit_live_donor = (live_donating ? 1 : 0);
1654 ipc_importance_unlock();
1655}
1656
1657/*
1658 * Routine: ipc_importance_task_is_marked_live_donor
1659 * Purpose:
 * Query the live donor flag for the given task importance.
1661 * Conditions:
1662 * May be called without taking the importance lock.
1663 * In that case, donor status can change so you must
1664 * check only once for each donation event.
1665 */
1666boolean_t
1667ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)
1668{
1669 if (IIT_NULL == task_imp) {
1670 return FALSE;
1671 }
1672 return (0 != task_imp->iit_live_donor);
1673}
1674
1675/*
1676 * Routine: ipc_importance_task_is_donor
1677 * Purpose:
1678 * Query the full donor status for the given task importance.
1679 * Conditions:
1680 * May be called without taking the importance lock.
1681 * In that case, donor status can change so you must
1682 * check only once for each donation event.
1683 */
1684boolean_t
1685ipc_importance_task_is_donor(ipc_importance_task_t task_imp)
1686{
1687 if (IIT_NULL == task_imp) {
1688 return FALSE;
1689 }
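	/* donors are either explicitly marked, or receivers currently holding boost assertions */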
1690 return (ipc_importance_task_is_marked_donor(task_imp) ||
1691 (ipc_importance_task_is_marked_receiver(task_imp) &&
1692 task_imp->iit_assertcnt > 0));
1693}
1694
1695/*
1696 * Routine: ipc_importance_task_is_never_donor
1697 * Purpose:
1698 * Query if a given task can ever donate importance.
1699 * Conditions:
1700 * May be called without taking the importance lock.
1701 *		The condition is permanent for a given task.
1702 */
1703boolean_t
1704ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)
1705{
1706 if (IIT_NULL == task_imp) {
1707 return FALSE;
1708 }
1709 return (!ipc_importance_task_is_marked_donor(task_imp) &&
1710 !ipc_importance_task_is_marked_live_donor(task_imp) &&
1711 !ipc_importance_task_is_marked_receiver(task_imp));
1712}
1713
1714/*
1715 * Routine: ipc_importance_task_mark_receiver
1716 * Purpose:
1717 * Update the task importance receiver flag.
1718 * Conditions:
1719 * Nothing locked on entrance, nothing locked on exit.
1720 * This can only be invoked before the task is discoverable,
1721 *		so there should be no atomicity concerns.
1722 */
1723void
1724ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving)
1725{
1726 assert(task_imp != NULL);
1727
1728 ipc_importance_lock();
1729 if (receiving) {
1730 assert(task_imp->iit_assertcnt == 0);
1731 assert(task_imp->iit_externcnt == 0);
1732 assert(task_imp->iit_externdrop == 0);
1733 assert(task_imp->iit_denap == 0);
1734 task_imp->iit_receiver = 1; /* task can receive importance boost */
1735 } else if (task_imp->iit_receiver) {
1736 assert(task_imp->iit_denap == 0);
1737 if (task_imp->iit_assertcnt != 0 || IIT_EXTERN(task_imp) != 0) {
1738 panic("disabling imp_receiver on task with pending importance boosts!");
1739 }
1740 task_imp->iit_receiver = 0;
1741 }
1742 ipc_importance_unlock();
1743}
1744
1745
1746/*
1747 *	Routine:	ipc_importance_task_is_marked_receiver
1748 * Purpose:
1749 * Query the receiver flag for the given task importance.
1750 * Conditions:
1751 * May be called without taking the importance lock as
1752 * the importance flag can never change after task init.
1753 */
1754boolean_t
1755ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)
1756{
1757 return (IIT_NULL != task_imp && 0 != task_imp->iit_receiver);
1758}
1759
1760
1761/*
1762 * Routine: ipc_importance_task_mark_denap_receiver
1763 * Purpose:
1764 * Update the task importance de-nap receiver flag.
1765 * Conditions:
1766 * Nothing locked on entrance, nothing locked on exit.
1767 * This can only be invoked before the task is discoverable,
1768 *		so there should be no atomicity concerns.
1769 */
1770void
1771ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap)
1772{
1773 assert(task_imp != NULL);
1774
1775 ipc_importance_lock();
1776 if (denap) {
1777 assert(task_imp->iit_assertcnt == 0);
1778 assert(task_imp->iit_externcnt == 0);
1779 assert(task_imp->iit_receiver == 0);
1780 task_imp->iit_denap = 1; /* task can receive de-nap boost */
1781 } else if (task_imp->iit_denap) {
1782 assert(task_imp->iit_receiver == 0);
1783 if (0 < task_imp->iit_assertcnt || 0 < IIT_EXTERN(task_imp)) {
1784 panic("disabling de-nap on task with pending de-nap boosts!");
1785 }
1786 task_imp->iit_denap = 0;
1787 }
1788 ipc_importance_unlock();
1789}
1790
1791
1792/*
1793 *	Routine:	ipc_importance_task_is_marked_denap_receiver
1794 * Purpose:
1795 * Query the de-nap receiver flag for the given task importance.
1796 * Conditions:
1797 * May be called without taking the importance lock as
1798 * the de-nap flag can never change after task init.
1799 */
1800boolean_t
1801ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)
1802{
1803 return (IIT_NULL != task_imp && 0 != task_imp->iit_denap);
1804}
1805
1806/*
1807 * Routine: ipc_importance_task_is_denap_receiver
1808 * Purpose:
1809 * Query the full de-nap receiver status for the given task importance.
1810 *		For now, that is simply whether the de-nap receiver flag is set.
1811 * Conditions:
1812 * May be called without taking the importance lock as
1813 * the de-nap receiver flag can never change after task init.
1814 */
1815boolean_t
1816ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp)
1817{
1818 return (ipc_importance_task_is_marked_denap_receiver(task_imp));
1819}
1820
1821/*
1822 * Routine: ipc_importance_task_is_any_receiver_type
1823 * Purpose:
1824 * Query if the task is marked to receive boosts - either
1825 * importance or denap.
1826 * Conditions:
1827 * May be called without taking the importance lock as both
1828 * the importance and de-nap receiver flags can never change
1829 * after task init.
1830 */
1831boolean_t
1832ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)
1833{
1834 return (ipc_importance_task_is_marked_receiver(task_imp) ||
1835 ipc_importance_task_is_marked_denap_receiver(task_imp));
1836}
1837
1838#if 0 /* currently unused */
1839
1840/*
1841 * Routine: ipc_importance_inherit_reference
1842 * Purpose:
1843 * Add a reference to the inherit importance element.
1844 * Conditions:
1845 *		Caller must hold a reference on the inherit element.
1846 */
1847static inline void
1848ipc_importance_inherit_reference(ipc_importance_inherit_t inherit)
1849{
1850 ipc_importance_reference(&inherit->iii_elem);
1851}
1852#endif /* currently unused */
1853
1854/*
1855 * Routine: ipc_importance_inherit_release_locked
1856 * Purpose:
1857 * Release a reference on an inherit importance attribute value,
1858 * unlinking and deallocating the attribute if the last reference.
1859 * Conditions:
1860 * Entered with importance lock held, leaves with it unlocked.
1861 */
1862static inline void
1863ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit)
1864{
1865 ipc_importance_release_locked(&inherit->iii_elem);
1866}
1867
1868#if 0 /* currently unused */
1869/*
1870 * Routine: ipc_importance_inherit_release
1871 * Purpose:
1872 * Release a reference on an inherit importance attribute value,
1873 * unlinking and deallocating the attribute if the last reference.
1874 * Conditions:
1875 * nothing locked on entrance, nothing locked on exit.
1876 * May block.
1877 */
1878void
1879ipc_importance_inherit_release(ipc_importance_inherit_t inherit)
1880{
1881 if (III_NULL != inherit)
1882 ipc_importance_release(&inherit->iii_elem);
1883}
1884#endif /* 0 currently unused */
1885
1886/*
1887 * Routine: ipc_importance_for_task
1888 * Purpose:
1889 * Create a reference for the specified task's base importance
1890 * element. If the base importance element doesn't exist, make it and
1891 * bind it to the active task. If the task is inactive, there isn't
1892 * any need to return a new reference.
1893 * Conditions:
1894 * If made is true, a "made" reference is returned (for donating to
1895 * the voucher system). Otherwise an internal reference is returned.
1896 *
1897 * Nothing locked on entry. May block.
1898 */
1899ipc_importance_task_t
1900ipc_importance_for_task(task_t task, boolean_t made)
1901{
1902 ipc_importance_task_t task_elem;
1903 boolean_t first_pass = TRUE;
1904
1905 assert(TASK_NULL != task);
1906
1907 retry:
1908 /* No use returning anything for inactive task */
1909 if (!task->active)
1910 return IIT_NULL;
1911
1912 ipc_importance_lock();
1913 task_elem = task->task_imp_base;
1914 if (IIT_NULL != task_elem) {
1915 /* Add a made reference (borrowing active task ref to do it) */
1916 if (made) {
1917 if (0 == task_elem->iit_made++) {
1918 assert(IIT_REFS_MAX > IIT_REFS(task_elem));
1919 ipc_importance_task_reference_internal(task_elem);
1920 }
1921 } else {
1922 assert(IIT_REFS_MAX > IIT_REFS(task_elem));
1923 ipc_importance_task_reference_internal(task_elem);
1924 }
1925 ipc_importance_unlock();
1926 return task_elem;
1927 }
1928 ipc_importance_unlock();
1929
1930 if (!first_pass)
1931 return IIT_NULL;
1932 first_pass = FALSE;
1933
1934 /* Need to make one - may race with others (be prepared to drop) */
1935 task_elem = (ipc_importance_task_t)zalloc(ipc_importance_task_zone);
1936 if (IIT_NULL == task_elem)
1937 goto retry;
1938
1939 task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */
1940 task_elem->iit_made = (made) ? 1 : 0;
1941 task_elem->iit_task = task; /* take actual ref when we're sure */
1942 task_elem->iit_updateq = NULL;
1943 task_elem->iit_receiver = 0;
1944 task_elem->iit_denap = 0;
1945 task_elem->iit_donor = 0;
1946 task_elem->iit_live_donor = 0;
1947 task_elem->iit_updatepolicy = 0;
1948 task_elem->iit_reserved = 0;
1949 task_elem->iit_filelocks = 0;
1950 task_elem->iit_updatetime = 0;
1951 task_elem->iit_transitions = 0;
1952 task_elem->iit_assertcnt = 0;
1953 task_elem->iit_externcnt = 0;
1954 task_elem->iit_externdrop = 0;
1955 task_elem->iit_legacy_externcnt = 0;
1956 task_elem->iit_legacy_externdrop = 0;
1957#if IIE_REF_DEBUG
1958 ipc_importance_counter_init(&task_elem->iit_elem);
1959#endif
1960 queue_init(&task_elem->iit_kmsgs);
1961 queue_init(&task_elem->iit_inherits);
1962
1963 ipc_importance_lock();
1964 if (!task->active) {
1965 ipc_importance_unlock();
1966 zfree(ipc_importance_task_zone, task_elem);
1967 return IIT_NULL;
1968 }
1969
1970 /* did we lose the race? */
1971 if (IIT_NULL != task->task_imp_base) {
1972 ipc_importance_unlock();
1973 zfree(ipc_importance_task_zone, task_elem);
1974 goto retry;
1975 }
1976
1977 /* we won the race */
1978 task->task_imp_base = task_elem;
1979 task_reference(task);
1980#if DEVELOPMENT || DEBUG
1981 queue_enter(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
1982 task_importance_update_owner_info(task);
1983#endif
1984 ipc_importance_unlock();
1985
1986 return task_elem;
1987}
1988
1989#if DEVELOPMENT || DEBUG
1990void task_importance_update_owner_info(task_t task) {
1991
1992 if (task != TASK_NULL && task->task_imp_base != IIT_NULL) {
1993 ipc_importance_task_t task_elem = task->task_imp_base;
1994
1995 task_elem->iit_bsd_pid = task_pid(task);
1996 if (task->bsd_info) {
1997 strncpy(&task_elem->iit_procname[0], proc_name_address(task->bsd_info), 16);
1998 task_elem->iit_procname[16] = '\0';
1999 } else {
2000 strncpy(&task_elem->iit_procname[0], "unknown", 16);
2001 }
2002 }
2003}
2004#endif
2005
2006/*
2007 * Routine: ipc_importance_reset_locked
2008 * Purpose:
2009 * Reset a task's IPC importance (the task is going away or exec'ing)
2010 *
2011 * Remove the donor bit and legacy externalized assertions from the
2012 * current task importance and see if that wipes out downstream donations.
2013 * Conditions:
2014 * importance lock held.
2015 */
2016
2017static void
2018ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor)
2019{
2020 boolean_t before_donor, after_donor;
2021
2022 /* remove the donor bit, live-donor bit and externalized boosts */
2023 before_donor = ipc_importance_task_is_donor(task_imp);
2024 if (donor) {
2025 task_imp->iit_donor = 0;
2026 }
2027 assert(IIT_LEGACY_EXTERN(task_imp) <= IIT_EXTERN(task_imp));
2028 assert(task_imp->iit_legacy_externcnt <= task_imp->iit_externcnt);
2029 assert(task_imp->iit_legacy_externdrop <= task_imp->iit_externdrop);
2030 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
2031 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
2032
2033 /* assert(IIT_LEGACY_EXTERN(task_imp) <= task_imp->iit_assertcnt); */
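	/* drop the legacy-backed internal assertions, keeping assertcnt consistent with the remaining (non-legacy) externals */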
2034 if (IIT_EXTERN(task_imp) < task_imp->iit_assertcnt) {
2035 task_imp->iit_assertcnt -= IIT_LEGACY_EXTERN(task_imp);
2036 } else {
2037 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
2038 }
2039 task_imp->iit_legacy_externcnt = 0;
2040 task_imp->iit_legacy_externdrop = 0;
2041 after_donor = ipc_importance_task_is_donor(task_imp);
2042
2043#if DEVELOPMENT || DEBUG
2044 if (task_imp->iit_assertcnt > 0 && task_imp->iit_live_donor) {
2045 printf("Live donor task %s[%d] still has %d importance assertions after reset\n",
2046 task_imp->iit_procname, task_imp->iit_bsd_pid, task_imp->iit_assertcnt);
2047 }
2048#endif
2049
2050 /* propagate a downstream drop if there was a change in donor status */
2051 if (after_donor != before_donor) {
2052 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, FALSE);
2053 }
2054}
2055
2056/*
2057 * Routine: ipc_importance_reset
2058 * Purpose:
2059 * Reset a task's IPC importance
2060 *
2061 * The task is being reset, although staying around. Arrange to have the
2062 * external state of the task reset from the importance.
2063 * Conditions:
2064 * importance lock not held.
2065 */
2066
2067void
2068ipc_importance_reset(ipc_importance_task_t task_imp, boolean_t donor)
2069{
2070 if (IIT_NULL == task_imp) {
2071 return;
2072 }
2073 ipc_importance_lock();
2074 ipc_importance_reset_locked(task_imp, donor);
2075 ipc_importance_unlock();
2076}
2077
2078/*
2079 * Routine: ipc_importance_disconnect_task
2080 * Purpose:
2081 * Disconnect a task from its importance.
2082 *
2083 * Clear the task pointer from the importance and drop the
2084 * reference the task held on the importance object. Before
2085 * doing that, reset the effects the current task holds on
2086 * the importance and see if that wipes out downstream donations.
2087 *
2088 * We allow the upstream boosts to continue to affect downstream
2089 * even though the local task is being effectively pulled from
2090 * the chain.
2091 * Conditions:
2092 * Nothing locked.
2093 */
2094void
2095ipc_importance_disconnect_task(task_t task)
2096{
2097 ipc_importance_task_t task_imp;
2098
2099 task_lock(task);
2100 ipc_importance_lock();
2101 task_imp = task->task_imp_base;
2102
2103 /* did somebody beat us to it? */
2104 if (IIT_NULL == task_imp) {
2105 ipc_importance_unlock();
2106 task_unlock(task);
2107 return;
2108 }
2109
2110 /* disconnect the task from this importance */
2111 assert(task_imp->iit_task == task);
2112 task_imp->iit_task = TASK_NULL;
2113 task->task_imp_base = IIT_NULL;
2114 task_unlock(task);
2115
2116	/* reset the effects the current task holds on the importance */
2117 ipc_importance_reset_locked(task_imp, TRUE);
2118
2119 ipc_importance_task_release_locked(task_imp);
2120 /* importance unlocked */
2121
2122 /* deallocate the task now that the importance is unlocked */
2123 task_deallocate(task);
2124}
2125
2126/*
2127 * Routine: ipc_importance_exec_switch_task
2128 * Purpose:
2129 * Switch importance task base from old task to new task in exec.
2130 *
2131 *		Create an IPC importance linkage from the old task to the new task.
2132 *		Once the linkage is created, switch the importance task base
2133 *		from the old task to the new task. After the switch, the linkage
2134 *		represents an importance linkage from the new task to the old task,
2135 *		with watch port importance inheritance linked to the new task.
2136 * Conditions:
2137 * Nothing locked.
2138 * Returns a reference on importance inherit.
2139 */
2140ipc_importance_inherit_t
2141ipc_importance_exec_switch_task(
2142 task_t old_task,
2143 task_t new_task)
2144{
2145 ipc_importance_inherit_t inherit = III_NULL;
2146 ipc_importance_task_t old_task_imp = IIT_NULL;
2147 ipc_importance_task_t new_task_imp = IIT_NULL;
2148
2149 task_importance_reset(old_task);
2150
2151 /* Create an importance linkage from old_task to new_task */
2152 inherit = ipc_importance_inherit_from_task(old_task, new_task);
2153
2154 /* Switch task importance base from old task to new task */
2155 ipc_importance_lock();
2156
2157 old_task_imp = old_task->task_imp_base;
2158 new_task_imp = new_task->task_imp_base;
2159
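	/* cross-swap the importance bases and their task back-pointers between the two tasks */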
2160 old_task_imp->iit_task = new_task;
2161 new_task_imp->iit_task = old_task;
2162
2163 old_task->task_imp_base = new_task_imp;
2164 new_task->task_imp_base = old_task_imp;
2165
2166#if DEVELOPMENT || DEBUG
2167 /*
2168	 * Update the pid and proc name for the importance base, if any
2169 */
2170 task_importance_update_owner_info(new_task);
2171#endif
2172 ipc_importance_unlock();
2173
2174 return inherit;
2175}
2176
2177/*
2178 * Routine: ipc_importance_check_circularity
2179 * Purpose:
2180 * Check if queueing "port" in a message for "dest"
2181 * would create a circular group of ports and messages.
2182 *
2183 * If no circularity (FALSE returned), then "port"
2184 * is changed from "in limbo" to "in transit".
2185 *
2186 * That is, we want to set port->ip_destination == dest,
2187 * but guaranteeing that this doesn't create a circle
2188 * port->ip_destination->ip_destination->... == port
2189 *
2190 * Additionally, if port was successfully changed to "in transit",
2191 * propagate boost assertions from the "in limbo" port to all
2192 * the ports in the chain, and, if the destination task accepts
2193 * boosts, to the destination task.
2194 *
2195 * Conditions:
2196 * No ports locked. References held for "port" and "dest".
2197 */
2198
2199boolean_t
2200ipc_importance_check_circularity(
2201 ipc_port_t port,
2202 ipc_port_t dest)
2203{
2204 ipc_importance_task_t imp_task = IIT_NULL;
2205 ipc_importance_task_t release_imp_task = IIT_NULL;
2206 boolean_t imp_lock_held = FALSE;
2207 int assertcnt = 0;
2208 ipc_port_t base;
2209 struct turnstile *send_turnstile = TURNSTILE_NULL;
2210
2211 assert(port != IP_NULL);
2212 assert(dest != IP_NULL);
2213
2214 if (port == dest)
2215 return TRUE;
2216 base = dest;
2217
2218 /* Check if destination needs a turnstile */
2219 ipc_port_send_turnstile_prepare(dest);
2220
2221 /* port is in limbo, so donation status is safe to latch */
2222 if (port->ip_impdonation != 0) {
2223 imp_lock_held = TRUE;
2224 ipc_importance_lock();
2225 }
2226
2227 /*
2228 * First try a quick check that can run in parallel.
2229 * No circularity if dest is not in transit.
2230 */
2231 ip_lock(port);
2232
2233 /*
2234 * Even if port is just carrying assertions for others,
2235 * we need the importance lock.
2236 */
2237 if (port->ip_impcount > 0 && !imp_lock_held) {
2238 if (!ipc_importance_lock_try()) {
2239 ip_unlock(port);
2240 ipc_importance_lock();
2241 ip_lock(port);
2242 }
2243 imp_lock_held = TRUE;
2244 }
2245
2246 if (ip_lock_try(dest)) {
2247 if (!ip_active(dest) ||
2248 (dest->ip_receiver_name != MACH_PORT_NULL) ||
2249 (dest->ip_destination == IP_NULL))
2250 goto not_circular;
2251
2252 /* dest is in transit; further checking necessary */
2253
2254 ip_unlock(dest);
2255 }
2256 ip_unlock(port);
2257
2258 /*
2259 * We're about to pay the cost to serialize,
2260 * just go ahead and grab importance lock.
2261 */
2262 if (!imp_lock_held) {
2263 ipc_importance_lock();
2264 imp_lock_held = TRUE;
2265 }
2266
2267 ipc_port_multiple_lock(); /* massive serialization */
2268
2269 /*
2270 * Search for the end of the chain (a port not in transit),
2271 * acquiring locks along the way.
2272 */
2273
2274 for (;;) {
2275 ip_lock(base);
2276
2277 if (!ip_active(base) ||
2278 (base->ip_receiver_name != MACH_PORT_NULL) ||
2279 (base->ip_destination == IP_NULL))
2280 break;
2281
2282 base = base->ip_destination;
2283 }
2284
2285 /* all ports in chain from dest to base, inclusive, are locked */
2286
2287 if (port == base) {
2288 /* circularity detected! */
2289
2290 ipc_port_multiple_unlock();
2291
2292 /* port (== base) is in limbo */
2293
2294 assert(ip_active(port));
2295 assert(port->ip_receiver_name == MACH_PORT_NULL);
2296 assert(port->ip_destination == IP_NULL);
2297
2298 base = dest;
2299 while (base != IP_NULL) {
2300 ipc_port_t next;
2301
2302 /* base is in transit or in limbo */
2303
2304 assert(ip_active(base));
2305 assert(base->ip_receiver_name == MACH_PORT_NULL);
2306
2307 next = base->ip_destination;
2308 ip_unlock(base);
2309 base = next;
2310 }
2311
2312 if (imp_lock_held)
2313 ipc_importance_unlock();
2314
2315 ipc_port_send_turnstile_complete(dest);
2316 return TRUE;
2317 }
2318
2319 /*
2320 * The guarantee: lock port while the entire chain is locked.
2321 * Once port is locked, we can take a reference to dest,
2322 * add port to the chain, and unlock everything.
2323 */
2324
2325 ip_lock(port);
2326 ipc_port_multiple_unlock();
2327
2328not_circular:
2329 /* port is in limbo */
2330 imq_lock(&port->ip_messages);
2331
2332 assert(ip_active(port));
2333 assert(port->ip_receiver_name == MACH_PORT_NULL);
2334 assert(port->ip_destination == IP_NULL);
2335
2336 ip_reference(dest);
2337 port->ip_destination = dest;
2338
2339 /* must have been in limbo or still bound to a task */
2340 assert(port->ip_tempowner != 0);
2341
2342 /*
2343 * We delayed dropping assertions from a specific task.
2344 * Cache that info now (we'll drop assertions and the
2345 * task reference below).
2346 */
2347 release_imp_task = port->ip_imp_task;
2348 if (IIT_NULL != release_imp_task) {
2349 port->ip_imp_task = IIT_NULL;
2350 }
2351 assertcnt = port->ip_impcount;
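	/* the cached boost count will be pushed down the new chain below */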
2352
2353 /* take the port out of limbo w.r.t. assertions */
2354 port->ip_tempowner = 0;
2355
2356 /*
2357	 * Set up linkage for the source port if it has a send turnstile, i.e. it has
2358	 * a thread waiting in send, has a port enqueued in it, or has a sync IPC
2359	 * push from a special reply port.
2360 */
2361 if (port_send_turnstile(port)) {
2362 send_turnstile = turnstile_prepare((uintptr_t)port,
2363 port_send_turnstile_address(port),
2364 TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
2365
2366 turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
2367 (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
2368
2369 /* update complete and turnstile complete called after dropping all locks */
2370 }
2371 imq_unlock(&port->ip_messages);
2372
2373 /* now unlock chain */
2374
2375 ip_unlock(port);
2376
2377 for (;;) {
2378
2379 ipc_port_t next;
2380		/* every port along the chain tracks the assertions behind it */
2381 ipc_port_impcount_delta(dest, assertcnt, base);
2382
2383 if (dest == base)
2384 break;
2385
2386 /* port is in transit */
2387
2388 assert(ip_active(dest));
2389 assert(dest->ip_receiver_name == MACH_PORT_NULL);
2390 assert(dest->ip_destination != IP_NULL);
2391 assert(dest->ip_tempowner == 0);
2392
2393 next = dest->ip_destination;
2394 ip_unlock(dest);
2395 dest = next;
2396 }
2397
2398 /* base is not in transit */
2399 assert(!ip_active(base) ||
2400 (base->ip_receiver_name != MACH_PORT_NULL) ||
2401 (base->ip_destination == IP_NULL));
2402
2403 /*
2404 * Find the task to boost (if any).
2405 * We will boost "through" ports that don't know
2406 * about inheritance to deliver receive rights that
2407 * do.
2408 */
2409 if (ip_active(base) && (assertcnt > 0)) {
2410 assert(imp_lock_held);
2411 if (base->ip_tempowner != 0) {
2412 if (IIT_NULL != base->ip_imp_task) {
2413 /* specified tempowner task */
2414 imp_task = base->ip_imp_task;
2415 assert(ipc_importance_task_is_any_receiver_type(imp_task));
2416 }
2417 /* otherwise don't boost current task */
2418
2419 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
2420 ipc_space_t space = base->ip_receiver;
2421
2422 /* only spaces with boost-accepting tasks */
2423 if (space->is_task != TASK_NULL &&
2424 ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base))
2425 imp_task = space->is_task->task_imp_base;
2426 }
2427
2428 /* take reference before unlocking base */
2429 if (imp_task != IIT_NULL) {
2430 ipc_importance_task_reference(imp_task);
2431 }
2432 }
2433
2434 ip_unlock(base);
2435
2436 /*
2437 * Transfer assertions now that the ports are unlocked.
2438 * Avoid extra overhead if transferring to/from the same task.
2439 *
2440 * NOTE: If a transfer is occurring, the new assertions will
2441 * be added to imp_task BEFORE the importance lock is unlocked.
2442 * This is critical - to avoid decrements coming from the kmsgs
2443 * beating the increment to the task.
2444 */
2445 boolean_t transfer_assertions = (imp_task != release_imp_task);
2446
2447 if (imp_task != IIT_NULL) {
2448 assert(imp_lock_held);
2449 if (transfer_assertions)
2450 ipc_importance_task_hold_internal_assertion_locked(imp_task, assertcnt);
2451 }
2452
2453 if (release_imp_task != IIT_NULL) {
2454 assert(imp_lock_held);
2455 if (transfer_assertions)
2456 ipc_importance_task_drop_internal_assertion_locked(release_imp_task, assertcnt);
2457 }
2458
2459 if (imp_lock_held)
2460 ipc_importance_unlock();
2461
2462 /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
2463 if (send_turnstile) {
2464 turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
2465
2466 /* Take the mq lock to call turnstile complete */
2467 imq_lock(&port->ip_messages);
2468 turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL);
2469 send_turnstile = TURNSTILE_NULL;
2470 imq_unlock(&port->ip_messages);
2471 turnstile_cleanup();
2472 }
2473
2474 if (imp_task != IIT_NULL)
2475 ipc_importance_task_release(imp_task);
2476
2477 if (release_imp_task != IIT_NULL)
2478 ipc_importance_task_release(release_imp_task);
2479
2480 return FALSE;
2481}
2482
2483/*
2484 * Routine: ipc_importance_send
2485 * Purpose:
2486 * Post the importance voucher attribute [if sent] or a static
2487 * importance boost depending upon options and conditions.
2488 * Conditions:
2489 * Destination port locked on entry and exit, may be dropped during the call.
2490 * Returns:
2491 *		A boolean identifying if the port lock was temporarily dropped.
2492 */
2493boolean_t
2494ipc_importance_send(
2495 ipc_kmsg_t kmsg,
2496 mach_msg_option_t option)
2497{
2498 ipc_port_t port = (ipc_port_t) kmsg->ikm_header->msgh_remote_port;
2499 boolean_t port_lock_dropped = FALSE;
2500 ipc_importance_elem_t elem;
2501 task_t task;
2502 ipc_importance_task_t task_imp;
2503 kern_return_t kr;
2504
2505 assert(IP_VALID(port));
2506
2507 /* If no donation to be made, return quickly */
2508 if ((port->ip_impdonation == 0) ||
2509 (option & MACH_SEND_NOIMPORTANCE) != 0) {
2510 return port_lock_dropped;
2511 }
2512
2513 task = current_task();
2514
2515 /* If forced sending a static boost, go update the port */
2516 if ((option & MACH_SEND_IMPORTANCE) != 0) {
2517 /* acquire the importance lock while trying to hang on to port lock */
2518 if (!ipc_importance_lock_try()) {
2519 port_lock_dropped = TRUE;
2520 ip_unlock(port);
2521 ipc_importance_lock();
2522 }
2523 goto portupdate;
2524 }
2525
2526 task_imp = task->task_imp_base;
2527 assert(IIT_NULL != task_imp);
2528
2529 /* If the sender can never donate importance, nothing to do */
2530 if (ipc_importance_task_is_never_donor(task_imp)) {
2531 return port_lock_dropped;
2532 }
2533
2534 elem = IIE_NULL;
2535
2536 /* If importance receiver and passing a voucher, look for importance in there */
2537 if (IP_VALID(kmsg->ikm_voucher) &&
2538 ipc_importance_task_is_marked_receiver(task_imp)) {
2539 mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED];
2540 mach_voucher_attr_value_handle_array_size_t val_count;
2541 ipc_voucher_t voucher;
2542
2543 assert(ip_kotype(kmsg->ikm_voucher) == IKOT_VOUCHER);
2544 voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject;
2545
2546 /* check to see if the voucher has an importance attribute */
2547 val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED;
2548 kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher,
2549 vals, &val_count);
2550 assert(KERN_SUCCESS == kr);
2551
2552 /*
2553 * Only use importance associated with our task (either directly
2554 * or through an inherit that donates to our task).
2555 */
2556 if (0 < val_count) {
2557 ipc_importance_elem_t check_elem;
2558
2559 check_elem = (ipc_importance_elem_t)vals[0];
2560 assert(IIE_NULL != check_elem);
2561 if (IIE_TYPE_INHERIT == IIE_TYPE(check_elem)) {
2562 ipc_importance_inherit_t inherit;
2563 inherit = (ipc_importance_inherit_t) check_elem;
2564 if (inherit->iii_to_task == task_imp) {
2565 elem = check_elem;
2566 }
2567 } else if (check_elem == (ipc_importance_elem_t)task_imp) {
2568 elem = check_elem;
2569 }
2570 }
2571 }
2572
2573 /* If we haven't found an importance attribute to send yet, use the task's */
2574 if (IIE_NULL == elem) {
2575 elem = (ipc_importance_elem_t)task_imp;
2576 }
2577
2578 /* take a reference for the message to hold */
2579 ipc_importance_reference_internal(elem);
2580
2581 /* acquire the importance lock while trying to hang on to port lock */
2582 if (!ipc_importance_lock_try()) {
2583 port_lock_dropped = TRUE;
2584 ip_unlock(port);
2585 ipc_importance_lock();
2586 }
2587
2588 /* link kmsg onto the donor element propagation chain */
2589 ipc_importance_kmsg_link(kmsg, elem);
2590	/* elem reference transferred to kmsg */
2591
2592 incr_ref_counter(elem->iie_kmsg_refs_added);
2593
2594 /* If the sender isn't currently a donor, no need to apply boost */
2595 if (!ipc_importance_task_is_donor(task_imp)) {
2596 ipc_importance_unlock();
2597
2598 /* re-acquire port lock, if needed */
2599 if (TRUE == port_lock_dropped)
2600 ip_lock(port);
2601
2602 return port_lock_dropped;
2603 }
2604
2605portupdate:
2606 /* Mark the fact that we are (currently) donating through this message */
2607 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2608
2609 /*
2610 * If we need to relock the port, do it with the importance still locked.
2611	 * This ensures we add the importance boost through the port to
2612 * the task BEFORE anyone else can attempt to undo that operation if
2613 * the sender lost donor status.
2614 */
2615 if (TRUE == port_lock_dropped) {
2616 ip_lock(port);
2617 }
2618
2619 ipc_importance_assert_held();
2620
2621#if IMPORTANCE_TRACE
2622 if (kdebug_enable) {
2623 mach_msg_max_trailer_t *dbgtrailer = (mach_msg_max_trailer_t *)
2624 ((vm_offset_t)kmsg->ikm_header + round_msg(kmsg->ikm_header->msgh_size));
2625 unsigned int sender_pid = dbgtrailer->msgh_audit.val[5];
2626 mach_msg_id_t imp_msgh_id = kmsg->ikm_header->msgh_id;
2627 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START,
2628 task_pid(task), sender_pid, imp_msgh_id, 0, 0);
2629 }
2630#endif /* IMPORTANCE_TRACE */
2631
2632 mach_port_delta_t delta = 1;
2633 boolean_t need_port_lock;
2634 task_imp = IIT_NULL;
2635
2636 /* adjust port boost count (with importance and port locked) */
2637 need_port_lock = ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &task_imp);
2638 /* hold a reference on task_imp */
2639
2640 /* if we need to adjust a task importance as a result, apply that here */
2641 if (IIT_NULL != task_imp && delta != 0) {
2642 assert(delta == 1);
2643
2644		/* if this results in a change of state, propagate the transition */
2645 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, delta)) {
2646
2647 /* can't hold the port lock during task transition(s) */
2648 if (!need_port_lock) {
2649 need_port_lock = TRUE;
2650 ip_unlock(port);
2651 }
2652 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
2653 }
2654 }
2655
2656 if (task_imp) {
2657 ipc_importance_task_release_locked(task_imp);
2658 /* importance unlocked */
2659 } else {
2660 ipc_importance_unlock();
2661 }
2662
2663 if (need_port_lock) {
2664 port_lock_dropped = TRUE;
2665 ip_lock(port);
2666 }
2667
2668 return port_lock_dropped;
2669}
2670
2671/*
2672 * Routine: ipc_importance_inherit_from_kmsg
2673 * Purpose:
2674 * Create a "made" reference for an importance attribute representing
2675 * an inheritance between the sender of a message (if linked) and the
2676 * current task importance. If the message is not linked, a static
2677 * boost may be created, based on the boost state of the message.
2678 *
2679 * Any transfer from kmsg linkage to inherit linkage must be atomic.
2680 *
2681 * If the task is inactive, there isn't any need to return a new reference.
2682 * Conditions:
2683 * Nothing locked on entry. May block.
2684 */
2685static ipc_importance_inherit_t
2686ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg)
2687{
2688 ipc_importance_task_t task_imp = IIT_NULL;
2689 ipc_importance_elem_t from_elem = kmsg->ikm_importance;
2690 ipc_importance_elem_t elem;
2691 task_t task_self = current_task();
2692
2693 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
2694 ipc_importance_inherit_t inherit = III_NULL;
2695 ipc_importance_inherit_t alloc = III_NULL;
2696 boolean_t cleared_self_donation = FALSE;
2697 boolean_t donating;
2698 uint32_t depth = 1;
2699
2700 /* The kmsg must have an importance donor or static boost to proceed */
2701 if (IIE_NULL == kmsg->ikm_importance &&
2702 !MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2703 return III_NULL;
2704 }
2705
2706 /*
2707 * No need to set up an inherit linkage if the dest isn't a receiver
2708 * of one type or the other.
2709 */
2710 if (!ipc_importance_task_is_any_receiver_type(task_self->task_imp_base)) {
2711 ipc_importance_lock();
2712 goto out_locked;
2713 }
2714
2715 /* Grab a reference on the importance of the destination */
2716 task_imp = ipc_importance_for_task(task_self, FALSE);
2717
2718 ipc_importance_lock();
2719
2720 if (IIT_NULL == task_imp) {
2721 goto out_locked;
2722 }
2723
2724 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_inherit_from);
2725
2726 /* If message is already associated with an inherit... */
2727 if (IIE_TYPE_INHERIT == IIE_TYPE(from_elem)) {
2728 ipc_importance_inherit_t from_inherit = (ipc_importance_inherit_t)from_elem;
2729
2730 /* already targeting our task? - just use it */
2731 if (from_inherit->iii_to_task == task_imp) {
2732 /* clear self-donation if not also present in inherit */
2733 if (!from_inherit->iii_donating &&
2734 MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
2735 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2736 cleared_self_donation = TRUE;
2737 }
2738 inherit = from_inherit;
2739
2740 } else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) {
2741 ipc_importance_task_t to_task;
2742 ipc_importance_elem_t unlinked_from;
2743
2744 /*
2745 * Chain too long. Switch to looking
2746 * directly at the from_inherit's to-task
2747 * as our source of importance.
2748 */
2749 to_task = from_inherit->iii_to_task;
2750 ipc_importance_task_reference(to_task);
2751 from_elem = (ipc_importance_elem_t)to_task;
2752 depth = III_DEPTH_RESET | 1;
2753
2754 /* Fixup the kmsg linkage to reflect change */
2755 unlinked_from = ipc_importance_kmsg_unlink(kmsg);
2756 assert(unlinked_from == (ipc_importance_elem_t)from_inherit);
2757 ipc_importance_kmsg_link(kmsg, from_elem);
2758 ipc_importance_inherit_release_locked(from_inherit);
2759 /* importance unlocked */
2760 ipc_importance_lock();
2761
2762 } else {
2763 /* inheriting from an inherit */
2764 depth = from_inherit->iii_depth + 1;
2765 }
2766 }
2767
2768 /*
2769 * Don't allow a task to inherit from itself (would keep it permanently
2770 * boosted even if all other donors to the task went away).
2771 */
2772
2773 if (from_elem == (ipc_importance_elem_t)task_imp) {
2774 goto out_locked;
2775 }
2776
2777 /*
2778 * But if the message isn't associated with any linked source, it is
2779 * intended to be permanently boosting (static boost from kernel).
2780 * In that case DO let the process permanently boost itself.
2781 */
2782 if (IIE_NULL == from_elem) {
2783 assert(MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
2784 ipc_importance_task_reference_internal(task_imp);
2785 from_elem = (ipc_importance_elem_t)task_imp;
2786 }
2787
2788 /*
2789 * Now that we have the from_elem figured out,
2790 * check to see if we already have an inherit for this pairing
2791 */
2792 while (III_NULL == inherit) {
2793 inherit = ipc_importance_inherit_find(from_elem, task_imp, depth);
2794
2795		/* Do we have to allocate a new inherit? */
2796 if (III_NULL == inherit) {
2797 if (III_NULL != alloc) {
2798 break;
2799 }
2800
2801 /* allocate space */
2802 ipc_importance_unlock();
2803 alloc = (ipc_importance_inherit_t)
2804 zalloc(ipc_importance_inherit_zone);
2805 ipc_importance_lock();
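			/* lock was dropped for the allocation; loop to re-check for a racing insert of this pairing */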
2806 }
2807 }
2808
2809 /* snapshot the donating status while we have importance locked */
2810 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);
2811
2812 if (III_NULL != inherit) {
2813 /* We found one, piggyback on that */
2814 assert(0 < III_REFS(inherit));
2815 assert(0 < IIE_REFS(inherit->iii_from_elem));
2816 assert(inherit->iii_externcnt >= inherit->iii_made);
2817
2818 /* add in a made reference */
2819 if (0 == inherit->iii_made++) {
2820 assert(III_REFS_MAX > III_REFS(inherit));
2821 ipc_importance_inherit_reference_internal(inherit);
2822 }
2823
2824 /* Reflect the inherit's change of status into the task boosts */
2825 if (0 == III_EXTERN(inherit)) {
2826 assert(!inherit->iii_donating);
2827 inherit->iii_donating = donating;
2828 if (donating) {
2829 task_imp->iit_externcnt += inherit->iii_externcnt;
2830 task_imp->iit_externdrop += inherit->iii_externdrop;
2831 }
2832 } else {
2833 assert(donating == inherit->iii_donating);
2834 }
2835
2836		/* add in an external reference for this use of the inherit */
2837 inherit->iii_externcnt++;
2838 } else {
2839 /* initialize the previously allocated space */
2840 inherit = alloc;
2841 inherit->iii_bits = IIE_TYPE_INHERIT | 1;
2842 inherit->iii_made = 1;
2843 inherit->iii_externcnt = 1;
2844 inherit->iii_externdrop = 0;
2845 inherit->iii_depth = depth;
2846 inherit->iii_to_task = task_imp;
2847 inherit->iii_from_elem = IIE_NULL;
2848 queue_init(&inherit->iii_kmsgs);
2849
2850 if (donating) {
2851 inherit->iii_donating = TRUE;
2852 } else {
2853 inherit->iii_donating = FALSE;
2854 }
2855
2856 /*
2857 * Chain our new inherit on the element it inherits from.
2858 * The new inherit takes our reference on from_elem.
2859 */
2860 ipc_importance_inherit_link(inherit, from_elem);
2861
2862#if IIE_REF_DEBUG
2863 ipc_importance_counter_init(&inherit->iii_elem);
2864 from_elem->iie_kmsg_refs_inherited++;
2865 task_imp->iit_elem.iie_task_refs_inherited++;
2866#endif
2867 }
2868
2869 out_locked:
2870 /*
2871 * for those paths that came straight here: snapshot the donating status
2872 * (this should match previous snapshot for other paths).
2873 */
2874 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits);
2875
2876 /* unlink the kmsg inheritance (if any) */
2877 elem = ipc_importance_kmsg_unlink(kmsg);
2878 assert(elem == from_elem);
2879
2880 /* If found inherit and donating, reflect that in the task externcnt */
2881 if (III_NULL != inherit && donating) {
2882 task_imp->iit_externcnt++;
2883		/* The owner of the receive right might have changed; take the internal assertion */
2884 ipc_importance_task_hold_internal_assertion_locked(task_imp, 1);
2885 /* may have dropped and retaken importance lock */
2886 }
2887
2888 /* If we didn't create a new inherit, we have some resources to release */
2889 if (III_NULL == inherit || inherit != alloc) {
2890 if (IIE_NULL != from_elem) {
2891 if (III_NULL != inherit) {
2892 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
2893 } else {
2894 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
2895 }
2896 ipc_importance_release_locked(from_elem);
2897 /* importance unlocked */
2898 } else {
2899 ipc_importance_unlock();
2900 }
2901
2902 if (IIT_NULL != task_imp) {
2903 if (III_NULL != inherit) {
2904 incr_ref_counter(task_imp->iit_elem.iie_task_refs_coalesced);
2905 }
2906 ipc_importance_task_release(task_imp);
2907 }
2908
2909 if (III_NULL != alloc)
2910 zfree(ipc_importance_inherit_zone, alloc);
2911 } else {
2912 /* from_elem and task_imp references transferred to new inherit */
2913 ipc_importance_unlock();
2914 }
2915
2916 /*
2917 * decrement port boost count
2918	 * This is OK to do without the importance lock, as we atomically
2919	 * unlinked the kmsg and snapshotted the donating state while holding
2920	 * the importance lock.
2921 */
2922 if (donating || cleared_self_donation) {
2923 ip_lock(port);
2924 /* drop importance from port and destination task */
2925 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
2926 ip_unlock(port);
2927 }
2928 }
2929
2930 if (III_NULL != inherit) {
2931 /* have an associated importance attr, even if currently not donating */
2932 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2933 } else {
2934 /* we won't have an importance attribute associated with our message */
2935 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2936 }
2937
2938 return inherit;
2939}
2940
2941/*
2942 * Routine: ipc_importance_inherit_from_task
2943 * Purpose:
2944 * Create a reference for an importance attribute representing
2945 * an inheritance between the to_task and from_task. The iii
2946 * created will be marked as III_FLAGS_FOR_OTHERS.
2947 *
2948 * It will not dedup any iii which are not marked as III_FLAGS_FOR_OTHERS.
2949 *
2950 * If the task is inactive, there isn't any need to return a new reference.
2951 * Conditions:
2952 * Nothing locked on entry. May block.
2953 * It should not be called from voucher subsystem.
2954 */
2955static ipc_importance_inherit_t
2956ipc_importance_inherit_from_task(
2957 task_t from_task,
2958 task_t to_task)
2959{
2960 ipc_importance_task_t to_task_imp = IIT_NULL;
2961 ipc_importance_task_t from_task_imp = IIT_NULL;
2962 ipc_importance_elem_t from_elem = IIE_NULL;
2963
2964 ipc_importance_inherit_t inherit = III_NULL;
2965 ipc_importance_inherit_t alloc = III_NULL;
2966 boolean_t donating;
2967 uint32_t depth = 1;
2968
2969 to_task_imp = ipc_importance_for_task(to_task, FALSE);
2970 from_task_imp = ipc_importance_for_task(from_task, FALSE);
2971 from_elem = (ipc_importance_elem_t)from_task_imp;
2972
2973 ipc_importance_lock();
2974
2975 if (IIT_NULL == to_task_imp || IIT_NULL == from_task_imp) {
2976 goto out_locked;
2977 }
2978
2979 /*
2980 * No need to set up an inherit linkage if the to_task or from_task
2981 * isn't a receiver of one type or the other.
2982 */
2983 if (!ipc_importance_task_is_any_receiver_type(to_task_imp) ||
2984 !ipc_importance_task_is_any_receiver_type(from_task_imp)) {
2985 goto out_locked;
2986 }
2987
2988 /* Do not allow to create a linkage to self */
2989 if (to_task_imp == from_task_imp) {
2990 goto out_locked;
2991 }
2992
2993 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_added_inherit_from);
2994 incr_ref_counter(from_elem->iie_kmsg_refs_added);
2995
2996 /*
2997 * Now that we have the from_elem figured out,
2998 * check to see if we already have an inherit for this pairing
2999 */
3000 while (III_NULL == inherit) {
3001 inherit = ipc_importance_inherit_find(from_elem, to_task_imp, depth);
3002
3003		/* Do we have to allocate a new inherit? */
3004 if (III_NULL == inherit) {
3005 if (III_NULL != alloc) {
3006 break;
3007 }
3008
3009 /* allocate space */
3010 ipc_importance_unlock();
3011 alloc = (ipc_importance_inherit_t)
3012 zalloc(ipc_importance_inherit_zone);
3013 ipc_importance_lock();
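			/* lock was dropped for the allocation; loop to re-check for a racing insert of this pairing */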
3014 }
3015 }
3016
3017 /* snapshot the donating status while we have importance locked */
3018 donating = ipc_importance_task_is_donor(from_task_imp);
3019
3020 if (III_NULL != inherit) {
3021 /* We found one, piggyback on that */
3022 assert(0 < III_REFS(inherit));
3023 assert(0 < IIE_REFS(inherit->iii_from_elem));
3024
3025 /* Take a reference for inherit */
3026 assert(III_REFS_MAX > III_REFS(inherit));
3027 ipc_importance_inherit_reference_internal(inherit);
3028
3029 /* Reflect the inherit's change of status into the task boosts */
3030 if (0 == III_EXTERN(inherit)) {
3031 assert(!inherit->iii_donating);
3032 inherit->iii_donating = donating;
3033 if (donating) {
3034 to_task_imp->iit_externcnt += inherit->iii_externcnt;
3035 to_task_imp->iit_externdrop += inherit->iii_externdrop;
3036 }
3037 } else {
3038 assert(donating == inherit->iii_donating);
3039 }
3040
3041		/* add in an external reference for this use of the inherit */
3042 inherit->iii_externcnt++;
3043 } else {
3044 /* initialize the previously allocated space */
3045 inherit = alloc;
3046 inherit->iii_bits = IIE_TYPE_INHERIT | 1;
3047 inherit->iii_made = 0;
3048 inherit->iii_externcnt = 1;
3049 inherit->iii_externdrop = 0;
3050 inherit->iii_depth = depth;
3051 inherit->iii_to_task = to_task_imp;
3052 inherit->iii_from_elem = IIE_NULL;
3053 queue_init(&inherit->iii_kmsgs);
3054
3055 if (donating) {
3056 inherit->iii_donating = TRUE;
3057 } else {
3058 inherit->iii_donating = FALSE;
3059 }
3060
3061 /*
3062 * Chain our new inherit on the element it inherits from.
3063 * The new inherit takes our reference on from_elem.
3064 */
3065 ipc_importance_inherit_link(inherit, from_elem);
3066
3067#if IIE_REF_DEBUG
3068 ipc_importance_counter_init(&inherit->iii_elem);
3069 from_elem->iie_kmsg_refs_inherited++;
3070		to_task_imp->iit_elem.iie_task_refs_inherited++;
3071#endif
3072 }
3073
3074out_locked:
3075
3076 /* If found inherit and donating, reflect that in the task externcnt */
3077 if (III_NULL != inherit && donating) {
3078 to_task_imp->iit_externcnt++;
3079 /* take the internal assertion */
3080 ipc_importance_task_hold_internal_assertion_locked(to_task_imp, 1);
3081 /* may have dropped and retaken importance lock */
3082 }
3083
3084 /* If we didn't create a new inherit, we have some resources to release */
3085 if (III_NULL == inherit || inherit != alloc) {
3086 if (IIE_NULL != from_elem) {
3087 if (III_NULL != inherit) {
3088 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
3089 } else {
3090 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
3091 }
3092 ipc_importance_release_locked(from_elem);
3093 /* importance unlocked */
3094 } else {
3095 ipc_importance_unlock();
3096 }
3097
3098 if (IIT_NULL != to_task_imp) {
3099 if (III_NULL != inherit) {
3100 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_coalesced);
3101 }
3102 ipc_importance_task_release(to_task_imp);
3103 }
3104
3105 if (III_NULL != alloc) {
3106 zfree(ipc_importance_inherit_zone, alloc);
3107 }
3108 } else {
3109 /* from_elem and to_task_imp references transferred to new inherit */
3110 ipc_importance_unlock();
3111 }
3112
3113 return inherit;
3114}
3115
3116/*
3117 * Routine: ipc_importance_receive
3118 * Purpose:
3119 * Process importance attributes in a received message.
3120 *
3121 * If an importance voucher attribute was sent, transform
3122 * that into an attribute value reflecting the inheritance
3123 * from the sender to the receiver.
3124 *
3125 * If a static boost is received (or the voucher isn't on
3126 * a voucher-based boost), export a static boost.
3127 * Conditions:
3128 * Nothing locked.
3129 */
3130void
3131ipc_importance_receive(
3132 ipc_kmsg_t kmsg,
3133 mach_msg_option_t option)
3134{
3135 unsigned int sender_pid = ((mach_msg_max_trailer_t *)
3136 ((vm_offset_t)kmsg->ikm_header +
3137 round_msg(kmsg->ikm_header->msgh_size)))->msgh_audit.val[5];
3138 task_t task_self = current_task();
3139 int impresult = -1;
3140
3141 /* convert to a voucher with an inherit importance attribute? */
3142 if ((option & MACH_RCV_VOUCHER) != 0) {
3143 uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) +
3144 sizeof(mach_voucher_attr_value_handle_t)];
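		/* sized for a copy-all recipe plus an importance recipe carrying one attribute value handle */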
3145 ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0;
3146 ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes;
3147 ipc_voucher_t recv_voucher;
3148 mach_voucher_attr_value_handle_t handle;
3149 ipc_importance_inherit_t inherit;
3150 kern_return_t kr;
3151
3152 /* set up recipe to copy the old voucher */
3153 if (IP_VALID(kmsg->ikm_voucher)) {
3154 ipc_voucher_t sent_voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject;
3155
3156 recipe->key = MACH_VOUCHER_ATTR_KEY_ALL;
3157 recipe->command = MACH_VOUCHER_ATTR_COPY;
3158 recipe->previous_voucher = sent_voucher;
3159 recipe->content_size = 0;
3160 recipe_size += sizeof(*recipe);
3161 }
3162
3163 /*
3164 * create an inheritance attribute from the kmsg (may be NULL)
3165 * transferring any boosts from the kmsg linkage through the
3166 * port directly to the new inheritance object.
3167 */
3168 inherit = ipc_importance_inherit_from_kmsg(kmsg);
3169 handle = (mach_voucher_attr_value_handle_t)inherit;
3170
3171 assert(IIE_NULL == kmsg->ikm_importance);
3172
3173 /*
3174 * Only create a new voucher if we have an inherit object
3175 * (from the ikm_importance field of the incoming message), OR
3176 * we have a valid incoming voucher. If we have neither of
3177 * these things then there is no need to create a new voucher.
3178 */
3179 if (IP_VALID(kmsg->ikm_voucher) || inherit != III_NULL) {
3180 /* replace the importance attribute with the handle we created */
3181 /* our made reference on the inherit is donated to the voucher */
3182 recipe = (ipc_voucher_attr_recipe_t)&recipes[recipe_size];
3183 recipe->key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE;
3184 recipe->command = MACH_VOUCHER_ATTR_SET_VALUE_HANDLE;
3185 recipe->previous_voucher = IPC_VOUCHER_NULL;
3186 recipe->content_size = sizeof(mach_voucher_attr_value_handle_t);
3187 *(mach_voucher_attr_value_handle_t *)(void *)recipe->content = handle;
3188 recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t);
3189
3190 kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control,
3191 recipes,
3192 recipe_size,
3193 &recv_voucher);
3194 assert(KERN_SUCCESS == kr);
3195
3196 /* swap the voucher port (and set voucher bits in case it didn't already exist) */
3197 kmsg->ikm_header->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16);
3198 ipc_port_release_send(kmsg->ikm_voucher);
3199 kmsg->ikm_voucher = convert_voucher_to_port(recv_voucher);
3200 if (III_NULL != inherit)
3201 impresult = 2;
3202 }
3203 } else { /* Don't want a voucher */
3204
3205 /* got linked importance? have to drop */
3206 if (IIE_NULL != kmsg->ikm_importance) {
3207 ipc_importance_elem_t elem;
3208
3209 ipc_importance_lock();
3210 elem = ipc_importance_kmsg_unlink(kmsg);
3211#if IIE_REF_DEBUG
3212 elem->iie_kmsg_refs_dropped++;
3213#endif
3214 ipc_importance_release_locked(elem);
3215 /* importance unlocked */
3216 }
3217
3218 /* With kmsg unlinked, can safely examine message importance attribute. */
3219 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
3220 ipc_importance_task_t task_imp = task_self->task_imp_base;
3221 ipc_port_t port = kmsg->ikm_header->msgh_remote_port;
3222
3223			/* The owner of the receive right might have changed; take the internal assertion */
3224 if (KERN_SUCCESS == ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
3225 ipc_importance_task_externalize_legacy_assertion(task_imp, 1, sender_pid);
3226 impresult = 1;
3227 } else {
3228 /* The importance boost never applied to task (clear the bit) */
3229 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3230 impresult = 0;
3231 }
3232
3233 /* Drop the boost on the port and the owner of the receive right */
3234 ip_lock(port);
3235 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3236 ip_unlock(port);
3237 }
3238 }
3239 }
3240
3241#if IMPORTANCE_TRACE
3242 if (-1 < impresult)
3243 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE,
3244 sender_pid, task_pid(task_self),
3245 kmsg->ikm_header->msgh_id, impresult, 0);
3246	if (impresult == 2) {
3247 /*
3248 * This probe only covers new voucher-based path. Legacy importance
3249 * will trigger the probe in ipc_importance_task_externalize_assertion()
3250 * above and have impresult==1 here.
3251 */
3252 DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self), int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt);
3253 }
3254#endif /* IMPORTANCE_TRACE */
3255}
3256
3257/*
3258 * Routine: ipc_importance_unreceive
3259 * Purpose:
3260 * Undo receive of importance attributes in a message.
3261 *
3262 * Conditions:
3263 * Nothing locked.
3264 */
3265void
3266ipc_importance_unreceive(
3267 ipc_kmsg_t kmsg,
3268 mach_msg_option_t __unused option)
3269{
3270 /* importance should already be in the voucher and out of the kmsg */
3271 assert(IIE_NULL == kmsg->ikm_importance);
3272
3273 /* See if there is a legacy boost to be dropped from receiver */
3274 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
3275 ipc_importance_task_t task_imp;
3276
3277 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3278 task_imp = current_task()->task_imp_base;
3279 if (!IP_VALID(kmsg->ikm_voucher) && IIT_NULL != task_imp) {
3280 ipc_importance_task_drop_legacy_external_assertion(task_imp, 1);
3281 }
3282 /*
3283 * ipc_kmsg_copyout_dest() will consume the voucher
3284 * and any contained importance.
3285 */
3286 }
3287}
3288
3289/*
3290 * Routine: ipc_importance_clean
3291 * Purpose:
3292 * Clean up importance state in a kmsg that is being cleaned.
3293 * Unlink the importance chain if one was set up, and drop
3294 * the reference this kmsg held on the donor. Then check to
3295 *		see if importance was carried to the port, and remove that if
3296 * needed.
3297 * Conditions:
3298 * Nothing locked.
3299 */
3300void
3301ipc_importance_clean(
3302 ipc_kmsg_t kmsg)
3303{
3304 ipc_port_t port;
3305
3306 /* Is the kmsg still linked? If so, remove that first */
3307 if (IIE_NULL != kmsg->ikm_importance) {
3308 ipc_importance_elem_t elem;
3309
3310 ipc_importance_lock();
3311 elem = ipc_importance_kmsg_unlink(kmsg);
3312 assert(IIE_NULL != elem);
3313 ipc_importance_release_locked(elem);
3314 /* importance unlocked */
3315 }
3316
3317 /* See if there is a legacy importance boost to be dropped from port */
3318 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)) {
3319 kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3320 port = kmsg->ikm_header->msgh_remote_port;
3321 if (IP_VALID(port)) {
3322 ip_lock(port);
3323 /* inactive ports already had their importance boosts dropped */
3324 if (!ip_active(port) ||
3325 ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3326 ip_unlock(port);
3327 }
3328 }
3329 }
3330}
3331
3332void
3333ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)
3334{
3335 assert(IIE_NULL == kmsg->ikm_importance);
3336 assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits));
3337}
3338
3339/*
3340 * IPC Importance Attribute Manager definition
3341 */
3342
3343static kern_return_t
3344ipc_importance_release_value(
3345 ipc_voucher_attr_manager_t manager,
3346 mach_voucher_attr_key_t key,
3347 mach_voucher_attr_value_handle_t value,
3348 mach_voucher_attr_value_reference_t sync);
3349
3350static kern_return_t
3351ipc_importance_get_value(
3352 ipc_voucher_attr_manager_t manager,
3353 mach_voucher_attr_key_t key,
3354 mach_voucher_attr_recipe_command_t command,
3355 mach_voucher_attr_value_handle_array_t prev_values,
3356 mach_voucher_attr_value_handle_array_size_t prev_value_count,
3357 mach_voucher_attr_content_t content,
3358 mach_voucher_attr_content_size_t content_size,
3359 mach_voucher_attr_value_handle_t *out_value,
3360 mach_voucher_attr_value_flags_t *out_flags,
3361 ipc_voucher_t *out_value_voucher);
3362
3363static kern_return_t
3364ipc_importance_extract_content(
3365 ipc_voucher_attr_manager_t manager,
3366 mach_voucher_attr_key_t key,
3367 mach_voucher_attr_value_handle_array_t values,
3368 mach_voucher_attr_value_handle_array_size_t value_count,
3369 mach_voucher_attr_recipe_command_t *out_command,
3370 mach_voucher_attr_content_t out_content,
3371 mach_voucher_attr_content_size_t *in_out_content_size);
3372
3373static kern_return_t
3374ipc_importance_command(
3375 ipc_voucher_attr_manager_t manager,
3376 mach_voucher_attr_key_t key,
3377 mach_voucher_attr_value_handle_array_t values,
3378 mach_msg_type_number_t value_count,
3379 mach_voucher_attr_command_t command,
3380 mach_voucher_attr_content_t in_content,
3381 mach_voucher_attr_content_size_t in_content_size,
3382 mach_voucher_attr_content_t out_content,
3383 mach_voucher_attr_content_size_t *out_content_size);
3384
3385static void
3386ipc_importance_manager_release(
3387 ipc_voucher_attr_manager_t manager);
3388
3389struct ipc_voucher_attr_manager ipc_importance_manager = {
3390 .ivam_release_value = ipc_importance_release_value,
3391 .ivam_get_value = ipc_importance_get_value,
3392 .ivam_extract_content = ipc_importance_extract_content,
3393 .ivam_command = ipc_importance_command,
3394 .ivam_release = ipc_importance_manager_release,
3395 .ivam_flags = IVAM_FLAGS_NONE,
3396};
3397
3398#define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key))
3399#define IMPORTANCE_ASSERT_MANAGER(manager) assert(&ipc_importance_manager == (manager))
3400
3401/*
3402 * Routine: ipc_importance_release_value [Voucher Attribute Manager Interface]
3403 * Purpose:
3404 * Release what the voucher system believes is the last "made" reference
3405 * on an importance attribute value handle. The sync parameter is used to
3406 * avoid races with new made references concurrently being returned to the
3407 * voucher system in other threads.
3408 * Conditions:
3409 * Nothing locked on entry. May block.
3410 */
3411static kern_return_t
3412ipc_importance_release_value(
3413 ipc_voucher_attr_manager_t __assert_only manager,
3414 mach_voucher_attr_key_t __assert_only key,
3415 mach_voucher_attr_value_handle_t value,
3416 mach_voucher_attr_value_reference_t sync)
3417{
3418 ipc_importance_elem_t elem;
3419
3420 IMPORTANCE_ASSERT_MANAGER(manager);
3421 IMPORTANCE_ASSERT_KEY(key);
3422 assert(0 < sync);
3423
3424 elem = (ipc_importance_elem_t)value;
3425
3426 ipc_importance_lock();
3427
	/* Any outstanding made refs? */
3429 if (sync != elem->iie_made) {
3430 assert(sync < elem->iie_made);
3431 ipc_importance_unlock();
3432 return KERN_FAILURE;
3433 }
3434
3435 /* clear made */
3436 elem->iie_made = 0;
3437
	/*
	 * If there are pending external boosts represented by this attribute,
	 * drop them from the appropriate task.
	 */
3442 if (IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
3443 ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
3444
3445 assert(inherit->iii_externcnt >= inherit->iii_externdrop);
3446
3447 if (inherit->iii_donating) {
3448 ipc_importance_task_t imp_task = inherit->iii_to_task;
3449 uint32_t assertcnt = III_EXTERN(inherit);
3450
3451 assert(ipc_importance_task_is_any_receiver_type(imp_task));
3452 assert(imp_task->iit_externcnt >= inherit->iii_externcnt);
3453 assert(imp_task->iit_externdrop >= inherit->iii_externdrop);
3454 imp_task->iit_externcnt -= inherit->iii_externcnt;
3455 imp_task->iit_externdrop -= inherit->iii_externdrop;
3456 inherit->iii_externcnt = 0;
3457 inherit->iii_externdrop = 0;
3458 inherit->iii_donating = FALSE;
3459
3460 /* adjust the internal assertions - and propagate if needed */
3461 if (ipc_importance_task_check_transition(imp_task, IIT_UPDATE_DROP, assertcnt)) {
3462 ipc_importance_task_propagate_assertion_locked(imp_task, IIT_UPDATE_DROP, TRUE);
3463 }
3464 } else {
3465 inherit->iii_externcnt = 0;
3466 inherit->iii_externdrop = 0;
3467 }
3468 }
3469
3470 /* drop the made reference on elem */
3471 ipc_importance_release_locked(elem);
3472 /* returns unlocked */
3473
3474 return KERN_SUCCESS;
3475}
3476
3477
3478/*
3479 * Routine: ipc_importance_get_value [Voucher Attribute Manager Interface]
3480 * Purpose:
3481 * Convert command and content data into a reference on a [potentially new]
3482 * attribute value. The importance attribute manager will only allow the
3483 * caller to get a value for the current task's importance, or to redeem
3484 * an importance attribute from an existing voucher.
3485 * Conditions:
3486 * Nothing locked on entry. May block.
3487 */
3488static kern_return_t
3489ipc_importance_get_value(
3490 ipc_voucher_attr_manager_t __assert_only manager,
3491 mach_voucher_attr_key_t __assert_only key,
3492 mach_voucher_attr_recipe_command_t command,
3493 mach_voucher_attr_value_handle_array_t prev_values,
3494 mach_voucher_attr_value_handle_array_size_t prev_value_count,
3495 mach_voucher_attr_content_t __unused content,
3496 mach_voucher_attr_content_size_t content_size,
3497 mach_voucher_attr_value_handle_t *out_value,
3498 mach_voucher_attr_value_flags_t *out_flags,
3499 ipc_voucher_t *out_value_voucher)
3500{
3501 ipc_importance_elem_t elem;
3502 task_t self;
3503
3504 IMPORTANCE_ASSERT_MANAGER(manager);
3505 IMPORTANCE_ASSERT_KEY(key);
3506
3507 if (0 != content_size)
3508 return KERN_INVALID_ARGUMENT;
3509
3510 *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE;
3511 /* never an out voucher */
3512
3513 switch (command) {
3514
3515 case MACH_VOUCHER_ATTR_REDEEM:
3516
3517 /* redeem of previous values is the value */
3518 if (0 < prev_value_count) {
3519 elem = (ipc_importance_elem_t)prev_values[0];
3520 assert(IIE_NULL != elem);
3521
3522 ipc_importance_lock();
3523 assert(0 < elem->iie_made);
3524 elem->iie_made++;
3525 ipc_importance_unlock();
3526
3527 *out_value = prev_values[0];
3528 return KERN_SUCCESS;
3529 }
3530
3531 /* redeem of default is default */
3532 *out_value = 0;
3533 *out_value_voucher = IPC_VOUCHER_NULL;
3534 return KERN_SUCCESS;
3535
3536 case MACH_VOUCHER_ATTR_IMPORTANCE_SELF:
3537 self = current_task();
3538
3539 elem = (ipc_importance_elem_t)ipc_importance_for_task(self, TRUE);
3540 /* made reference added (or IIE_NULL which isn't referenced) */
3541
3542 *out_value = (mach_voucher_attr_value_handle_t)elem;
3543 *out_value_voucher = IPC_VOUCHER_NULL;
3544 return KERN_SUCCESS;
3545
3546 default:
3547 /*
3548 * every other command is unknown
3549 *
3550 * Specifically, there is no mechanism provided to construct an
3551 * importance attribute for a task/process from just a pid or
3552 * task port. It has to be copied (or redeemed) from a previous
3553 * voucher that has it.
3554 */
3555 return KERN_INVALID_ARGUMENT;
3556 }
3557}
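/*
 * Illustrative sketch (not part of this file's interface): userspace
 * reaches the MACH_VOUCHER_ATTR_IMPORTANCE_SELF case above by handing a
 * recipe naming the importance key to host_create_mach_voucher().  The
 * variable names are assumptions for the example; unset recipe fields are
 * zero-initialized, matching the zero content_size this manager requires.
 *
 *	mach_voucher_attr_recipe_data_t recipe = {
 *		.key     = MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 *		.command = MACH_VOUCHER_ATTR_IMPORTANCE_SELF,
 *	};
 *	mach_port_t voucher = MACH_PORT_NULL;
 *	kern_return_t kr = host_create_mach_voucher(mach_host_self(),
 *	    (mach_voucher_attr_raw_recipe_array_t)&recipe,
 *	    sizeof(recipe), &voucher);
 */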
3558
3559/*
3560 * Routine: ipc_importance_extract_content [Voucher Attribute Manager Interface]
3561 * Purpose:
3562 * Extract meaning from the attribute value present in a voucher. While
3563 * the real goal is to provide commands and data that can reproduce the
3564 * voucher's value "out of thin air", this isn't possible with importance
3565 * attribute values. Instead, return debug info to help track down dependencies.
3566 * Conditions:
3567 * Nothing locked on entry. May block.
3568 */
3569static kern_return_t
3570ipc_importance_extract_content(
3571 ipc_voucher_attr_manager_t __assert_only manager,
3572 mach_voucher_attr_key_t __assert_only key,
3573 mach_voucher_attr_value_handle_array_t values,
3574 mach_voucher_attr_value_handle_array_size_t value_count,
3575 mach_voucher_attr_recipe_command_t *out_command,
3576 mach_voucher_attr_content_t out_content,
3577 mach_voucher_attr_content_size_t *in_out_content_size)
3578{
3579 mach_voucher_attr_content_size_t size = 0;
3580 ipc_importance_elem_t elem;
3581 unsigned int i;
3582
3583 IMPORTANCE_ASSERT_MANAGER(manager);
3584 IMPORTANCE_ASSERT_KEY(key);
3585
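	/*
	 * The emitted content is a human-readable donation chain, e.g.
	 * "Importance for pid 123" for a plain task element, or
	 * "Importance for pid 123 (1 of 2 boosts) donated from pid 456"
	 * for an inherit, following iii_from_elem links until a task
	 * element terminates the chain (pids here are illustrative).
	 */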
3586 /* the first non-default value provides the data */
3587 for (i = 0; i < value_count ; i++) {
3588 elem = (ipc_importance_elem_t)values[i];
3589 if (IIE_NULL == elem)
3590 continue;
3591
3592 snprintf((char *)out_content, *in_out_content_size, "Importance for pid ");
3593 size = (mach_voucher_attr_content_size_t)strlen((char *)out_content);
3594
		for (;;) {
3596 ipc_importance_inherit_t inherit = III_NULL;
3597 ipc_importance_task_t task_imp;
3598 task_t task;
3599 int t_pid;
3600
3601 if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
3602 task_imp = (ipc_importance_task_t)elem;
3603 task = task_imp->iit_task;
3604 t_pid = (TASK_NULL != task) ?
3605 task_pid(task) : -1;
3606 snprintf((char *)out_content + size, *in_out_content_size - size, "%d", t_pid);
3607 } else {
3608 inherit = (ipc_importance_inherit_t)elem;
3609 task_imp = inherit->iii_to_task;
3610 task = task_imp->iit_task;
3611 t_pid = (TASK_NULL != task) ?
3612 task_pid(task) : -1;
3613 snprintf((char *)out_content + size, *in_out_content_size - size,
3614 "%d (%d of %d boosts) %s from pid ", t_pid,
3615 III_EXTERN(inherit), inherit->iii_externcnt,
3616 (inherit->iii_donating) ? "donated" : "linked");
3617 }
3618
3619 size = (mach_voucher_attr_content_size_t)strlen((char *)out_content);
3620
3621 if (III_NULL == inherit)
3622 break;
3623
3624 elem = inherit->iii_from_elem;
3625 }
		size++; /* account for the terminating NUL */
3627 }
3628 *out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */
3629 *in_out_content_size = size;
3630 return KERN_SUCCESS;
3631}
3632
3633/*
3634 * Routine: ipc_importance_command [Voucher Attribute Manager Interface]
3635 * Purpose:
3636 * Run commands against the importance attribute value found in a voucher.
3637 * No such commands are currently supported.
3638 * Conditions:
3639 * Nothing locked on entry. May block.
3640 */
3641static kern_return_t
3642ipc_importance_command(
3643 ipc_voucher_attr_manager_t __assert_only manager,
3644 mach_voucher_attr_key_t __assert_only key,
3645 mach_voucher_attr_value_handle_array_t values,
3646 mach_msg_type_number_t value_count,
3647 mach_voucher_attr_command_t command,
3648 mach_voucher_attr_content_t in_content,
3649 mach_voucher_attr_content_size_t in_content_size,
3650 mach_voucher_attr_content_t out_content,
3651 mach_voucher_attr_content_size_t *out_content_size)
3652{
3653 ipc_importance_inherit_t inherit;
3654 ipc_importance_task_t to_task;
3655 uint32_t refs, *outrefsp;
3656 mach_msg_type_number_t i;
3657 uint32_t externcnt;
3658
3659 IMPORTANCE_ASSERT_MANAGER(manager);
3660 IMPORTANCE_ASSERT_KEY(key);
3661
3662 if (in_content_size != sizeof(refs) ||
3663 (*out_content_size != 0 && *out_content_size != sizeof(refs))) {
3664 return KERN_INVALID_ARGUMENT;
3665 }
3666 refs = *(uint32_t *)(void *)in_content;
3667 outrefsp = (*out_content_size != 0) ? (uint32_t *)(void *)out_content : NULL;
3668
3669 if (MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL != command) {
3670 return KERN_NOT_SUPPORTED;
3671 }
3672
3673 /* the first non-default value of the apropos type provides the data */
3674 inherit = III_NULL;
3675 for (i = 0; i < value_count; i++) {
3676 ipc_importance_elem_t elem = (ipc_importance_elem_t)values[i];
3677
3678 if (IIE_NULL != elem && IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
3679 inherit = (ipc_importance_inherit_t)elem;
3680 break;
3681 }
3682 }
3683 if (III_NULL == inherit) {
3684 return KERN_INVALID_ARGUMENT;
3685 }
3686
3687 ipc_importance_lock();
3688
3689 if (0 == refs) {
3690 if (NULL != outrefsp) {
3691 *outrefsp = III_EXTERN(inherit);
3692 }
3693 ipc_importance_unlock();
3694 return KERN_SUCCESS;
3695 }
3696
3697 to_task = inherit->iii_to_task;
3698 assert(ipc_importance_task_is_any_receiver_type(to_task));
3699
3700 /* if not donating to a denap receiver, it was called incorrectly */
3701 if (!ipc_importance_task_is_marked_denap_receiver(to_task)) {
3702 ipc_importance_unlock();
3703 return KERN_INVALID_TASK; /* keeps dispatch happy */
3704 }
3705
3706 /* Enough external references left to drop? */
3707 if (III_EXTERN(inherit) < refs) {
3708 ipc_importance_unlock();
3709 return KERN_FAILURE;
3710 }
3711
3712 /* re-base external and internal counters at the inherit and the to-task (if apropos) */
3713 if (inherit->iii_donating) {
3714 assert(IIT_EXTERN(to_task) >= III_EXTERN(inherit));
3715 assert(to_task->iit_externcnt >= inherit->iii_externcnt);
3716 assert(to_task->iit_externdrop >= inherit->iii_externdrop);
3717 inherit->iii_externdrop += refs;
3718 to_task->iit_externdrop += refs;
3719 externcnt = III_EXTERN(inherit);
3720 if (0 == externcnt) {
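			/*
			 * That was the last undropped external ref held by this
			 * inherit: stop donating and back its external counts out
			 * of the to-task totals before dropping assertions.
			 */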
3721 inherit->iii_donating = FALSE;
3722 to_task->iit_externcnt -= inherit->iii_externcnt;
3723 to_task->iit_externdrop -= inherit->iii_externdrop;
3724
3725
3726 /* Start AppNap delay hysteresis - even if not the last boost for the task. */
3727 if (ipc_importance_delayed_drop_call != NULL &&
3728 ipc_importance_task_is_marked_denap_receiver(to_task)) {
3729 ipc_importance_task_delayed_drop(to_task);
3730 }
3731
3732 /* drop task assertions associated with the dropped boosts */
3733 if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) {
3734 ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
3735 /* may have dropped and retaken importance lock */
3736 }
3737 } else {
3738 /* assert(to_task->iit_assertcnt >= refs + externcnt); */
3739 /* defensive deduction in case of assertcnt underflow */
3740 if (to_task->iit_assertcnt > refs + externcnt) {
3741 to_task->iit_assertcnt -= refs;
3742 } else {
3743 to_task->iit_assertcnt = externcnt;
3744 }
3745 }
3746 } else {
3747 inherit->iii_externdrop += refs;
3748 externcnt = III_EXTERN(inherit);
3749 }
3750
3751 /* capture result (if requested) */
3752 if (NULL != outrefsp) {
3753 *outrefsp = externcnt;
3754 }
3755
3756 ipc_importance_unlock();
3757 return KERN_SUCCESS;
3758}
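/*
 * Illustrative sketch (not part of this file's interface) of the one
 * supported command, as a de-nap boost holder such as libdispatch is
 * expected to issue it from userspace (see the "keeps dispatch happy"
 * note above).  Variable names are assumptions for the example.
 *
 *	uint32_t drop = 1;
 *	uint32_t remaining = 0;
 *	mach_voucher_attr_content_size_t out_size = sizeof(remaining);
 *	kern_return_t kr = mach_voucher_attr_command(voucher,
 *	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 *	    MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL,
 *	    (mach_voucher_attr_content_t)&drop, sizeof(drop),
 *	    (mach_voucher_attr_content_t)&remaining, &out_size);
 */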
3759
3760/*
3761 * Routine: ipc_importance_manager_release [Voucher Attribute Manager Interface]
3762 * Purpose:
3763 * Release the Voucher system's reference on the IPC importance attribute
3764 * manager.
3765 * Conditions:
3766 * As this can only occur after the manager drops the Attribute control
3767 * reference granted back at registration time, and that reference is never
3768 * dropped, this should never be called.
3769 */
3770static void
3771ipc_importance_manager_release(
3772 ipc_voucher_attr_manager_t __assert_only manager)
3773{
3774 IMPORTANCE_ASSERT_MANAGER(manager);
3775 panic("Voucher importance manager released");
3776}
3777
3778/*
3779 * Routine: ipc_importance_init
3780 * Purpose:
3781 * Initialize the IPC importance manager.
3782 * Conditions:
3783 * Zones and Vouchers are already initialized.
3784 */
3785void
3786ipc_importance_init(void)
3787{
3788 natural_t ipc_importance_max = (task_max + thread_max) * 2;
3789 char temp_buf[26];
3790 kern_return_t kr;
3791
3792 if (PE_parse_boot_argn("imp_interactive_receiver", temp_buf, sizeof(temp_buf))) {
3793 ipc_importance_interactive_receiver = TRUE;
3794 }
3795
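	/*
	 * Both zones are sized for the worst case of one element per task
	 * plus one per thread, doubled (ipc_importance_max above).
	 */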
3796 ipc_importance_task_zone = zinit(sizeof(struct ipc_importance_task),
3797 ipc_importance_max * sizeof(struct ipc_importance_task),
3798 sizeof(struct ipc_importance_task),
3799 "ipc task importance");
3800 zone_change(ipc_importance_task_zone, Z_NOENCRYPT, TRUE);
3801
3802 ipc_importance_inherit_zone = zinit(sizeof(struct ipc_importance_inherit),
3803 ipc_importance_max * sizeof(struct ipc_importance_inherit),
3804 sizeof(struct ipc_importance_inherit),
3805 "ipc importance inherit");
3806 zone_change(ipc_importance_inherit_zone, Z_NOENCRYPT, TRUE);
3807
3808
3809#if DEVELOPMENT || DEBUG
3810 queue_init(&global_iit_alloc_queue);
3811#endif
3812
3813 /* initialize global locking */
3814 ipc_importance_lock_init();
3815
3816 kr = ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager,
3817 (mach_voucher_attr_value_handle_t)0,
3818 MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
3819 &ipc_importance_control);
3820 if (KERN_SUCCESS != kr)
3821 printf("Voucher importance manager register returned %d", kr);
3822}
3823
3824/*
3825 * Routine: ipc_importance_thread_call_init
3826 * Purpose:
3827 * Initialize the IPC importance code dependent upon
3828 * thread-call support being available.
3829 * Conditions:
3830 * Thread-call mechanism is already initialized.
3831 */
3832void
3833ipc_importance_thread_call_init(void)
3834{
3835 /* initialize delayed drop queue and thread-call */
3836 queue_init(&ipc_importance_delayed_drop_queue);
3837 ipc_importance_delayed_drop_call =
3838 thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL);
3839 if (NULL == ipc_importance_delayed_drop_call) {
		panic("ipc_importance_thread_call_init");
3841 }
3842}
3843
3844/*
3845 * Routing: task_importance_list_pids
3846 * Purpose: list pids where task in donating importance.
3847 * Conditions: To be called only from kdp stackshot code.
3848 * Will panic the system otherwise.
3849 */
3850extern int
3851task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count)
3852{
3853 if (kdp_lck_spin_is_acquired(&ipc_importance_lock_data) ||
3854 max_count < 1 ||
3855 task->task_imp_base == IIT_NULL ||
3856 pid_list == NULL ||
3857 flags != TASK_IMP_LIST_DONATING_PIDS) {
3858 return 0;
3859 }
3860 unsigned int pidcount = 0;
3861 task_t temp_task;
3862 ipc_importance_task_t task_imp = task->task_imp_base;
3863 ipc_kmsg_t temp_kmsg;
3864 ipc_importance_inherit_t temp_inherit;
3865 ipc_importance_elem_t elem;
3866 int target_pid = 0, previous_pid;
3867
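	/* First pass: walk this task's inherits and record the pids they are donating to. */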
3868 queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
3869 /* check space in buffer */
3870 if (pidcount >= max_count)
3871 break;
3872 previous_pid = target_pid;
3873 target_pid = -1;
3874
		if (temp_inherit->iii_donating) {
3877#if DEVELOPMENT || DEBUG
3878 target_pid = temp_inherit->iii_to_task->iit_bsd_pid;
3879#else
3880 temp_task = temp_inherit->iii_to_task->iit_task;
3881 if (temp_task != TASK_NULL) {
3882 target_pid = task_pid(temp_task);
3883 }
3884#endif
3885 }
3886
3887 if (target_pid != -1 && previous_pid != target_pid) {
3888 memcpy(pid_list, &target_pid, sizeof(target_pid));
3889 pid_list += sizeof(target_pid);
3890 pidcount++;
3891 }
	}
3894
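	/*
	 * Second pass: walk kmsgs linked to this task's importance that still
	 * carry a raised-importance boost and record the associated pids.
	 */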
3895 target_pid = 0;
3896 queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
3897 if (pidcount >= max_count)
3898 break;
3899 previous_pid = target_pid;
3900 target_pid = -1;
3901 elem = temp_kmsg->ikm_importance;
3902 temp_task = TASK_NULL;
3903
3904 if (elem == IIE_NULL) {
3905 continue;
3906 }
3907
3908 if (!(temp_kmsg->ikm_header && MACH_MSGH_BITS_RAISED_IMPORTANCE(temp_kmsg->ikm_header->msgh_bits))) {
3909 continue;
3910 }
3911
3912 if (IIE_TYPE_TASK == IIE_TYPE(elem) &&
3913 (((ipc_importance_task_t)elem)->iit_task != TASK_NULL)) {
3914 target_pid = task_pid(((ipc_importance_task_t)elem)->iit_task);
3915 } else {
3916 temp_inherit = (ipc_importance_inherit_t)elem;
3917#if DEVELOPMENT || DEBUG
3918 target_pid = temp_inherit->iii_to_task->iit_bsd_pid;
3919#else
3920 temp_task = temp_inherit->iii_to_task->iit_task;
3921 if (temp_task != TASK_NULL) {
3922 target_pid = task_pid(temp_task);
3923 }
3924#endif
3925 }
3926
3927 if (target_pid != -1 && previous_pid != target_pid) {
3928 memcpy(pid_list, &target_pid, sizeof(target_pid));
3929 pid_list += sizeof(target_pid);
3930 pidcount++;
3931 }
3932 }
3933
3934 return pidcount;
3935}
3936
3937