1/*
2 * Copyright (c) 2013-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/kern_return.h>
30#include <mach/mach_types.h>
31#include <mach/notify.h>
32#include <os/refcnt.h>
33#include <ipc/ipc_types.h>
34#include <ipc/ipc_importance.h>
35#include <ipc/ipc_port.h>
36#include <ipc/ipc_voucher.h>
37#include <kern/ipc_kobject.h>
38#include <kern/ipc_tt.h>
39#include <kern/mach_param.h>
40#include <kern/misc_protos.h>
41#include <kern/zalloc.h>
42#include <kern/queue.h>
43#include <kern/task.h>
44#include <kern/policy_internal.h>
45
46#include <sys/kdebug.h>
47
48#include <mach/machine/sdt.h>
49
50extern int proc_pid(void *);
51extern int proc_selfpid(void);
52extern uint64_t proc_uniqueid(void *p);
53extern char *proc_name_address(void *p);
54
55/*
56 * Globals for delayed boost drop processing.
57 */
58static queue_head_t ipc_importance_delayed_drop_queue;
59static thread_call_t ipc_importance_delayed_drop_call;
60static uint64_t ipc_importance_delayed_drop_timestamp;
61static boolean_t ipc_importance_delayed_drop_call_requested = FALSE;
62
63#define DENAP_DROP_TARGET (1000 * NSEC_PER_MSEC) /* optimum denap delay */
64#define DENAP_DROP_SKEW (100 * NSEC_PER_MSEC) /* request skew for wakeup */
65#define DENAP_DROP_LEEWAY (2 * DENAP_DROP_SKEW) /* specified wakeup leeway */
66
67#define DENAP_DROP_DELAY (DENAP_DROP_TARGET + DENAP_DROP_SKEW)
68#define DENAP_DROP_FLAGS (THREAD_CALL_DELAY_SYS_NORMAL | THREAD_CALL_DELAY_LEEWAY)
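
/*
 * Illustrative sketch (kept as a comment, not compiled): how the constants
 * above combine when the delayed-drop thread call is armed, mirroring the
 * code in ipc_importance_task_delayed_drop() below.
 *
 *	uint64_t deadline, leeway;
 *	nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);  // ~1100 msec in abstime
 *	deadline += task_imp->iit_updatetime;                       // anchored at enqueue time
 *	nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);    // ~200 msec of slop
 *	thread_call_enter_delayed_with_leeway(ipc_importance_delayed_drop_call,
 *	    NULL, deadline, leeway, DENAP_DROP_FLAGS);
 */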
69
70/*
71 * Importance Voucher Attribute Manager
72 */
73static LCK_SPIN_DECLARE_ATTR(ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr);
74
75#define ipc_importance_lock() \
76 lck_spin_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
77#define ipc_importance_lock_try() \
78 lck_spin_try_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp)
79#define ipc_importance_unlock() \
80 lck_spin_unlock(&ipc_importance_lock_data)
81#define ipc_importance_assert_held() \
82 lck_spin_assert(&ipc_importance_lock_data, LCK_ASSERT_OWNED)
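
/*
 * Usage sketch: the importance lock is a spin lock, so hold times must stay
 * short and anything that may block (zone allocation, waiting on a task lock)
 * must be done after dropping it.  The typical pattern in this file is:
 *
 *	ipc_importance_lock();
 *	...examine or adjust reference counts and propagation queues...
 *	ipc_importance_unlock();
 */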
83
84#if IIE_REF_DEBUG
85#define incr_ref_counter(x) (os_atomic_inc(&(x), relaxed))
86
87static inline
88void
89ipc_importance_reference_internal(ipc_importance_elem_t elem)
90{
91 incr_ref_counter(elem->iie_refs_added);
92 os_ref_retain_mask(&elem->iie_bits, IIE_TYPE_BITS, &iie_refgrp);
93}
94
95static inline
96uint32_t
97ipc_importance_release_internal(ipc_importance_elem_t elem)
98{
99 incr_ref_counter(elem->iie_refs_dropped);
100 return os_ref_release_relaxed_mask(&elem->iie_bits, IIE_TYPE_BITS, &iie_refgrp);
101}
102
103static inline
104void
ipc_importance_task_reference_internal(ipc_importance_task_t task_imp)
{
	ipc_importance_reference_internal(&task_imp->iit_elem);
	incr_ref_counter(task_imp->iit_elem.iie_task_refs_added);
}

111
112static inline
113uint32_t
114ipc_importance_task_release_internal(ipc_importance_task_t task_imp)
115{
116 uint32_t out;
117
118 assert(1 < IIT_REFS(task_imp));
119 incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped);
120 out = ipc_importance_release_internal(&task_imp->iit_elem);
121 return out;
122}
123
124static inline
125void
126ipc_importance_counter_init(ipc_importance_elem_t elem)
127{
128 elem->iie_refs_added = 0;
129 elem->iie_refs_dropped = 0;
130 elem->iie_kmsg_refs_added = 0;
131 elem->iie_kmsg_refs_inherited = 0;
132 elem->iie_kmsg_refs_coalesced = 0;
133 elem->iie_kmsg_refs_dropped = 0;
134 elem->iie_task_refs_added = 0;
135 elem->iie_task_refs_added_inherit_from = 0;
136 elem->iie_task_refs_added_transition = 0;
137 elem->iie_task_refs_self_added = 0;
138 elem->iie_task_refs_inherited = 0;
139 elem->iie_task_refs_coalesced = 0;
140 elem->iie_task_refs_dropped = 0;
141}
142#else
143#define incr_ref_counter(x)
144#endif
145
146#if DEVELOPMENT || DEBUG
147static queue_head_t global_iit_alloc_queue =
148 QUEUE_HEAD_INITIALIZER(global_iit_alloc_queue);
149#endif
150
151static ZONE_DEFINE_TYPE(ipc_importance_task_zone, "ipc task importance",
152 struct ipc_importance_task, ZC_ZFREE_CLEARMEM);
153static ZONE_DEFINE_TYPE(ipc_importance_inherit_zone, "ipc importance inherit",
154 struct ipc_importance_inherit, ZC_ZFREE_CLEARMEM);
156
157static ipc_voucher_attr_control_t ipc_importance_control;
158
159static boolean_t ipc_importance_task_check_transition(ipc_importance_task_t task_imp,
160 iit_update_type_t type, uint32_t delta);
161
162static void ipc_importance_task_propagate_assertion_locked(ipc_importance_task_t task_imp,
163 iit_update_type_t type, boolean_t update_task_imp);
164
165static ipc_importance_inherit_t ipc_importance_inherit_from_task(task_t from_task, task_t to_task);
166
167/*
168 * Routine: ipc_importance_kmsg_link
169 * Purpose:
170 * Link the kmsg onto the appropriate propagation chain.
171 * If the element is a task importance, we link directly
172 * on its propagation chain. Otherwise, we link onto the
173 * destination task of the inherit.
174 * Conditions:
175 * Importance lock held.
176 * Caller is donating an importance elem reference to the kmsg.
177 */
178static void
179ipc_importance_kmsg_link(
180 ipc_kmsg_t kmsg,
181 ipc_importance_elem_t elem)
182{
183 ipc_importance_elem_t link_elem;
184
185 assert(IIE_NULL == kmsg->ikm_importance);
186
187 link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
188 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
189 elem;
190
191 queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
192 kmsg->ikm_importance = elem;
193}
194
195/*
196 * Routine: ipc_importance_kmsg_unlink
197 * Purpose:
198 * Unlink the kmsg from its current propagation chain.
199 * If the element is a task importance, we unlink directly
200 * from its propagation chain. Otherwise, we unlink from the
201 * destination task of the inherit.
202 * Returns:
203 * The reference to the importance element it was linked on.
204 * Conditions:
205 * Importance lock held.
206 * Caller is responsible for dropping reference on returned elem.
207 */
208static ipc_importance_elem_t
209ipc_importance_kmsg_unlink(
210 ipc_kmsg_t kmsg)
211{
212 ipc_importance_elem_t elem = kmsg->ikm_importance;
213
214 if (IIE_NULL != elem) {
215 ipc_importance_elem_t unlink_elem;
216
217 unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
218 (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task :
219 elem;
220
221 queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance);
222 kmsg->ikm_importance = IIE_NULL;
223 }
224 return elem;
225}
226
227/*
228 * Routine: ipc_importance_inherit_link
229 * Purpose:
230 * Link the inherit onto the appropriate propagation chain.
231 * If the element is a task importance, we link directly
232 * on its propagation chain. Otherwise, we link onto the
233 * destination task of the inherit.
234 * Conditions:
235 * Importance lock held.
236 * Caller is donating an elem importance reference to the inherit.
237 */
238static void
239ipc_importance_inherit_link(
240 ipc_importance_inherit_t inherit,
241 ipc_importance_elem_t elem)
242{
243 ipc_importance_task_t link_task;
244
245 assert(IIE_NULL == inherit->iii_from_elem);
246 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
247 ((ipc_importance_inherit_t)elem)->iii_to_task :
248 (ipc_importance_task_t)elem;
249
250 queue_enter(&link_task->iit_inherits, inherit,
251 ipc_importance_inherit_t, iii_inheritance);
252 inherit->iii_from_elem = elem;
253}
254
255/*
256 * Routine: ipc_importance_inherit_find
257 * Purpose:
258 * Find an existing inherit that links the from element to the
259 * to_task at a given nesting depth. As inherits from other
260 * inherits are actually linked off the original inherit's donation
261 * receiving task, we have to conduct our search from there if
262 * the from element is an inherit.
263 * Returns:
264 * A pointer (not a reference) to the matching inherit.
265 * Conditions:
266 * Importance lock held.
267 */
268static ipc_importance_inherit_t
269ipc_importance_inherit_find(
270 ipc_importance_elem_t from,
271 ipc_importance_task_t to_task,
272 unsigned int depth)
273{
274 ipc_importance_task_t link_task;
275 ipc_importance_inherit_t inherit;
276
277 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(from)) ?
278 ((ipc_importance_inherit_t)from)->iii_to_task :
279 (ipc_importance_task_t)from;
280
281 queue_iterate(&link_task->iit_inherits, inherit,
282 ipc_importance_inherit_t, iii_inheritance) {
283 if (inherit->iii_to_task == to_task && inherit->iii_depth == depth) {
284 return inherit;
285 }
286 }
287 return III_NULL;
288}
289
290/*
291 * Routine: ipc_importance_inherit_unlink
292 * Purpose:
293 * Unlink the inherit from its current propagation chain.
294 * If the element is a task importance, we unlink directly
295 * from its propagation chain. Otherwise, we unlink from the
296 * destination task of the inherit.
297 * Returns:
298 * The reference to the importance element it was linked on.
299 * Conditions:
300 * Importance lock held.
301 * Caller is responsible for dropping reference on returned elem.
302 */
303static ipc_importance_elem_t
304ipc_importance_inherit_unlink(
305 ipc_importance_inherit_t inherit)
306{
307 ipc_importance_elem_t elem = inherit->iii_from_elem;
308
309 if (IIE_NULL != elem) {
310 ipc_importance_task_t unlink_task;
311
312 unlink_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
313 ((ipc_importance_inherit_t)elem)->iii_to_task :
314 (ipc_importance_task_t)elem;
315
316 queue_remove(&unlink_task->iit_inherits, inherit,
317 ipc_importance_inherit_t, iii_inheritance);
318 inherit->iii_from_elem = IIE_NULL;
319 }
320 return elem;
321}
322
323/*
324 * Routine: ipc_importance_reference
325 * Purpose:
326 * Add a reference to the importance element.
327 * Conditions:
328 * Caller must hold a reference on the element.
329 */
330void
331ipc_importance_reference(ipc_importance_elem_t elem)
332{
333 assert(0 < IIE_REFS(elem));
334 ipc_importance_reference_internal(elem);
335}
336
337/*
338 * Routine: ipc_importance_release_locked
339 * Purpose:
340 * Release a reference on an importance attribute value,
341 * unlinking and deallocating the attribute if the last reference.
342 * Conditions:
343 * Entered with importance lock held, leaves with it unlocked.
344 */
345static void
346ipc_importance_release_locked(ipc_importance_elem_t elem)
347{
348 assert(0 < IIE_REFS(elem));
349
350#if IMPORTANCE_DEBUG
351 ipc_importance_inherit_t temp_inherit;
352 ipc_importance_task_t link_task;
353 ipc_kmsg_t temp_kmsg;
354 uint32_t expected = 0;
355
356 if (0 < elem->iie_made) {
357 expected++;
358 }
359
360 link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ?
361 ((ipc_importance_inherit_t)elem)->iii_to_task :
362 (ipc_importance_task_t)elem;
363
364 queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance)
365 if (temp_kmsg->ikm_importance == elem) {
366 expected++;
367 }
368 queue_iterate(&link_task->iit_inherits, temp_inherit,
369 ipc_importance_inherit_t, iii_inheritance)
370 if (temp_inherit->iii_from_elem == elem) {
371 expected++;
372 }
373 if (IIE_REFS(elem) < expected + 1) {
374 panic("ipc_importance_release_locked (%p)", elem);
375 }
376#endif /* IMPORTANCE_DEBUG */
377
378 if (0 < ipc_importance_release_internal(elem)) {
379 ipc_importance_unlock();
380 return;
381 }
382
383 /* last ref */
384
385 switch (IIE_TYPE(elem)) {
386 /* just a "from" task reference to drop */
387 case IIE_TYPE_TASK:
388 {
389 ipc_importance_task_t task_elem;
390
391 task_elem = (ipc_importance_task_t)elem;
392
393 /* the task can't still hold a reference on the task importance */
394 assert(TASK_NULL == task_elem->iit_task);
395
396#if DEVELOPMENT || DEBUG
397 queue_remove(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
398#endif
399
400 ipc_importance_unlock();
401
402 zfree(ipc_importance_task_zone, task_elem);
403 break;
404 }
405
406 /* dropping an inherit element */
407 case IIE_TYPE_INHERIT:
408 {
409 ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;
410 ipc_importance_task_t to_task = inherit->iii_to_task;
411 ipc_importance_elem_t from_elem;
412
413 assert(IIT_NULL != to_task);
414 assert(ipc_importance_task_is_any_receiver_type(to_task));
415
416 /* unlink the inherit from its source element */
417 from_elem = ipc_importance_inherit_unlink(inherit);
418 assert(IIE_NULL != from_elem);
419
420 /*
		 * The attribute might have pending external boosts if the attribute
		 * was given out during exec; drop them from the appropriate destination
		 * task.
		 *
		 * The attribute will not have any pending external boosts if the
		 * attribute was given out to the voucher system, since those would
		 * have been dropped by ipc_importance_release_value. There is no way
		 * to detect that here, so if the attribute still has pending external
		 * boosts, drop them from the appropriate destination task.
		 *
		 * Inherit attributes from exec and from the voucher system are never
		 * deduped against each other, so dropping the external boost from the
		 * destination task in these two different places has no unintended
		 * side effects.
435 */
436 assert(inherit->iii_externcnt >= inherit->iii_externdrop);
437 if (inherit->iii_donating) {
438 uint32_t assertcnt = III_EXTERN(inherit);
439
440 assert(ipc_importance_task_is_any_receiver_type(to_task));
441 assert(to_task->iit_externcnt >= inherit->iii_externcnt);
442 assert(to_task->iit_externdrop >= inherit->iii_externdrop);
443 to_task->iit_externcnt -= inherit->iii_externcnt;
444 to_task->iit_externdrop -= inherit->iii_externdrop;
445 inherit->iii_externcnt = 0;
446 inherit->iii_externdrop = 0;
447 inherit->iii_donating = FALSE;
448
449 /* adjust the internal assertions - and propagate as needed */
			if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, assertcnt)) {
				ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
452 }
453 } else {
454 inherit->iii_externcnt = 0;
455 inherit->iii_externdrop = 0;
456 }
457
458 /* release the reference on the source element */
		ipc_importance_release_locked(from_elem);
460 /* unlocked on return */
461
462 /* release the reference on the destination task */
		ipc_importance_task_release(to_task);
464
465 /* free the inherit */
466 zfree(ipc_importance_inherit_zone, inherit);
467 break;
468 }
469 }
470}
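
/*
 * Worked example (assuming the III_EXTERN()/IIT_EXTERN() macros from
 * ipc_importance.h compute "externcnt - externdrop"): if a donating inherit
 * is released with iii_externcnt == 3 and iii_externdrop == 1, both counters
 * are subtracted from the destination task's iit_externcnt/iit_externdrop,
 * and the remaining III_EXTERN of 2 is passed as an IIT_UPDATE_DROP delta to
 * ipc_importance_task_check_transition(), releasing in one step the boosts
 * the inherit was still contributing.
 */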
471
472/*
473 * Routine: ipc_importance_release
474 * Purpose:
475 * Release a reference on an importance attribute value,
476 * unlinking and deallocating the attribute if the last reference.
477 * Conditions:
478 * nothing locked on entrance, nothing locked on exit.
479 * May block.
480 */
481void
482ipc_importance_release(ipc_importance_elem_t elem)
483{
484 if (IIE_NULL == elem) {
485 return;
486 }
487
488 ipc_importance_lock();
489 ipc_importance_release_locked(elem);
490 /* unlocked */
491}
492
493__abortlike
494static void
495iit_over_release_panic(ipc_importance_task_t task_imp)
496{
497 panic("iit unexpected zero refs: %p", task_imp);
498}
499
500/*
501 * Routine: ipc_importance_task_reference
 *
504 * Purpose:
505 * Retain a reference on a task importance attribute value.
506 * Conditions:
507 * nothing locked on entrance, nothing locked on exit.
508 * caller holds a reference already.
509 */
510void
511ipc_importance_task_reference(ipc_importance_task_t task_elem)
512{
513 if (IIT_NULL == task_elem) {
514 return;
515 }
516#if IIE_REF_DEBUG
517 incr_ref_counter(task_elem->iit_elem.iie_task_refs_added);
518#endif
	ipc_importance_reference(&task_elem->iit_elem);
520}
521
522/*
523 * Routine: ipc_importance_task_release
524 * Purpose:
525 * Release a reference on a task importance attribute value,
526 * unlinking and deallocating the attribute if the last reference.
527 * Conditions:
528 * nothing locked on entrance, nothing locked on exit.
529 * May block.
530 */
531void
532ipc_importance_task_release(ipc_importance_task_t task_elem)
533{
534 if (IIT_NULL == task_elem) {
535 return;
536 }
537
538 ipc_importance_lock();
539#if IIE_REF_DEBUG
540 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
541#endif
	ipc_importance_release_locked(&task_elem->iit_elem);
543 /* unlocked */
544}
545
546/*
547 * Routine: ipc_importance_task_release_locked
548 * Purpose:
549 * Release a reference on a task importance attribute value,
550 * unlinking and deallocating the attribute if the last reference.
551 * Conditions:
552 * importance lock held on entry, nothing locked on exit.
553 * May block.
554 */
555static void
556ipc_importance_task_release_locked(ipc_importance_task_t task_elem)
557{
558 if (IIT_NULL == task_elem) {
559 ipc_importance_unlock();
560 return;
561 }
562#if IIE_REF_DEBUG
563 incr_ref_counter(task_elem->iit_elem.iie_task_refs_dropped);
564#endif
	ipc_importance_release_locked(&task_elem->iit_elem);
566 /* unlocked */
567}
568
569/*
570 * Routines for importance donation/inheritance/boosting
571 */
572
573
574/*
575 * External importance assertions are managed by the process in userspace
576 * Internal importance assertions are the responsibility of the kernel
577 * Assertions are changed from internal to external via task_importance_externalize_assertion
578 */
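
/*
 * Minimal usage sketch (hypothetical in-kernel caller) of the internal
 * assertion interfaces defined below; error handling elided:
 *
 *	ipc_importance_task_t imp = ...;   // task importance the caller already holds a ref on
 *	ipc_importance_task_hold_internal_assertion(imp, 1);
 *	...perform boosted work on the task's behalf...
 *	ipc_importance_task_drop_internal_assertion(imp, 1);
 */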
579
580/*
581 * Routine: ipc_importance_task_check_transition
582 * Purpose:
 * Increment or decrement the internal task importance counter of the
584 * specified task and determine if propagation and a task policy
585 * update is required.
586 *
587 * If it is already enqueued for a policy update, steal it from that queue
588 * (as we are reversing that update before it happens).
589 *
590 * Conditions:
591 * Called with the importance lock held.
592 * It is the caller's responsibility to perform the propagation of the
593 * transition and/or policy changes by checking the return value.
594 */
595static boolean_t
596ipc_importance_task_check_transition(
597 ipc_importance_task_t task_imp,
598 iit_update_type_t type,
599 uint32_t delta)
600{
601#if IMPORTANCE_TRACE
602 task_t target_task = task_imp->iit_task;
603#endif
604 boolean_t boost = (IIT_UPDATE_HOLD == type);
605 boolean_t before_boosted, after_boosted;
606
607 ipc_importance_assert_held();
608
609 if (!ipc_importance_task_is_any_receiver_type(task_imp)) {
610 return FALSE;
611 }
612
613#if IMPORTANCE_TRACE
614 int target_pid = task_pid(target_task);
615
616 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START,
617 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
618#endif
619
620 /* snapshot the effective boosting status before making any changes */
621 before_boosted = (task_imp->iit_assertcnt > 0);
622
623 /* Adjust the assertcnt appropriately */
624 if (boost) {
625 task_imp->iit_assertcnt += delta;
626#if IMPORTANCE_TRACE
627 DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid,
628 task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt);
629#endif
630 } else {
631 // assert(delta <= task_imp->iit_assertcnt);
632 if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) {
633 /* TODO: Turn this back into a panic <rdar://problem/12592649> */
634 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
635 } else {
636 task_imp->iit_assertcnt -= delta;
637 }
638#if IMPORTANCE_TRACE
		// This covers both legacy and voucher-based importance.
640 DTRACE_BOOST4(drop_boost, task_t, target_task, int, target_pid, int, delta, int, task_imp->iit_assertcnt);
641#endif
642 }
643
644#if IMPORTANCE_TRACE
645 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END,
646 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
647#endif
648
649 /* did the change result in an effective donor status change? */
650 after_boosted = (task_imp->iit_assertcnt > 0);
651
652 if (after_boosted != before_boosted) {
653 /*
654 * If the task importance is already on an update queue, we just reversed the need for a
655 * pending policy update. If the queue is any other than the delayed-drop-queue, pull it
656 * off that queue and release the reference it got going onto the update queue. If it is
657 * the delayed-drop-queue we leave it in place in case it comes back into the drop state
658 * before its time delay is up.
659 *
660 * We still need to propagate the change downstream to reverse the assertcnt effects,
661 * but we no longer need to update this task's boost policy state.
662 *
663 * Otherwise, mark it as needing a policy update.
664 */
665 assert(0 == task_imp->iit_updatepolicy);
666 if (NULL != task_imp->iit_updateq) {
667 if (&ipc_importance_delayed_drop_queue != task_imp->iit_updateq) {
668 queue_remove(task_imp->iit_updateq, task_imp, ipc_importance_task_t, iit_updates);
669 task_imp->iit_updateq = NULL;
670 if (!ipc_importance_task_release_internal(task_imp)) {
671 /* can't be last ref */
672 iit_over_release_panic(task_imp);
673 }
674 }
675 } else {
676 task_imp->iit_updatepolicy = 1;
677 }
678 return TRUE;
679 }
680
681 return FALSE;
682}
683
684
685/*
686 * Routine: ipc_importance_task_propagate_helper
687 * Purpose:
 * Increment or decrement the internal task importance counter of all
689 * importance tasks inheriting from the specified one. If this causes
690 * that importance task to change state, add it to the list of tasks
691 * to do a policy update against.
692 * Conditions:
693 * Called with the importance lock held.
694 * It is the caller's responsibility to iterate down the generated list
695 * and propagate any subsequent assertion changes from there.
696 */
697static void
698ipc_importance_task_propagate_helper(
699 ipc_importance_task_t task_imp,
700 iit_update_type_t type,
701 queue_t propagation)
702{
703 ipc_importance_task_t temp_task_imp;
704
705 /*
706 * iterate the downstream kmsgs, adjust their boosts,
707 * and capture the next task to adjust for each message
708 */
709
710 ipc_kmsg_t temp_kmsg;
711
712 queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
		mach_msg_header_t *hdr = ikm_header(temp_kmsg);
714 mach_port_delta_t delta;
715 ipc_port_t port;
716
717 /* toggle the kmsg importance bit as a barrier to parallel adjusts */
718 if (IIT_UPDATE_HOLD == type) {
719 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
720 continue;
721 }
722
723 /* mark the message as now carrying importance */
724 hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
725 delta = 1;
726 } else {
727 if (!MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
728 continue;
729 }
730
731 /* clear the message as now carrying importance */
732 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
733 delta = -1;
734 }
735
736 /* determine the task importance to adjust as result (if any) */
737 port = hdr->msgh_remote_port;
738 assert(IP_VALID(port));
739 ip_mq_lock(port);
740 temp_task_imp = IIT_NULL;
		if (!ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &temp_task_imp)) {
742 ip_mq_unlock(port);
743 }
744
745 /* no task importance to adjust associated with the port? */
746 if (IIT_NULL == temp_task_imp) {
747 continue;
748 }
749
750 /* hold a reference on temp_task_imp */
751
752 /* Adjust the task assertions and determine if an edge was crossed */
		if (ipc_importance_task_check_transition(temp_task_imp, type, 1)) {
754 incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
755 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
756 /* reference donated */
757 } else {
758 if (!ipc_importance_task_release_internal(temp_task_imp)) {
759 /* can't be last ref */
				iit_over_release_panic(temp_task_imp);
761 }
762 }
763 }
764
765 /*
766 * iterate the downstream importance inherits
767 * and capture the next task importance to boost for each
768 */
769 ipc_importance_inherit_t temp_inherit;
770
771 queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
772 uint32_t assertcnt = III_EXTERN(temp_inherit);
773
774 temp_task_imp = temp_inherit->iii_to_task;
775 assert(IIT_NULL != temp_task_imp);
776
777 if (IIT_UPDATE_HOLD == type) {
778 /* if no undropped externcnts in the inherit, nothing to do */
779 if (0 == assertcnt) {
780 assert(temp_inherit->iii_donating == FALSE);
781 continue;
782 }
783
784 /* nothing to do if the inherit is already donating (forced donation) */
785 if (temp_inherit->iii_donating) {
786 continue;
787 }
788
789 /* mark it donating and contribute to the task externcnts */
790 temp_inherit->iii_donating = TRUE;
791 temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt;
792 temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop;
793 } else {
794 /* if no contributing assertions, move on */
795 if (0 == assertcnt) {
796 assert(temp_inherit->iii_donating == FALSE);
797 continue;
798 }
799
800 /* nothing to do if the inherit is not donating */
801 if (!temp_inherit->iii_donating) {
802 continue;
803 }
804
805 /* mark it no longer donating */
806 temp_inherit->iii_donating = FALSE;
807
808 /* remove the contribution the inherit made to the to-task */
809 assert(IIT_EXTERN(temp_task_imp) >= III_EXTERN(temp_inherit));
810 assert(temp_task_imp->iit_externcnt >= temp_inherit->iii_externcnt);
811 assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop);
812 temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt;
813 temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop;
814 }
815
816 /* Adjust the task assertions and determine if an edge was crossed */
817 assert(ipc_importance_task_is_any_receiver_type(temp_task_imp));
		if (ipc_importance_task_check_transition(temp_task_imp, type, assertcnt)) {
			ipc_importance_task_reference(temp_task_imp);
820 incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition);
821 queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props);
822 }
823 }
824}
825
826/*
827 * Routine: ipc_importance_task_process_updates
828 * Purpose:
829 * Process the queue of task importances and apply the policy
830 * update called for. Only process tasks in the queue with an
831 * update timestamp less than the supplied max.
832 * Conditions:
833 * Called and returns with importance locked.
834 * May drop importance lock and block temporarily.
835 */
836static void
837ipc_importance_task_process_updates(
838 queue_t supplied_queue,
839 boolean_t boost,
840 uint64_t max_timestamp)
841{
842 ipc_importance_task_t task_imp;
843 queue_head_t second_chance;
844 queue_t queue = supplied_queue;
845
846 /*
	 * This queue holds the tasks we couldn't trylock on the first pass.
848 * By using a second (private) queue, we guarantee all tasks that get
849 * entered on this queue have a timestamp under the maximum.
850 */
851 queue_init(&second_chance);
852
853 /* process any resulting policy updates */
854retry:
855 while (!queue_empty(queue)) {
856 task_t target_task;
857 struct task_pend_token pend_token = {};
858
859 task_imp = (ipc_importance_task_t)queue_first(queue);
860 assert(0 == task_imp->iit_updatepolicy);
861 assert(queue == task_imp->iit_updateq);
862
863 /* if timestamp is too big, we're done */
864 if (task_imp->iit_updatetime > max_timestamp) {
865 break;
866 }
867
868 /* we were given a reference on each task in the queue */
869
870 /* remove it from the supplied queue */
871 queue_remove(queue, task_imp, ipc_importance_task_t, iit_updates);
872 task_imp->iit_updateq = NULL;
873
874 target_task = task_imp->iit_task;
875
876 /* Is it well on the way to exiting? */
877 if (TASK_NULL == target_task) {
			ipc_importance_task_release_locked(task_imp);
879 /* importance unlocked */
880 ipc_importance_lock();
881 continue;
882 }
883
884 /* Has the update been reversed on the hysteresis queue? */
885 if (0 < task_imp->iit_assertcnt &&
886 queue == &ipc_importance_delayed_drop_queue) {
			ipc_importance_task_release_locked(task_imp);
888 /* importance unlocked */
889 ipc_importance_lock();
890 continue;
891 }
892
893 /*
894 * Can we get the task lock out-of-order?
895 * If not, stick this back on the second-chance queue.
896 */
897 if (!task_lock_try(target_task)) {
898 boolean_t should_wait_lock = (queue == &second_chance);
899 task_imp->iit_updateq = &second_chance;
900
901 /*
902 * If we're already processing second-chances on
903 * tasks, keep this task on the front of the queue.
904 * We will wait for the task lock before coming
905 * back and trying again, and we have a better
906 * chance of re-acquiring the lock if we come back
907 * to it right away.
908 */
909 if (should_wait_lock) {
910 task_reference(target_task);
911 queue_enter_first(&second_chance, task_imp,
912 ipc_importance_task_t, iit_updates);
913 } else {
914 queue_enter(&second_chance, task_imp,
915 ipc_importance_task_t, iit_updates);
916 }
917 ipc_importance_unlock();
918
919 if (should_wait_lock) {
920 task_lock(target_task);
921 task_unlock(target_task);
922 task_deallocate(target_task);
923 }
924
925 ipc_importance_lock();
926 continue;
927 }
928
929 /* is it going away? */
930 if (!target_task->active) {
931 task_unlock(target_task);
			ipc_importance_task_release_locked(task_imp);
933 /* importance unlocked */
934 ipc_importance_lock();
935 continue;
936 }
937
938 /* take a task reference for while we don't have the importance lock */
939 task_reference(target_task);
940
941 /* count the transition */
942 if (boost) {
943 task_imp->iit_transitions++;
944 }
945
946 ipc_importance_unlock();
947
948 /* reevaluate turnstile boost */
949 pend_token.tpt_update_turnstile = 1;
950
951 /* apply the policy adjust to the target task (while it is still locked) */
		task_update_boost_locked(target_task, boost, &pend_token);

		/* complete the policy update with the task unlocked */
		ipc_importance_task_release(task_imp);
		task_unlock(target_task);
		task_policy_update_complete_unlocked(target_task, &pend_token);
958 task_deallocate(target_task);
959
960 ipc_importance_lock();
961 }
962
963 /* If there are tasks we couldn't update the first time, try again */
964 if (!queue_empty(&second_chance)) {
965 queue = &second_chance;
966 goto retry;
967 }
968}
969
970
971/*
972 * Routine: ipc_importance_task_delayed_drop_scan
973 * Purpose:
974 * The thread call routine to scan the delayed drop queue,
 * requesting all updates with a deadline up to the last target
 * for the thread-call (which is DENAP_DROP_SKEW beyond the first
 * thread's optimum delay).
979 * Conditions:
980 * Nothing locked
981 */
982static void
983ipc_importance_task_delayed_drop_scan(
984 __unused void *arg1,
985 __unused void *arg2)
986{
987 ipc_importance_lock();
988
989 /* process all queued task drops with timestamps up to TARGET(first)+SKEW */
	ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue,
	    FALSE,
	    ipc_importance_delayed_drop_timestamp);
993
994 /* importance lock may have been temporarily dropped */
995
996 /* If there are any entries left in the queue, re-arm the call here */
997 if (!queue_empty(&ipc_importance_delayed_drop_queue)) {
998 ipc_importance_task_t task_imp;
999 uint64_t deadline;
1000 uint64_t leeway;
1001
1002 task_imp = (ipc_importance_task_t)queue_first(&ipc_importance_delayed_drop_queue);
1003
		nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
		deadline += task_imp->iit_updatetime;
		ipc_importance_delayed_drop_timestamp = deadline;

		nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

		thread_call_enter_delayed_with_leeway(
			ipc_importance_delayed_drop_call,
			NULL,
			deadline,
			leeway,
			DENAP_DROP_FLAGS);
1016 } else {
1017 ipc_importance_delayed_drop_call_requested = FALSE;
1018 }
1019 ipc_importance_unlock();
1020}
1021
1022/*
1023 * Routine: ipc_importance_task_delayed_drop
1024 * Purpose:
1025 * Queue the specified task importance for delayed policy
1026 * update to drop its boost.
1027 * Conditions:
1028 * Called with the importance lock held.
1029 */
1030static void
1031ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp)
1032{
1033 uint64_t timestamp = mach_absolute_time(); /* no mach_approximate_time() in kernel */
1034
1035 assert(ipc_importance_delayed_drop_call != NULL);
1036
1037 /*
1038 * If still on an update queue from a previous change,
1039 * remove it first (and use that reference). Otherwise, take
1040 * a new reference for the delay drop update queue.
1041 */
1042 if (NULL != task_imp->iit_updateq) {
1043 queue_remove(task_imp->iit_updateq, task_imp,
1044 ipc_importance_task_t, iit_updates);
1045 } else {
1046 ipc_importance_task_reference_internal(task_imp);
1047 }
1048
1049 task_imp->iit_updateq = &ipc_importance_delayed_drop_queue;
1050 task_imp->iit_updatetime = timestamp;
1051
1052 queue_enter(&ipc_importance_delayed_drop_queue, task_imp,
1053 ipc_importance_task_t, iit_updates);
1054
1055 /* request the delayed thread-call if not already requested */
1056 if (!ipc_importance_delayed_drop_call_requested) {
1057 uint64_t deadline;
1058 uint64_t leeway;
1059
		nanoseconds_to_absolutetime(DENAP_DROP_DELAY, &deadline);
		deadline += task_imp->iit_updatetime;
		ipc_importance_delayed_drop_timestamp = deadline;

		nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway);

		ipc_importance_delayed_drop_call_requested = TRUE;
		thread_call_enter_delayed_with_leeway(
			ipc_importance_delayed_drop_call,
			NULL,
			deadline,
			leeway,
			DENAP_DROP_FLAGS);
1073 }
1074}
1075
1076
1077/*
1078 * Routine: ipc_importance_task_propagate_assertion_locked
1079 * Purpose:
 * Propagate the importance transition type to every item (kmsg or
 * inherit) linked downstream of the given task importance.
1081 * If this causes a boost to be applied, determine if that
1082 * boost should propagate downstream.
1083 * Conditions:
1084 * Called with the importance lock held.
1085 */
1086static void
1087ipc_importance_task_propagate_assertion_locked(
1088 ipc_importance_task_t task_imp,
1089 iit_update_type_t type,
1090 boolean_t update_task_imp)
1091{
1092 boolean_t boost = (IIT_UPDATE_HOLD == type);
1093 ipc_importance_task_t temp_task_imp;
1094 queue_head_t propagate;
1095 queue_head_t updates;
1096
1097 queue_init(&updates);
1098 queue_init(&propagate);
1099
1100 ipc_importance_assert_held();
1101
1102 /*
1103 * If we're going to update the policy for the provided task,
1104 * enqueue it on the propagate queue itself. Otherwise, only
1105 * enqueue downstream things.
1106 */
1107 if (update_task_imp) {
		ipc_importance_task_reference(task_imp);
1109 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_transition);
1110 queue_enter(&propagate, task_imp, ipc_importance_task_t, iit_props);
1111 } else {
		ipc_importance_task_propagate_helper(task_imp, type, &propagate);
1113 }
1114
1115 /*
1116 * for each item on the propagation list, propagate any change downstream,
	 * adding new tasks to propagate further if they transitioned as well.
1118 */
1119 while (!queue_empty(&propagate)) {
1120 boolean_t need_update;
1121
1122 queue_remove_first(&propagate, temp_task_imp, ipc_importance_task_t, iit_props);
1123 /* hold a reference on temp_task_imp */
1124
1125 assert(IIT_NULL != temp_task_imp);
1126
1127 /* only propagate for receivers not already marked as a donor */
		if (!ipc_importance_task_is_marked_donor(temp_task_imp) &&
		    ipc_importance_task_is_marked_receiver(temp_task_imp)) {
			ipc_importance_task_propagate_helper(temp_task_imp, type, &propagate);
1131 }
1132
1133 /* if we have a policy update to apply, enqueue a reference for later processing */
1134 need_update = (0 != temp_task_imp->iit_updatepolicy);
1135 temp_task_imp->iit_updatepolicy = 0;
1136 if (need_update && TASK_NULL != temp_task_imp->iit_task) {
1137 if (NULL == temp_task_imp->iit_updateq) {
1138 /*
				 * If a downstream task that needs an update is subject to App Nap,
				 * drop boosts according to the delay hysteresis. Otherwise,
				 * update it immediately.
1142 */
1143 if (!boost && temp_task_imp != task_imp &&
1144 ipc_importance_delayed_drop_call != NULL &&
				    ipc_importance_task_is_marked_denap_receiver(temp_task_imp)) {
					ipc_importance_task_delayed_drop(temp_task_imp);
1147 } else {
1148 temp_task_imp->iit_updatetime = 0;
1149 temp_task_imp->iit_updateq = &updates;
1150 ipc_importance_task_reference_internal(temp_task_imp);
1151 if (boost) {
1152 queue_enter(&updates, temp_task_imp,
1153 ipc_importance_task_t, iit_updates);
1154 } else {
1155 queue_enter_first(&updates, temp_task_imp,
1156 ipc_importance_task_t, iit_updates);
1157 }
1158 }
1159 } else {
1160 /* Must already be on the AppNap hysteresis queue */
1161 assert(ipc_importance_delayed_drop_call != NULL);
1162 assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp));
1163 }
1164 }
1165
1166 if (!ipc_importance_task_release_internal(temp_task_imp)) {
1167 /* can't be last ref */
			iit_over_release_panic(temp_task_imp);
1169 }
1170 }
1171
1172 /* apply updates to task (may drop importance lock) */
1173 if (!queue_empty(&updates)) {
		ipc_importance_task_process_updates(&updates, boost, 0);
1175 }
1176}
1177
1178/*
1179 * Routine: ipc_importance_task_hold_internal_assertion_locked
1180 * Purpose:
1181 * Increment the assertion count on the task importance.
1182 * If this results in a boost state change in that task,
 * prepare to update task policy for this task AND, if
 * not just waking out of App Nap, all down-stream
1185 * tasks that have a similar transition through inheriting
1186 * this update.
1187 * Conditions:
1188 * importance locked on entry and exit.
1189 * May temporarily drop importance lock and block.
1190 */
1191static kern_return_t
1192ipc_importance_task_hold_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1193{
	if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, count)) {
1195 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
1196 }
1197 return KERN_SUCCESS;
1198}
1199
1200/*
1201 * Routine: ipc_importance_task_drop_internal_assertion_locked
1202 * Purpose:
1203 * Decrement the assertion count on the task importance.
1204 * If this results in a boost state change in that task,
 * prepare to update task policy for this task AND, if
 * not just waking out of App Nap, all down-stream
1207 * tasks that have a similar transition through inheriting
1208 * this update.
1209 * Conditions:
1210 * importance locked on entry and exit.
1211 * May temporarily drop importance lock and block.
1212 */
1213static kern_return_t
1214ipc_importance_task_drop_internal_assertion_locked(ipc_importance_task_t task_imp, uint32_t count)
1215{
	if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1217 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1218 }
1219 return KERN_SUCCESS;
1220}
1221
1222/*
1223 * Routine: ipc_importance_task_hold_internal_assertion
1224 * Purpose:
1225 * Increment the assertion count on the task importance.
1226 * If this results in a 0->1 change in that count,
1227 * prepare to update task policy for this task AND
1228 * (potentially) all down-stream tasks that have a
1229 * similar transition through inheriting this update.
1230 * Conditions:
1231 * Nothing locked
1232 * May block after dropping importance lock.
1233 */
1234int
1235ipc_importance_task_hold_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1236{
1237 int ret = KERN_SUCCESS;
1238
1239 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1240 ipc_importance_lock();
1241 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1242 ipc_importance_unlock();
1243 }
1244 return ret;
1245}
1246
1247/*
1248 * Routine: ipc_importance_task_drop_internal_assertion
1249 * Purpose:
1250 * Decrement the assertion count on the task importance.
1251 * If this results in a X->0 change in that count,
1252 * prepare to update task policy for this task AND
1253 * all down-stream tasks that have a similar transition
1254 * through inheriting this drop update.
1255 * Conditions:
1256 * Nothing locked on entry.
1257 * May block after dropping importance lock.
1258 */
1259kern_return_t
1260ipc_importance_task_drop_internal_assertion(ipc_importance_task_t task_imp, uint32_t count)
1261{
1262 kern_return_t ret = KERN_SUCCESS;
1263
1264 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1265 ipc_importance_lock();
1266 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1267 ipc_importance_unlock();
1268 }
1269 return ret;
1270}
1271
1272/*
1273 * Routine: ipc_importance_task_hold_file_lock_assertion
1274 * Purpose:
1275 * Increment the file lock assertion count on the task importance.
1276 * If this results in a 0->1 change in that count,
1277 * prepare to update task policy for this task AND
1278 * (potentially) all down-stream tasks that have a
1279 * similar transition through inheriting this update.
1280 * Conditions:
1281 * Nothing locked
1282 * May block after dropping importance lock.
1283 */
1284kern_return_t
1285ipc_importance_task_hold_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1286{
1287 kern_return_t ret = KERN_SUCCESS;
1288
1289 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1290 ipc_importance_lock();
1291 ret = ipc_importance_task_hold_internal_assertion_locked(task_imp, count);
1292 if (KERN_SUCCESS == ret) {
1293 task_imp->iit_filelocks += count;
1294 }
1295 ipc_importance_unlock();
1296 }
1297 return ret;
1298}
1299
1300/*
1301 * Routine: ipc_importance_task_drop_file_lock_assertion
1302 * Purpose:
1303 * Decrement the assertion count on the task importance.
1304 * If this results in a X->0 change in that count,
1305 * prepare to update task policy for this task AND
1306 * all down-stream tasks that have a similar transition
1307 * through inheriting this drop update.
1308 * Conditions:
1309 * Nothing locked on entry.
1310 * May block after dropping importance lock.
1311 */
1312kern_return_t
1313ipc_importance_task_drop_file_lock_assertion(ipc_importance_task_t task_imp, uint32_t count)
1314{
1315 kern_return_t ret = KERN_SUCCESS;
1316
1317 if (ipc_importance_task_is_any_receiver_type(task_imp)) {
1318 ipc_importance_lock();
1319 if (count <= task_imp->iit_filelocks) {
1320 task_imp->iit_filelocks -= count;
1321 ret = ipc_importance_task_drop_internal_assertion_locked(task_imp, count);
1322 } else {
1323 ret = KERN_INVALID_ARGUMENT;
1324 }
1325 ipc_importance_unlock();
1326 }
1327 return ret;
1328}
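
/*
 * Usage sketch: file-lock assertions are ordinary internal assertions with a
 * separate iit_filelocks tally, so over-releases can be rejected with
 * KERN_INVALID_ARGUMENT.  A hypothetical caller pairs them as:
 *
 *	ipc_importance_task_hold_file_lock_assertion(task_imp, 1);
 *	...file lock held on behalf of the boosting task...
 *	ipc_importance_task_drop_file_lock_assertion(task_imp, 1);
 */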
1329
1330/*
1331 * Routine: ipc_importance_task_hold_legacy_external_assertion
1332 * Purpose:
1333 * Increment the external assertion count on the task importance.
1334 * This cannot result in an 0->1 transition, as the caller must
1335 * already hold an external boost.
1336 * Conditions:
1337 * Nothing locked on entry.
1338 * May block after dropping importance lock.
1339 * A queue of task importance structures is returned
1340 * by ipc_importance_task_hold_assertion_locked(). Each
1341 * needs to be updated (outside the importance lock hold).
1342 */
1343kern_return_t
1344ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1345{
1346 task_t target_task;
1347 uint32_t target_assertcnt;
1348 uint32_t target_externcnt;
1349 uint32_t target_legacycnt;
1350
1351 kern_return_t ret;
1352
1353 ipc_importance_lock();
1354 target_task = task_imp->iit_task;
1355
1356#if IMPORTANCE_TRACE
1357 int target_pid = task_pid(target_task);
1358
1359 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1360 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1361#endif
1362
1363 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1364 /* Only allowed to take a new boost assertion when holding an external boost */
1365 /* save data for diagnostic printf below */
1366 target_assertcnt = task_imp->iit_assertcnt;
1367 target_externcnt = IIT_EXTERN(task_imp);
1368 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1369 ret = KERN_FAILURE;
1370 count = 0;
1371 } else {
1372 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1373 assert(0 < task_imp->iit_assertcnt);
1374 assert(0 < IIT_EXTERN(task_imp));
1375 task_imp->iit_assertcnt += count;
1376 task_imp->iit_externcnt += count;
1377 task_imp->iit_legacy_externcnt += count;
1378 ret = KERN_SUCCESS;
1379 }
1380 ipc_importance_unlock();
1381
1382#if IMPORTANCE_TRACE
1383 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1384 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1385 // This covers the legacy case where a task takes an extra boost.
1386 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt);
1387#endif
1388
1389 if (KERN_FAILURE == ret && target_task != TASK_NULL) {
		printf("BUG in process %s[%d]: "
		    "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. "
		    "(%d total, %d external, %d legacy-external)\n",
		    proc_name_address(get_bsdtask_info(target_task)), task_pid(target_task),
		    target_assertcnt, target_externcnt, target_legacycnt);
1395 }
1396
1397 return ret;
1398}
1399
1400/*
1401 * Routine: ipc_importance_task_drop_legacy_external_assertion
1402 * Purpose:
1403 * Drop the legacy external assertion count on the task and
1404 * reflect that change to total external assertion count and
1405 * then onto the internal importance count.
1406 *
1407 * If this results in a X->0 change in the internal,
1408 * count, prepare to update task policy for this task AND
1409 * all down-stream tasks that have a similar transition
1410 * through inheriting this update.
1411 * Conditions:
1412 * Nothing locked on entry.
1413 */
1414kern_return_t
1415ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_imp, uint32_t count)
1416{
1417 int ret = KERN_SUCCESS;
1418 task_t target_task;
1419 uint32_t target_assertcnt;
1420 uint32_t target_externcnt;
1421 uint32_t target_legacycnt;
1422
1423 if (count > 1) {
1424 return KERN_INVALID_ARGUMENT;
1425 }
1426
1427 ipc_importance_lock();
1428 target_task = task_imp->iit_task;
1429
1430#if IMPORTANCE_TRACE
1431 int target_pid = task_pid(target_task);
1432
1433 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START,
1434 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1435#endif
1436
1437 if (count > IIT_LEGACY_EXTERN(task_imp)) {
1438 /* Process over-released its boost count - save data for diagnostic printf */
1439 /* TODO: If count > 1, we should clear out as many external assertions as there are left. */
1440 target_assertcnt = task_imp->iit_assertcnt;
1441 target_externcnt = IIT_EXTERN(task_imp);
1442 target_legacycnt = IIT_LEGACY_EXTERN(task_imp);
1443 ret = KERN_FAILURE;
1444 } else {
1445 /*
1446 * decrement legacy external count from the top level and reflect
1447 * into internal for this and all subsequent updates.
1448 */
1449 assert(ipc_importance_task_is_any_receiver_type(task_imp));
1450 assert(IIT_EXTERN(task_imp) >= count);
1451
1452 task_imp->iit_legacy_externdrop += count;
1453 task_imp->iit_externdrop += count;
1454
1455 /* reset extern counters (if appropriate) */
1456 if (IIT_LEGACY_EXTERN(task_imp) == 0) {
1457 if (IIT_EXTERN(task_imp) != 0) {
1458 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
1459 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
1460 } else {
1461 task_imp->iit_externcnt = 0;
1462 task_imp->iit_externdrop = 0;
1463 }
1464 task_imp->iit_legacy_externcnt = 0;
1465 task_imp->iit_legacy_externdrop = 0;
1466 }
1467
1468 /* reflect the drop to the internal assertion count (and effect any importance change) */
		if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) {
1470 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE);
1471 }
1472 ret = KERN_SUCCESS;
1473 }
1474
1475#if IMPORTANCE_TRACE
1476 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END,
1477 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1478#endif
1479
1480 ipc_importance_unlock();
1481
1482 /* delayed printf for user-supplied data failures */
1483 if (KERN_FAILURE == ret && TASK_NULL != target_task) {
		printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n",
		    proc_name_address(get_bsdtask_info(target_task)), task_pid(target_task),
		    target_assertcnt, target_externcnt, target_legacycnt);
1487 }
1488
1489 return ret;
1490}
1491
1492
1493#if LEGACY_IMPORTANCE_DELIVERY
1494/* Transfer an assertion to legacy userspace responsibility */
1495static kern_return_t
1496ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, uint32_t count, __unused int sender_pid)
1497{
1498 task_t target_task;
1499
1500 assert(IIT_NULL != task_imp);
1501 target_task = task_imp->iit_task;
1502
1503 if (TASK_NULL == target_task ||
1504 !ipc_importance_task_is_any_receiver_type(task_imp)) {
1505 return KERN_FAILURE;
1506 }
1507
1508#if IMPORTANCE_TRACE
1509 int target_pid = task_pid(target_task);
1510
1511 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START,
1512 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0);
1513#endif
1514
1515 ipc_importance_lock();
1516 /* assert(task_imp->iit_assertcnt >= IIT_EXTERN(task_imp) + count); */
1517 assert(IIT_EXTERN(task_imp) >= IIT_LEGACY_EXTERN(task_imp));
1518 task_imp->iit_legacy_externcnt += count;
1519 task_imp->iit_externcnt += count;
1520 ipc_importance_unlock();
1521
1522#if IMPORTANCE_TRACE
1523 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END,
1524 proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0);
1525 // This is the legacy boosting path
1526 DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp));
1527#endif /* IMPORTANCE_TRACE */
1528
1529 return KERN_SUCCESS;
1530}
1531#endif /* LEGACY_IMPORTANCE_DELIVERY */
1532
1533/*
1534 * Routine: ipc_importance_task_update_live_donor
1535 * Purpose:
1536 * Read the live donor status and update the live_donor bit/propagate the change in importance.
1537 * Conditions:
1538 * Nothing locked on entrance, nothing locked on exit.
1539 *
1540 * TODO: Need tracepoints around this function...
1541 */
1542void
1543ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp)
1544{
1545 uint32_t task_live_donor;
1546 boolean_t before_donor;
1547 boolean_t after_donor;
1548 task_t target_task;
1549
1550 assert(task_imp != NULL);
1551
1552 /*
1553 * Nothing to do if the task is not marked as expecting
1554 * live donor updates.
1555 */
1556 if (!ipc_importance_task_is_marked_live_donor(task_imp)) {
1557 return;
1558 }
1559
1560 ipc_importance_lock();
1561
1562 /* If the task got disconnected on the way here, no use (or ability) adjusting live donor status */
1563 target_task = task_imp->iit_task;
1564 if (TASK_NULL == target_task) {
1565 ipc_importance_unlock();
1566 return;
1567 }
1568 before_donor = ipc_importance_task_is_marked_donor(task_imp);
1569
1570 /* snapshot task live donor status - may change, but another call will accompany the change */
1571 task_live_donor = target_task->effective_policy.tep_live_donor;
1572
1573#if IMPORTANCE_TRACE
1574 int target_pid = task_pid(target_task);
1575
1576 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1577 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START,
1578 target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0);
1579#endif
1580
1581 /* update the task importance live donor status based on the task's value */
1582 task_imp->iit_donor = task_live_donor;
1583
1584 after_donor = ipc_importance_task_is_marked_donor(task_imp);
1585
1586 /* Has the effectiveness of being a donor changed as a result of this update? */
1587 if (before_donor != after_donor) {
1588 iit_update_type_t type;
1589
1590 /* propagate assertions without updating the current task policy (already handled) */
1591 if (0 == before_donor) {
1592 task_imp->iit_transitions++;
1593 type = IIT_UPDATE_HOLD;
1594 } else {
1595 type = IIT_UPDATE_DROP;
1596 }
1597 ipc_importance_task_propagate_assertion_locked(task_imp, type, FALSE);
1598 }
1599
1600#if IMPORTANCE_TRACE
1601 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1602 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END,
1603 target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0);
1604#endif
1605
1606 ipc_importance_unlock();
1607}
1608
1609
1610/*
1611 * Routine: ipc_importance_task_mark_donor
1612 * Purpose:
1613 * Set the task importance donor flag.
1614 * Conditions:
1615 * Nothing locked on entrance, nothing locked on exit.
1616 *
1617 * This is only called while the task is being constructed,
1618 * so no need to update task policy or propagate downstream.
1619 */
1620void
1621ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donating)
1622{
1623 assert(task_imp != NULL);
1624
1625 ipc_importance_lock();
1626
1627 int old_donor = task_imp->iit_donor;
1628
1629 task_imp->iit_donor = (donating ? 1 : 0);
1630
1631 if (task_imp->iit_donor > 0 && old_donor == 0) {
1632 task_imp->iit_transitions++;
1633 }
1634
1635 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1636 (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE,
1637 task_pid(task_imp->iit_task), donating,
1638 old_donor, task_imp->iit_donor, 0);
1639
1640 ipc_importance_unlock();
1641}
1642
1643/*
1644 * Routine: ipc_importance_task_marked_donor
1645 * Purpose:
1646 * Query the donor flag for the given task importance.
1647 * Conditions:
1648 * May be called without taking the importance lock.
1649 * In that case, donor status can change so you must
1650 * check only once for each donation event.
1651 */
1652boolean_t
1653ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp)
1654{
1655 if (IIT_NULL == task_imp) {
1656 return FALSE;
1657 }
1658 return 0 != task_imp->iit_donor;
1659}
1660
1661/*
1662 * Routine: ipc_importance_task_mark_live_donor
1663 * Purpose:
1664 * Indicate that the task is eligible for live donor updates.
1665 * Conditions:
1666 * Nothing locked on entrance, nothing locked on exit.
1667 *
1668 * This is only called while the task is being constructed.
1669 */
1670void
1671ipc_importance_task_mark_live_donor(ipc_importance_task_t task_imp, boolean_t live_donating)
1672{
1673 assert(task_imp != NULL);
1674
1675 ipc_importance_lock();
1676 task_imp->iit_live_donor = (live_donating ? 1 : 0);
1677 ipc_importance_unlock();
1678}
1679
1680/*
1681 * Routine: ipc_importance_task_is_marked_live_donor
1682 * Purpose:
1683 * Query the live donor and donor flags for the given task importance.
1684 * Conditions:
1685 * May be called without taking the importance lock.
1686 * In that case, donor status can change so you must
1687 * check only once for each donation event.
1688 */
1689boolean_t
1690ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp)
1691{
1692 if (IIT_NULL == task_imp) {
1693 return FALSE;
1694 }
1695 return 0 != task_imp->iit_live_donor;
1696}
1697
1698/*
1699 * Routine: ipc_importance_task_is_donor
1700 * Purpose:
1701 * Query the full donor status for the given task importance.
1702 * Conditions:
1703 * May be called without taking the importance lock.
1704 * In that case, donor status can change so you must
1705 * check only once for each donation event.
1706 */
1707boolean_t
1708ipc_importance_task_is_donor(ipc_importance_task_t task_imp)
1709{
1710 if (IIT_NULL == task_imp) {
1711 return FALSE;
1712 }
1713 return ipc_importance_task_is_marked_donor(task_imp) ||
1714 (ipc_importance_task_is_marked_receiver(task_imp) &&
1715 task_imp->iit_assertcnt > 0);
1716}
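/*
 * Worked example (informal): a task that is not marked donor but is a marked
 * receiver currently holding iit_assertcnt == 2 still reports TRUE here,
 * because boosted receivers re-donate while they hold assertions; once the
 * assertions drain to 0 it stops donating. A hypothetical caller gating a
 * donation on this predicate might look like:
 *
 *	if (ipc_importance_task_is_donor(current_task()->task_imp_base)) {
 *		-- attach importance to the outgoing kmsg --
 *	}
 */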
1717
1718/*
1719 * Routine: ipc_importance_task_is_never_donor
1720 * Purpose:
1721 * Query if a given task can ever donate importance.
1722 * Conditions:
1723 * May be called without taking the importance lock.
1724 * Condition is permanent for a given task.
1725 */
1726boolean_t
1727ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp)
1728{
1729 if (IIT_NULL == task_imp) {
1730 return TRUE;
1731 }
1732 return !ipc_importance_task_is_marked_donor(task_imp) &&
1733 !ipc_importance_task_is_marked_live_donor(task_imp) &&
1734 !ipc_importance_task_is_marked_receiver(task_imp);
1735}
1736
1737/*
1738 * Routine: ipc_importance_task_mark_receiver
1739 * Purpose:
1740 * Update the task importance receiver flag.
1741 * Conditions:
1742 * Nothing locked on entrance, nothing locked on exit.
1743 * This can only be invoked before the task is discoverable,
1744 * so there are no atomicity concerns.
1745 */
1746void
1747ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving)
1748{
1749 assert(task_imp != NULL);
1750
1751 ipc_importance_lock();
1752 if (receiving) {
1753 assert(task_imp->iit_assertcnt == 0);
1754 assert(task_imp->iit_externcnt == 0);
1755 assert(task_imp->iit_externdrop == 0);
1756 assert(task_imp->iit_denap == 0);
1757 task_imp->iit_receiver = 1; /* task can receive importance boost */
1758 } else if (task_imp->iit_receiver) {
1759 assert(task_imp->iit_denap == 0);
1760 if (task_imp->iit_assertcnt != 0 || IIT_EXTERN(task_imp) != 0) {
1761 panic("disabling imp_receiver on task with pending importance boosts!");
1762 }
1763 task_imp->iit_receiver = 0;
1764 }
1765 ipc_importance_unlock();
1766}
1767
1768
1769/*
1770 * Routine: ipc_importance_task_is_marked_receiver
1771 * Purpose:
1772 * Query the receiver flag for the given task importance.
1773 * Conditions:
1774 * May be called without taking the importance lock as
1775 * the importance flag can never change after task init.
1776 */
1777boolean_t
1778ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp)
1779{
1780 return IIT_NULL != task_imp && 0 != task_imp->iit_receiver;
1781}
1782
1783
1784/*
1785 * Routine: ipc_importance_task_mark_denap_receiver
1786 * Purpose:
1787 * Update the task importance de-nap receiver flag.
1788 * Conditions:
1789 * Nothing locked on entrance, nothing locked on exit.
1790 * This can only be invoked before the task is discoverable,
1791 * so there are no atomicity concerns.
1792 */
1793void
1794ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap)
1795{
1796 assert(task_imp != NULL);
1797
1798 ipc_importance_lock();
1799 if (denap) {
1800 assert(task_imp->iit_assertcnt == 0);
1801 assert(task_imp->iit_externcnt == 0);
1802 assert(task_imp->iit_receiver == 0);
1803 task_imp->iit_denap = 1; /* task can receive de-nap boost */
1804 } else if (task_imp->iit_denap) {
1805 assert(task_imp->iit_receiver == 0);
1806 if (0 < task_imp->iit_assertcnt || 0 < IIT_EXTERN(task_imp)) {
1807 panic("disabling de-nap on task with pending de-nap boosts!");
1808 }
1809 task_imp->iit_denap = 0;
1810 }
1811 ipc_importance_unlock();
1812}
1813
1814
1815/*
1816 * Routine: ipc_importance_task_is_marked_denap_receiver
1817 * Purpose:
1818 * Query the de-nap receiver flag for the given task importance.
1819 * Conditions:
1820 * May be called without taking the importance lock as
1821 * the de-nap flag can never change after task init.
1822 */
1823boolean_t
1824ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp)
1825{
1826 return IIT_NULL != task_imp && 0 != task_imp->iit_denap;
1827}
1828
1829/*
1830 * Routine: ipc_importance_task_is_denap_receiver
1831 * Purpose:
1832 * Query the full de-nap receiver status for the given task importance.
1833 * For now, that is simply whether the de-nap receiver flag is set.
1834 * Conditions:
1835 * May be called without taking the importance lock as
1836 * the de-nap receiver flag can never change after task init.
1837 */
1838boolean_t
1839ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp)
1840{
1841 return ipc_importance_task_is_marked_denap_receiver(task_imp);
1842}
1843
1844/*
1845 * Routine: ipc_importance_task_is_any_receiver_type
1846 * Purpose:
1847 * Query if the task is marked to receive boosts - either
1848 * importance or denap.
1849 * Conditions:
1850 * May be called without taking the importance lock as both
1851 * the importance and de-nap receiver flags can never change
1852 * after task init.
1853 */
1854boolean_t
1855ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp)
1856{
1857 return ipc_importance_task_is_marked_receiver(task_imp) ||
1858 ipc_importance_task_is_marked_denap_receiver(task_imp);
1859}
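/*
 * Usage sketch (mirrors the checks later in this file): before wiring an
 * inherit or boosting through a port, callers verify that the destination
 * can accept either flavor of boost and otherwise skip the work entirely.
 *
 *	if (space->is_task != TASK_NULL &&
 *	    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
 *		imp_task = space->is_task->task_imp_base;
 *	}
 */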
1860
1861#if 0 /* currently unused */
1862
1863/*
1864 * Routine: ipc_importance_inherit_reference
1865 * Purpose:
1866 * Add a reference to the inherit importance element.
1867 * Conditions:
1868 * Caller must hold a reference on the inherit element.
1869 */
1870static inline void
1871ipc_importance_inherit_reference(ipc_importance_inherit_t inherit)
1872{
1873 ipc_importance_reference(&inherit->iii_elem);
1874}
1875#endif /* currently unused */
1876
1877/*
1878 * Routine: ipc_importance_inherit_release_locked
1879 * Purpose:
1880 * Release a reference on an inherit importance attribute value,
1881 * unlinking and deallocating the attribute if the last reference.
1882 * Conditions:
1883 * Entered with importance lock held, leaves with it unlocked.
1884 */
1885static inline void
1886ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit)
1887{
1888 ipc_importance_release_locked(&inherit->iii_elem);
1889}
1890
1891#if 0 /* currently unused */
1892/*
1893 * Routine: ipc_importance_inherit_release
1894 * Purpose:
1895 * Release a reference on an inherit importance attribute value,
1896 * unlinking and deallocating the attribute if the last reference.
1897 * Conditions:
1898 * nothing locked on entrance, nothing locked on exit.
1899 * May block.
1900 */
1901void
1902ipc_importance_inherit_release(ipc_importance_inherit_t inherit)
1903{
1904 if (III_NULL != inherit) {
1905 ipc_importance_release(&inherit->iii_elem);
1906 }
1907}
1908#endif /* 0 currently unused */
1909
1910/*
1911 * Routine: ipc_importance_for_task
1912 * Purpose:
1913 * Create a reference for the specified task's base importance
1914 * element. If the base importance element doesn't exist, make it and
1915 * bind it to the active task. If the task is inactive, there isn't
1916 * any need to return a new reference.
1917 * Conditions:
1918 * If made is true, a "made" reference is returned (for donating to
1919 * the voucher system). Otherwise an internal reference is returned.
1920 *
1921 * Nothing locked on entry. May block.
1922 */
1923ipc_importance_task_t
1924ipc_importance_for_task(task_t task, boolean_t made)
1925{
1926 ipc_importance_task_t task_elem;
1927 boolean_t first_pass = TRUE;
1928
1929 assert(TASK_NULL != task);
1930
1931retry:
1932 /* No use returning anything for inactive task */
1933 if (!task->active) {
1934 return IIT_NULL;
1935 }
1936
1937 ipc_importance_lock();
1938 task_elem = task->task_imp_base;
1939 if (IIT_NULL != task_elem) {
1940 /* Add a made reference (borrowing active task ref to do it) */
1941 if (made) {
1942 if (0 == task_elem->iit_made++) {
1943 ipc_importance_task_reference_internal(task_elem);
1944 }
1945 } else {
1946 ipc_importance_task_reference_internal(task_elem);
1947 }
1948 ipc_importance_unlock();
1949 return task_elem;
1950 }
1951 ipc_importance_unlock();
1952
1953 if (!first_pass) {
1954 return IIT_NULL;
1955 }
1956 first_pass = FALSE;
1957
1958 /* Need to make one - may race with others (be prepared to drop) */
1959 task_elem = zalloc_flags(ipc_importance_task_zone, Z_WAITOK | Z_ZERO);
1960 if (IIT_NULL == task_elem) {
1961 goto retry;
1962 }
1963
1964 /* one for task, one for return/made */
1965 os_ref_init_count_mask(&task_elem->iit_bits, IIE_TYPE_BITS, &iie_refgrp, 2, IIE_TYPE_TASK);
1966
1967 task_elem->iit_made = (made) ? 1 : 0;
1968 task_elem->iit_task = task; /* take actual ref when we're sure */
1969#if IIE_REF_DEBUG
1970 ipc_importance_counter_init(&task_elem->iit_elem);
1971#endif
1972 queue_init(&task_elem->iit_kmsgs);
1973 queue_init(&task_elem->iit_inherits);
1974
1975 ipc_importance_lock();
1976 if (!task->active) {
1977 ipc_importance_unlock();
1978 zfree(ipc_importance_task_zone, task_elem);
1979 return IIT_NULL;
1980 }
1981
1982 /* did we lose the race? */
1983 if (IIT_NULL != task->task_imp_base) {
1984 ipc_importance_unlock();
1985 zfree(ipc_importance_task_zone, task_elem);
1986 goto retry;
1987 }
1988
1989 /* we won the race */
1990 task->task_imp_base = task_elem;
1991 task_reference_grp(task, TASK_GRP_INTERNAL);
1992#if DEVELOPMENT || DEBUG
1993 queue_enter(&global_iit_alloc_queue, task_elem, ipc_importance_task_t, iit_allocation);
1994 task_importance_update_owner_info(task);
1995#endif
1996 ipc_importance_unlock();
1997
1998 return task_elem;
1999}
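/*
 * Illustrative sketch (comment only; the calling site is hypothetical):
 * taking an internal reference for short-term kernel use and dropping it
 * when done. A "made" reference (made == TRUE) would instead be handed to
 * the voucher layer, which eventually returns it through
 * ipc_importance_release_value().
 *
 *	ipc_importance_task_t imp = ipc_importance_for_task(task, FALSE);
 *	if (IIT_NULL != imp) {
 *		-- inspect or boost the task importance --
 *		ipc_importance_task_release(imp);
 *	}
 */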
2000
2001#if DEVELOPMENT || DEBUG
2002void
2003task_importance_update_owner_info(task_t task)
2004{
2005 if (task != TASK_NULL && task->task_imp_base != IIT_NULL) {
2006 ipc_importance_task_t task_elem = task->task_imp_base;
2007
2008 task_elem->iit_bsd_pid = task_pid(task);
2009 if (get_bsdtask_info(task)) {
2010 strncpy(&task_elem->iit_procname[0], proc_name_address(get_bsdtask_info(task)), 16);
2011 task_elem->iit_procname[16] = '\0';
2012 } else {
2013 strncpy(&task_elem->iit_procname[0], "unknown", 16);
2014 }
2015 }
2016}
2017#endif
2018
2019static int
2020task_importance_task_get_pid(ipc_importance_task_t iit)
2021{
2022#if DEVELOPMENT || DEBUG
2023 return (int)iit->iit_bsd_pid;
2024#else
2025 return task_pid(iit->iit_task);
2026#endif
2027}
2028
2029/*
2030 * Routine: ipc_importance_reset_locked
2031 * Purpose:
2032 * Reset a task's IPC importance (the task is going away or exec'ing)
2033 *
2034 * Remove the donor bit and legacy externalized assertions from the
2035 * current task importance and see if that wipes out downstream donations.
2036 * Conditions:
2037 * importance lock held.
2038 */
2039
2040static void
2041ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor)
2042{
2043 boolean_t before_donor, after_donor;
2044
2045 /* remove the donor bit, live-donor bit and externalized boosts */
2046 before_donor = ipc_importance_task_is_donor(task_imp);
2047 if (donor) {
2048 task_imp->iit_donor = 0;
2049 }
2050 assert(IIT_LEGACY_EXTERN(task_imp) <= IIT_EXTERN(task_imp));
2051 assert(task_imp->iit_legacy_externcnt <= task_imp->iit_externcnt);
2052 assert(task_imp->iit_legacy_externdrop <= task_imp->iit_externdrop);
2053 task_imp->iit_externcnt -= task_imp->iit_legacy_externcnt;
2054 task_imp->iit_externdrop -= task_imp->iit_legacy_externdrop;
2055
2056 /* assert(IIT_LEGACY_EXTERN(task_imp) <= task_imp->iit_assertcnt); */
2057 if (IIT_EXTERN(task_imp) < task_imp->iit_assertcnt) {
2058 task_imp->iit_assertcnt -= IIT_LEGACY_EXTERN(task_imp);
2059 } else {
2060 task_imp->iit_assertcnt = IIT_EXTERN(task_imp);
2061 }
2062 task_imp->iit_legacy_externcnt = 0;
2063 task_imp->iit_legacy_externdrop = 0;
2064 after_donor = ipc_importance_task_is_donor(task_imp);
2065
2066 /* propagate a downstream drop if there was a change in donor status */
2067 if (after_donor != before_donor) {
2068 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, FALSE);
2069 }
2070}
2071
2072/*
2073 * Routine: ipc_importance_reset
2074 * Purpose:
2075 * Reset a task's IPC importance
2076 *
2077 * The task is being reset, although staying around. Arrange to have the
2078 * external state of the task reset from the importance.
2079 * Conditions:
2080 * importance lock not held.
2081 */
2082
2083void
2084ipc_importance_reset(ipc_importance_task_t task_imp, boolean_t donor)
2085{
2086 if (IIT_NULL == task_imp) {
2087 return;
2088 }
2089 ipc_importance_lock();
2090 ipc_importance_reset_locked(task_imp, donor);
2091 ipc_importance_unlock();
2092}
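/*
 * Usage sketch (hypothetical calling site): when a task is reset (e.g. at
 * exec), its donor bit and legacy externalized boosts no longer describe the
 * new image, so they are stripped while internal linkage is preserved.
 *
 *	ipc_importance_reset(task->task_imp_base, TRUE);
 */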
2093
2094/*
2095 * Routine: ipc_importance_disconnect_task
2096 * Purpose:
2097 * Disconnect a task from its importance.
2098 *
2099 * Clear the task pointer from the importance and drop the
2100 * reference the task held on the importance object. Before
2101 * doing that, reset the effects the current task holds on
2102 * the importance and see if that wipes out downstream donations.
2103 *
2104 * We allow the upstream boosts to continue to affect downstream
2105 * even though the local task is being effectively pulled from
2106 * the chain.
2107 * Conditions:
2108 * Nothing locked.
2109 */
2110void
2111ipc_importance_disconnect_task(task_t task)
2112{
2113 ipc_importance_task_t task_imp;
2114
2115 task_lock(task);
2116 ipc_importance_lock();
2117 task_imp = task->task_imp_base;
2118
2119 /* did somebody beat us to it? */
2120 if (IIT_NULL == task_imp) {
2121 ipc_importance_unlock();
2122 task_unlock(task);
2123 return;
2124 }
2125
2126 /* disconnect the task from this importance */
2127 assert(task_imp->iit_task == task);
2128 task_imp->iit_task = TASK_NULL;
2129 task->task_imp_base = IIT_NULL;
2130 task_unlock(task);
2131
2132 /* reset the effects the current task holds on the importance */
2133 ipc_importance_reset_locked(task_imp, TRUE);
2134 
2135 ipc_importance_task_release_locked(task_imp);
2136 /* importance unlocked */
2137
2138 /* deallocate the task now that the importance is unlocked */
2139 task_deallocate_grp(task, TASK_GRP_INTERNAL);
2140}
2141
2142/*
2143 * Routine: ipc_importance_exec_switch_task
2144 * Purpose:
2145 * Switch importance task base from old task to new task in exec.
2146 *
2147 * Create an ipc importance linkage from old task to new task,
2148 * once the linkage is created, switch the importance task base
2149 * from old task to new task. After the switch, the linkage will
2150 * represent importance linkage from new task to old task with
2151 * watch port importance inheritance linked to new task.
2152 * Conditions:
2153 * Nothing locked.
2154 * Returns a reference on importance inherit.
2155 */
2156ipc_importance_inherit_t
2157ipc_importance_exec_switch_task(
2158 task_t old_task,
2159 task_t new_task)
2160{
2161 ipc_importance_inherit_t inherit = III_NULL;
2162 ipc_importance_task_t old_task_imp = IIT_NULL;
2163 ipc_importance_task_t new_task_imp = IIT_NULL;
2164
2165 task_importance_reset(old_task);
2166 
2167 /* Create an importance linkage from old_task to new_task */
2168 inherit = ipc_importance_inherit_from_task(old_task, new_task);
2169
2170 /* Switch task importance base from old task to new task */
2171 ipc_importance_lock();
2172
2173 old_task_imp = old_task->task_imp_base;
2174 new_task_imp = new_task->task_imp_base;
2175
2176 old_task_imp->iit_task = new_task;
2177 new_task_imp->iit_task = old_task;
2178
2179 old_task->task_imp_base = new_task_imp;
2180 new_task->task_imp_base = old_task_imp;
2181
2182#if DEVELOPMENT || DEBUG
2183 /*
2184 * Update the pid and proc name for the importance base, if any
2185 */
2186 task_importance_update_owner_info(new_task);
2187#endif
2188 ipc_importance_unlock();
2189
2190 return inherit;
2191}
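/*
 * Worked example of the swap above (informal): with O = old_task's original
 * importance base and N = new_task's, the pointer exchange yields
 *
 *	before:  old_task->task_imp_base == O,  O->iit_task == old_task
 *	         new_task->task_imp_base == N,  N->iit_task == new_task
 *	after:   old_task->task_imp_base == N,  N->iit_task == old_task
 *	         new_task->task_imp_base == O,  O->iit_task == new_task
 *
 * so boosts already routed at O now land on new_task, while the inherit
 * created above keeps the reverse (new -> old) linkage alive.
 */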
2192
2193/*
2194 * Routine: ipc_importance_check_circularity
2195 * Purpose:
2196 * Check if queueing "port" in a message for "dest"
2197 * would create a circular group of ports and messages.
2198 *
2199 * If no circularity (FALSE returned), then "port"
2200 * is changed from "in limbo" to "in transit".
2201 *
2202 * That is, we want to set port->ip_destination == dest,
2203 * but guaranteeing that this doesn't create a circle
2204 * port->ip_destination->ip_destination->... == port
2205 *
2206 * Additionally, if port was successfully changed to "in transit",
2207 * propagate boost assertions from the "in limbo" port to all
2208 * the ports in the chain, and, if the destination task accepts
2209 * boosts, to the destination task.
2210 *
2211 * Conditions:
2212 * No ports locked. References held for "port" and "dest".
2213 */
2214
2215boolean_t
2216ipc_importance_check_circularity(
2217 ipc_port_t port,
2218 ipc_port_t dest)
2219{
2220 ipc_importance_task_t imp_task = IIT_NULL;
2221 ipc_importance_task_t release_imp_task = IIT_NULL;
2222 boolean_t imp_lock_held = FALSE;
2223 int assertcnt = 0;
2224 ipc_port_t base;
2225 struct turnstile *send_turnstile = TURNSTILE_NULL;
2226 struct task_watchport_elem *watchport_elem = NULL;
2227 bool took_base_ref = false;
2228
2229 assert(port != IP_NULL);
2230 assert(dest != IP_NULL);
2231
2232 if (port == dest) {
2233 return TRUE;
2234 }
2235 base = dest;
2236
2237 /* Check if destination needs a turnstile */
2238 ipc_port_send_turnstile_prepare(dest);
2239
2240 /* port is in limbo, so donation status is safe to latch */
2241 if (port->ip_impdonation != 0) {
2242 imp_lock_held = TRUE;
2243 ipc_importance_lock();
2244 }
2245
2246 /*
2247 * First try a quick check that can run in parallel.
2248 * No circularity if dest is not in transit.
2249 */
2250 ip_mq_lock(port);
2251
2252 /*
2253 * Even if port is just carrying assertions for others,
2254 * we need the importance lock.
2255 */
2256 if (port->ip_impcount > 0 && !imp_lock_held) {
2257 if (!ipc_importance_lock_try()) {
2258 ip_mq_unlock(port);
2259 ipc_importance_lock();
2260 ip_mq_lock(port);
2261 }
2262 imp_lock_held = TRUE;
2263 }
2264
2265 if (ip_mq_lock_try(dest)) {
2266 if (!ip_in_transit(dest)) {
2267 goto not_circular;
2268 }
2269
2270 /* dest is in transit; further checking necessary */
2271
2272 ip_mq_unlock(dest);
2273 }
2274 ip_mq_unlock(port);
2275
2276 /*
2277 * We're about to pay the cost to serialize,
2278 * just go ahead and grab importance lock.
2279 */
2280 if (!imp_lock_held) {
2281 ipc_importance_lock();
2282 imp_lock_held = TRUE;
2283 }
2284
2285 ipc_port_multiple_lock(); /* massive serialization */
2286
2287 took_base_ref = ipc_port_destination_chain_lock(dest, &base);
2288 /* all ports in chain from dest to base, inclusive, are locked */
2289
2290 if (port == base) {
2291 /* circularity detected! */
2292
2293 ipc_port_multiple_unlock();
2294
2295 /* port (== base) is in limbo */
2296
2297 require_ip_active(port);
2298 assert(ip_in_limbo(port));
2299 assert(!took_base_ref);
2300
2301 base = dest;
2302 while (base != IP_NULL) {
2303 ipc_port_t next;
2304
2305 /* base is in transit or in limbo */
2306
2307 require_ip_active(base);
2308 assert(base->ip_receiver_name == MACH_PORT_NULL);
2309 next = ip_get_destination(base);
2310 ip_mq_unlock(base);
2311 base = next;
2312 }
2313
2314 if (imp_lock_held) {
2315 ipc_importance_unlock();
2316 }
2317
2318 ipc_port_send_turnstile_complete(dest);
2319 return TRUE;
2320 }
2321
2322 /*
2323 * The guarantee: lock port while the entire chain is locked.
2324 * Once port is locked, we can take a reference to dest,
2325 * add port to the chain, and unlock everything.
2326 */
2327
2328 ip_mq_lock(port);
2329 ipc_port_multiple_unlock();
2330
2331not_circular:
2332 /* port is in limbo */
2333 require_ip_active(port);
2334 assert(ip_in_limbo(port));
2335
2336 /* Port is being enqueued in a kmsg, remove the watchport boost in order to push on destination port */
2337 watchport_elem = ipc_port_clear_watchport_elem_internal(port);
2338
2339 /* Check if the port is being enqueued as a part of sync bootstrap checkin */
2340 if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
2341 port->ip_sync_bootstrap_checkin = 1;
2342 }
2343
2344 ip_reference(dest);
2345
2346 /* port transitions to IN-TRANSIT state */
2347 assert(port->ip_receiver_name == MACH_PORT_NULL);
2348 port->ip_destination = dest;
2349
2350 /* must have been in limbo or still bound to a task */
2351 assert(port->ip_tempowner != 0);
2352
2353 /*
2354 * We delayed dropping assertions from a specific task.
2355 * Cache that info now (we'll drop assertions and the
2356 * task reference below).
2357 */
2358 release_imp_task = ip_get_imp_task(port);
2359 if (IIT_NULL != release_imp_task) {
2360 port->ip_imp_task = IIT_NULL;
2361 }
2362 assertcnt = port->ip_impcount;
2363
2364 /* take the port out of limbo w.r.t. assertions */
2365 port->ip_tempowner = 0;
2366
2367 /*
2368 * Setup linkage for source port if it has a send turnstile i.e. it has
2369 * a thread waiting in send or has a port enqueued in it or has sync ipc
2370 * push from a special reply port.
2371 */
2372 if (port_send_turnstile(port)) {
2373 send_turnstile = turnstile_prepare((uintptr_t)port,
2374 port_send_turnstile_address(port),
2375 TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
2376 
2377 turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
2378 (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
2379
2380 /* update complete and turnstile complete called after dropping all locks */
2381 }
2382 /* now unlock chain */
2383
2384 ip_mq_unlock(port);
2385
2386 for (;;) {
2387 ipc_port_t next;
2388 /* every port along the chain tracks assertions behind it */
2389 ipc_port_impcount_delta(dest, assertcnt, base);
2390
2391 if (dest == base) {
2392 break;
2393 }
2394
2395 /* port is in transit */
2396
2397 require_ip_active(dest);
2398 assert(ip_in_transit(dest));
2399 assert(dest->ip_tempowner == 0);
2400 
2401 next = ip_get_destination(dest);
2402 ip_mq_unlock(dest);
2403 dest = next;
2404 }
2405
2406 /* base is not in transit */
2407 assert(!ip_in_transit(base));
2408
2409 /*
2410 * Find the task to boost (if any).
2411 * We will boost "through" ports that don't know
2412 * about inheritance to deliver receive rights that
2413 * do.
2414 */
2415 if (ip_active(base) && (assertcnt > 0)) {
2416 assert(imp_lock_held);
2417 if (base->ip_tempowner != 0) {
2418 if (IIT_NULL != ip_get_imp_task(base)) {
2419 /* specified tempowner task */
2420 imp_task = ip_get_imp_task(base);
2421 assert(ipc_importance_task_is_any_receiver_type(imp_task));
2422 }
2423 /* otherwise don't boost current task */
2424 } else if (ip_in_a_space(base)) {
2425 ipc_space_t space = ip_get_receiver(base);
2426 /* only spaces with boost-accepting tasks */
2427 if (space->is_task != TASK_NULL &&
2428 ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
2429 imp_task = space->is_task->task_imp_base;
2430 }
2431 }
2432 
2433 /* take reference before unlocking base */
2434 if (imp_task != IIT_NULL) {
2435 ipc_importance_task_reference(imp_task);
2436 }
2437 }
2438
2439 ip_mq_unlock(base);
2440
2441 /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
2442 if (send_turnstile) {
2443 turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
2444 
2445 /* Take the port lock to call turnstile complete */
2446 ip_mq_lock(port);
2447 turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
2448 send_turnstile = TURNSTILE_NULL;
2449 ip_mq_unlock(port);
2450 turnstile_cleanup();
2451 }
2452
2453 /*
2454 * Transfer assertions now that the ports are unlocked.
2455 * Avoid extra overhead if transferring to/from the same task.
2456 *
2457 * NOTE: If a transfer is occurring, the new assertions will
2458 * be added to imp_task BEFORE the importance lock is unlocked.
2459 * This is critical - to avoid decrements coming from the kmsgs
2460 * beating the increment to the task.
2461 */
2462 boolean_t transfer_assertions = (imp_task != release_imp_task);
2463
2464 if (imp_task != IIT_NULL) {
2465 assert(imp_lock_held);
2466 if (transfer_assertions) {
2467 ipc_importance_task_hold_internal_assertion_locked(imp_task, assertcnt);
2468 }
2469 }
2470
2471 if (release_imp_task != IIT_NULL) {
2472 assert(imp_lock_held);
2473 if (transfer_assertions) {
2474 ipc_importance_task_drop_internal_assertion_locked(release_imp_task, assertcnt);
2475 }
2476 }
2477
2478 if (imp_lock_held) {
2479 ipc_importance_unlock();
2480 }
2481
2482 if (took_base_ref) {
2483 ip_release(base);
2484 }
2485
2486 if (imp_task != IIT_NULL) {
2487 ipc_importance_task_release(imp_task);
2488 }
2489 
2490 if (release_imp_task != IIT_NULL) {
2491 ipc_importance_task_release(release_imp_task);
2492 }
2493
2494 if (watchport_elem) {
2495 task_watchport_elem_deallocate(watchport_elem);
2496 }
2497
2498 return FALSE;
2499}
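/*
 * Simplified sketch of the cycle test above (comment only; locking,
 * turnstiles and importance transfer omitted): queueing "port" inside
 * "dest" is illegal iff walking dest's destination chain reaches port.
 *
 *	for (ipc_port_t p = dest; p != IP_NULL; p = ip_get_destination(p)) {
 *		if (p == port) {
 *			return TRUE;	-- circle: port ... -> dest -> ... -> port --
 *		}
 *	}
 *	port->ip_destination = dest;	-- no circle found: enter transit --
 *	return FALSE;
 *
 * The real routine must perform this walk under ipc_port_multiple_lock()
 * so the chain cannot mutate mid-walk.
 */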
2500
2501/*
2502 * Routine: ipc_importance_send
2503 * Purpose:
2504 * Post the importance voucher attribute [if sent] or a static
2505 * importance boost depending upon options and conditions.
2506 * Conditions:
2507 * Destination port locked on entry and exit, may be dropped during the call.
2508 * Returns:
2509 * A boolean identifying if the port lock was temporarily dropped.
2510 */
2511boolean_t
2512ipc_importance_send(
2513 ipc_kmsg_t kmsg,
2514 mach_msg_option_t option)
2515{
2516 mach_msg_header_t *hdr = ikm_header(kmsg);
2517 ipc_port_t port = hdr->msgh_remote_port;
2518 ipc_port_t voucher_port;
2519 boolean_t port_lock_dropped = FALSE;
2520 ipc_importance_elem_t elem;
2521 task_t task;
2522 ipc_importance_task_t task_imp;
2523 kern_return_t kr;
2524
2525 assert(IP_VALID(port));
2526
2527 /* If no donation to be made, return quickly */
2528 if ((port->ip_impdonation == 0) ||
2529 (option & MACH_SEND_NOIMPORTANCE) != 0) {
2530 return port_lock_dropped;
2531 }
2532
2533 task = current_task();
2534
2535 /* If forced sending a static boost, go update the port */
2536 if ((option & MACH_SEND_IMPORTANCE) != 0) {
2537 /* acquire the importance lock while trying to hang on to port lock */
2538 if (!ipc_importance_lock_try()) {
2539 port_lock_dropped = TRUE;
2540 ip_mq_unlock(port);
2541 ipc_importance_lock();
2542 }
2543 goto portupdate;
2544 }
2545
2546 task_imp = task->task_imp_base;
2547
2548 /* If the sender can never donate importance, nothing to do */
2549 if (ipc_importance_task_is_never_donor(task_imp)) {
2550 return port_lock_dropped;
2551 }
2552
2553 elem = IIE_NULL;
2554
2555 /* If importance receiver and passing a voucher, look for importance in there */
2556 voucher_port = ipc_kmsg_get_voucher_port(kmsg);
2557 if (IP_VALID(voucher_port) &&
2558 ipc_importance_task_is_marked_receiver(task_imp)) {
2559 mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED];
2560 mach_voucher_attr_value_handle_array_size_t val_count;
2561 ipc_voucher_t voucher;
2562
2563 assert(ip_kotype(voucher_port) == IKOT_VOUCHER);
2564 voucher = (ipc_voucher_t)ipc_kobject_get_raw(voucher_port,
2565 IKOT_VOUCHER);
2566
2567 /* check to see if the voucher has an importance attribute */
2568 val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED;
2569 kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher,
2570 vals, &val_count);
2571 assert(KERN_SUCCESS == kr);
2572
2573 /*
2574 * Only use importance associated with our task (either directly
2575 * or through an inherit that donates to our task).
2576 */
2577 if (0 < val_count) {
2578 ipc_importance_elem_t check_elem;
2579
2580 check_elem = (ipc_importance_elem_t)vals[0];
2581 assert(IIE_NULL != check_elem);
2582 if (IIE_TYPE_INHERIT == IIE_TYPE(check_elem)) {
2583 ipc_importance_inherit_t inherit;
2584 inherit = (ipc_importance_inherit_t) check_elem;
2585 if (inherit->iii_to_task == task_imp) {
2586 elem = check_elem;
2587 }
2588 } else if (check_elem == (ipc_importance_elem_t)task_imp) {
2589 elem = check_elem;
2590 }
2591 }
2592 }
2593
2594 /* If we haven't found an importance attribute to send yet, use the task's */
2595 if (IIE_NULL == elem) {
2596 elem = (ipc_importance_elem_t)task_imp;
2597 }
2598
2599 /* take a reference for the message to hold */
2600 ipc_importance_reference_internal(elem);
2601
2602 /* acquire the importance lock while trying to hang on to port lock */
2603 if (!ipc_importance_lock_try()) {
2604 port_lock_dropped = TRUE;
2605 ip_mq_unlock(port);
2606 ipc_importance_lock();
2607 }
2608
2609 /* link kmsg onto the donor element propagation chain */
2610 ipc_importance_kmsg_link(kmsg, elem);
2611 /* elem reference transferred to kmsg */
2612
2613 incr_ref_counter(elem->iie_kmsg_refs_added);
2614
2615 /* If the sender isn't currently a donor, no need to apply boost */
2616 if (!ipc_importance_task_is_donor(task_imp)) {
2617 ipc_importance_unlock();
2618
2619 /* re-acquire port lock, if needed */
2620 if (TRUE == port_lock_dropped) {
2621 ip_mq_lock(port);
2622 }
2623
2624 return port_lock_dropped;
2625 }
2626
2627portupdate:
2628 /* Mark the fact that we are (currently) donating through this message */
2629 hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2630
2631 /*
2632 * If we need to relock the port, do it with the importance still locked.
2633 * This assures we get to add the importance boost through the port to
2634 * the task BEFORE anyone else can attempt to undo that operation if
2635 * the sender lost donor status.
2636 */
2637 if (TRUE == port_lock_dropped) {
2638 ip_mq_lock(port);
2639 }
2640
2641 ipc_importance_assert_held();
2642
2643#if IMPORTANCE_TRACE
2644 if (kdebug_enable) {
2645 mach_msg_max_trailer_t *dbgtrailer = ipc_kmsg_get_trailer(kmsg, false);
2646 unsigned int sender_pid = dbgtrailer->msgh_audit.val[5];
2647 mach_msg_id_t imp_msgh_id = hdr->msgh_id;
2648 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START,
2649 task_pid(task), sender_pid, imp_msgh_id, 0, 0);
2650 }
2651#endif /* IMPORTANCE_TRACE */
2652
2653 mach_port_delta_t delta = 1;
2654 boolean_t need_port_lock;
2655 task_imp = IIT_NULL;
2656
2657 /* adjust port boost count (with importance and port locked) */
2658 need_port_lock = ipc_port_importance_delta_internal(port, IPID_OPTION_NORMAL, &delta, &task_imp);
2659 /* hold a reference on task_imp */
2660
2661 /* if we need to adjust a task importance as a result, apply that here */
2662 if (IIT_NULL != task_imp && delta != 0) {
2663 assert(delta == 1);
2664
2665 /* if this results in a change of state, propagate the transition */
2666 if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, delta)) {
2667 /* can't hold the port lock during task transition(s) */
2668 if (!need_port_lock) {
2669 need_port_lock = TRUE;
2670 ip_mq_unlock(port);
2671 }
2672 ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_HOLD, TRUE);
2673 }
2674 }
2675
2676 if (task_imp) {
2677 ipc_importance_task_release_locked(task_imp);
2678 /* importance unlocked */
2679 } else {
2680 ipc_importance_unlock();
2681 }
2682
2683 if (need_port_lock) {
2684 port_lock_dropped = TRUE;
2685 ip_mq_lock(port);
2686 }
2687
2688 return port_lock_dropped;
2689}
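/*
 * Caller-contract sketch (comment only; the calling site is hypothetical):
 * the send path enters with the destination port locked. If this routine had
 * to drop that lock in order to take the importance lock in the right order,
 * it reports so, and the caller must revalidate any port state it cached.
 *
 *	ip_mq_lock(port);
 *	if (ipc_importance_send(kmsg, option)) {
 *		-- port lock was dropped and retaken; re-check port state --
 *	}
 */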
2690
2691/*
2692 * Routine: ipc_importance_inherit_from_kmsg
2693 * Purpose:
2694 * Create a "made" reference for an importance attribute representing
2695 * an inheritance between the sender of a message (if linked) and the
2696 * current task importance. If the message is not linked, a static
2697 * boost may be created, based on the boost state of the message.
2698 *
2699 * Any transfer from kmsg linkage to inherit linkage must be atomic.
2700 *
2701 * If the task is inactive, there isn't any need to return a new reference.
2702 * Conditions:
2703 * Nothing locked on entry. May block.
2704 */
2705static ipc_importance_inherit_t
2706ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg)
2707{
2708 ipc_importance_task_t task_imp = IIT_NULL;
2709 ipc_importance_elem_t from_elem = kmsg->ikm_importance;
2710 ipc_importance_elem_t elem;
2711 task_t task_self = current_task();
2712
2713 mach_msg_header_t *hdr = ikm_header(kmsg);
2714 ipc_port_t port = hdr->msgh_remote_port;
2715 ipc_importance_inherit_t inherit = III_NULL;
2716 ipc_importance_inherit_t alloc = III_NULL;
2717 boolean_t cleared_self_donation = FALSE;
2718 boolean_t donating;
2719 uint32_t depth = 1;
2720
2721 /* The kmsg must have an importance donor or static boost to proceed */
2722 if (IIE_NULL == kmsg->ikm_importance &&
2723 !MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
2724 return III_NULL;
2725 }
2726
2727 /*
2728 * No need to set up an inherit linkage if the dest isn't a receiver
2729 * of one type or the other.
2730 */
2731 if (!ipc_importance_task_is_any_receiver_type(task_self->task_imp_base)) {
2732 ipc_importance_lock();
2733 goto out_locked;
2734 }
2735
2736 /* Grab a reference on the importance of the destination */
2737 task_imp = ipc_importance_for_task(task_self, FALSE);
2738
2739 ipc_importance_lock();
2740
2741 if (IIT_NULL == task_imp) {
2742 goto out_locked;
2743 }
2744
2745 incr_ref_counter(task_imp->iit_elem.iie_task_refs_added_inherit_from);
2746
2747 /* If message is already associated with an inherit... */
2748 if (IIE_TYPE_INHERIT == IIE_TYPE(from_elem)) {
2749 ipc_importance_inherit_t from_inherit = (ipc_importance_inherit_t)from_elem;
2750
2751 /* already targeting our task? - just use it */
2752 if (from_inherit->iii_to_task == task_imp) {
2753 /* clear self-donation if not also present in inherit */
2754 if (!from_inherit->iii_donating &&
2755 MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
2756 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2757 cleared_self_donation = TRUE;
2758 }
2759 inherit = from_inherit;
2760 } else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) {
2761 ipc_importance_task_t to_task;
2762 ipc_importance_elem_t unlinked_from;
2763
2764 /*
2765 * Chain too long. Switch to looking
2766 * directly at the from_inherit's to-task
2767 * as our source of importance.
2768 */
2769 to_task = from_inherit->iii_to_task;
2770 ipc_importance_task_reference(to_task);
2771 from_elem = (ipc_importance_elem_t)to_task;
2772 depth = III_DEPTH_RESET | 1;
2773
2774 /* Fixup the kmsg linkage to reflect change */
2775 unlinked_from = ipc_importance_kmsg_unlink(kmsg);
2776 assert(unlinked_from == (ipc_importance_elem_t)from_inherit);
2777 ipc_importance_kmsg_link(kmsg, from_elem);
2778 ipc_importance_inherit_release_locked(from_inherit);
2779 /* importance unlocked */
2780 ipc_importance_lock();
2781 } else {
2782 /* inheriting from an inherit */
2783 depth = from_inherit->iii_depth + 1;
2784 }
2785 }
2786
2787 /*
2788 * Don't allow a task to inherit from itself (would keep it permanently
2789 * boosted even if all other donors to the task went away).
2790 */
2791
2792 if (from_elem == (ipc_importance_elem_t)task_imp) {
2793 goto out_locked;
2794 }
2795
2796 /*
2797 * But if the message isn't associated with any linked source, it is
2798 * intended to be permanently boosting (static boost from kernel).
2799 * In that case DO let the process permanently boost itself.
2800 */
2801 if (IIE_NULL == from_elem) {
2802 assert(MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits));
2803 ipc_importance_task_reference_internal(task_imp);
2804 from_elem = (ipc_importance_elem_t)task_imp;
2805 }
2806
2807 /*
2808 * Now that we have the from_elem figured out,
2809 * check to see if we already have an inherit for this pairing
2810 */
2811 while (III_NULL == inherit) {
2812 inherit = ipc_importance_inherit_find(from_elem, task_imp, depth);
2813
2814 /* Do we have to allocate a new inherit */
2815 if (III_NULL == inherit) {
2816 if (III_NULL != alloc) {
2817 break;
2818 }
2819
2820 /* allocate space */
2821 ipc_importance_unlock();
2822 alloc = (ipc_importance_inherit_t)
2823 zalloc(ipc_importance_inherit_zone);
2824 ipc_importance_lock();
2825 }
2826 }
2827
2828 /* snapshot the donating status while we have importance locked */
2829 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits);
2830
2831 if (III_NULL != inherit) {
2832 /* We found one, piggyback on that */
2833 assert(0 < III_REFS(inherit));
2834 assert(0 < IIE_REFS(inherit->iii_from_elem));
2835 assert(inherit->iii_externcnt >= inherit->iii_made);
2836
2837 /* add in a made reference */
2838 if (0 == inherit->iii_made++) {
2839 ipc_importance_inherit_reference_internal(inherit);
2840 }
2841
2842 /* Reflect the inherit's change of status into the task boosts */
2843 if (0 == III_EXTERN(inherit)) {
2844 assert(!inherit->iii_donating);
2845 inherit->iii_donating = donating;
2846 if (donating) {
2847 task_imp->iit_externcnt += inherit->iii_externcnt;
2848 task_imp->iit_externdrop += inherit->iii_externdrop;
2849 }
2850 } else {
2851 assert(donating == inherit->iii_donating);
2852 }
2853
2854 /* add in an external reference for this use of the inherit */
2855 inherit->iii_externcnt++;
2856 } else {
2857 /* initialize the previously allocated space */
2858 inherit = alloc;
2859 os_ref_init_mask(&inherit->iii_bits, IIE_TYPE_BITS, &iie_refgrp, IIE_TYPE_INHERIT);
2860 inherit->iii_made = 1;
2861 inherit->iii_externcnt = 1;
2862 inherit->iii_externdrop = 0;
2863 inherit->iii_depth = depth;
2864 inherit->iii_to_task = task_imp;
2865 inherit->iii_from_elem = IIE_NULL;
2866 queue_init(&inherit->iii_kmsgs);
2867
2868 if (donating) {
2869 inherit->iii_donating = TRUE;
2870 } else {
2871 inherit->iii_donating = FALSE;
2872 }
2873
2874 /*
2875 * Chain our new inherit on the element it inherits from.
2876 * The new inherit takes our reference on from_elem.
2877 */
2878 ipc_importance_inherit_link(inherit, from_elem);
2879
2880#if IIE_REF_DEBUG
2881 ipc_importance_counter_init(&inherit->iii_elem);
2882 from_elem->iie_kmsg_refs_inherited++;
2883 task_imp->iit_elem.iie_task_refs_inherited++;
2884#endif
2885 }
2886
2887out_locked:
2888 /*
2889 * for those paths that came straight here: snapshot the donating status
2890 * (this should match previous snapshot for other paths).
2891 */
2892 donating = MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits);
2893
2894 /* unlink the kmsg inheritance (if any) */
2895 elem = ipc_importance_kmsg_unlink(kmsg);
2896 assert(elem == from_elem);
2897
2898 /* If found inherit and donating, reflect that in the task externcnt */
2899 if (III_NULL != inherit && donating) {
2900 task_imp->iit_externcnt++;
2901 /* The owner of receive right might have changed, take the internal assertion */
2902 ipc_importance_task_hold_internal_assertion_locked(task_imp, 1);
2903 /* may have dropped and retaken importance lock */
2904 }
2905
2906 /* If we didn't create a new inherit, we have some resources to release */
2907 if (III_NULL == inherit || inherit != alloc) {
2908 if (IIE_NULL != from_elem) {
2909 if (III_NULL != inherit) {
2910 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
2911 } else {
2912 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
2913 }
2914 ipc_importance_release_locked(from_elem);
2915 /* importance unlocked */
2916 } else {
2917 ipc_importance_unlock();
2918 }
2919
2920 if (IIT_NULL != task_imp) {
2921 if (III_NULL != inherit) {
2922 incr_ref_counter(task_imp->iit_elem.iie_task_refs_coalesced);
2923 }
2924 ipc_importance_task_release(task_imp);
2925 }
2926
2927 if (III_NULL != alloc) {
2928 zfree(ipc_importance_inherit_zone, alloc);
2929 }
2930 } else {
2931 /* from_elem and task_imp references transferred to new inherit */
2932 ipc_importance_unlock();
2933 }
2934
2935 /*
2936 * decrement port boost count
2937 * This is OK to do without the importance lock as we atomically
2938 * unlinked the kmsg and snapshotted the donating state while holding
2939 * the importance lock
2940 */
2941 if (donating || cleared_self_donation) {
2942 ip_mq_lock(port);
2943 /* drop importance from port and destination task */
2944 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
2945 ip_mq_unlock(port);
2946 }
2947 }
2948
2949 if (III_NULL != inherit) {
2950 /* have an associated importance attr, even if currently not donating */
2951 hdr->msgh_bits |= MACH_MSGH_BITS_RAISEIMP;
2952 } else {
2953 /* we won't have an importance attribute associated with our message */
2954 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
2955 }
2956
2957 return inherit;
2958}
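/*
 * Worked example of the depth handling above (informal): receiving from an
 * inherit of depth d normally produces a new inherit of depth d + 1, so a
 * relay chain A -> B -> C yields depths 1, 2, and so on. Once a chain hits
 * III_DEPTH_MAX, the code stops chaining and instead inherits directly from
 * that inherit's to-task with depth reset to (III_DEPTH_RESET | 1), which
 * bounds both lookup cost and the length of release cascades.
 */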
2959
2960/*
2961 * Routine: ipc_importance_inherit_from_task
2962 * Purpose:
2963 * Create a reference for an importance attribute representing
2964 * an inheritance between the to_task and from_task. The iii
2965 * created will be marked as III_FLAGS_FOR_OTHERS.
2966 *
2967 * It will not dedup any iii which are not marked as III_FLAGS_FOR_OTHERS.
2968 *
2969 * If the task is inactive, there isn't any need to return a new reference.
2970 * Conditions:
2971 * Nothing locked on entry. May block.
2972 * It should not be called from voucher subsystem.
2973 */
2974static ipc_importance_inherit_t
2975ipc_importance_inherit_from_task(
2976 task_t from_task,
2977 task_t to_task)
2978{
2979 ipc_importance_task_t to_task_imp = IIT_NULL;
2980 ipc_importance_task_t from_task_imp = IIT_NULL;
2981 ipc_importance_elem_t from_elem = IIE_NULL;
2982
2983 ipc_importance_inherit_t inherit = III_NULL;
2984 ipc_importance_inherit_t alloc = III_NULL;
2985 boolean_t donating;
2986 uint32_t depth = 1;
2987
2988 to_task_imp = ipc_importance_for_task(to_task, FALSE);
2989 from_task_imp = ipc_importance_for_task(from_task, FALSE);
2990 from_elem = (ipc_importance_elem_t)from_task_imp;
2991
2992 ipc_importance_lock();
2993
2994 if (IIT_NULL == to_task_imp || IIT_NULL == from_task_imp) {
2995 goto out_locked;
2996 }
2997
2998 /*
2999 * No need to set up an inherit linkage if the to_task or from_task
3000 * isn't a receiver of one type or the other.
3001 */
3002 if (!ipc_importance_task_is_any_receiver_type(to_task_imp) ||
3003 !ipc_importance_task_is_any_receiver_type(from_task_imp)) {
3004 goto out_locked;
3005 }
3006
3007 /* Do not allow to create a linkage to self */
3008 if (to_task_imp == from_task_imp) {
3009 goto out_locked;
3010 }
3011
3012 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_added_inherit_from);
3013 incr_ref_counter(from_elem->iie_kmsg_refs_added);
3014
3015 /*
3016 * Now that we have the from_elem figured out,
3017 * check to see if we already have an inherit for this pairing
3018 */
3019 while (III_NULL == inherit) {
3020 inherit = ipc_importance_inherit_find(from_elem, to_task_imp, depth);
3021
3022 /* Do we have to allocate a new inherit */
3023 if (III_NULL == inherit) {
3024 if (III_NULL != alloc) {
3025 break;
3026 }
3027
3028 /* allocate space */
3029 ipc_importance_unlock();
3030 alloc = (ipc_importance_inherit_t)
3031 zalloc(ipc_importance_inherit_zone);
3032 ipc_importance_lock();
3033 }
3034 }
3035
3036 /* snapshot the donating status while we have importance locked */
3037 donating = ipc_importance_task_is_donor(from_task_imp);
3038
3039 if (III_NULL != inherit) {
3040 /* We found one, piggyback on that */
3041 assert(0 < III_REFS(inherit));
3042 assert(0 < IIE_REFS(inherit->iii_from_elem));
3043
3044 /* Take a reference for inherit */
3045 ipc_importance_inherit_reference_internal(inherit);
3046
3047 /* Reflect the inherit's change of status into the task boosts */
3048 if (0 == III_EXTERN(inherit)) {
3049 assert(!inherit->iii_donating);
3050 inherit->iii_donating = donating;
3051 if (donating) {
3052 to_task_imp->iit_externcnt += inherit->iii_externcnt;
3053 to_task_imp->iit_externdrop += inherit->iii_externdrop;
3054 }
3055 } else {
3056 assert(donating == inherit->iii_donating);
3057 }
3058
3059 /* add in an external reference for this use of the inherit */
3060 inherit->iii_externcnt++;
3061 } else {
3062 /* initialize the previously allocated space */
3063 inherit = alloc;
3064 os_ref_init_mask(&inherit->iii_bits, IIE_TYPE_BITS, &iie_refgrp, IIE_TYPE_INHERIT);
3065 inherit->iii_made = 0;
3066 inherit->iii_externcnt = 1;
3067 inherit->iii_externdrop = 0;
3068 inherit->iii_depth = depth;
3069 inherit->iii_to_task = to_task_imp;
3070 inherit->iii_from_elem = IIE_NULL;
3071 queue_init(&inherit->iii_kmsgs);
3072
3073 if (donating) {
3074 inherit->iii_donating = TRUE;
3075 } else {
3076 inherit->iii_donating = FALSE;
3077 }
3078
3079 /*
3080 * Chain our new inherit on the element it inherits from.
3081 * The new inherit takes our reference on from_elem.
3082 */
3083 ipc_importance_inherit_link(inherit, from_elem);
3084 
3085#if IIE_REF_DEBUG
3086 ipc_importance_counter_init(&inherit->iii_elem);
3087 from_elem->iie_kmsg_refs_inherited++;
3088 to_task_imp->iit_elem.iie_task_refs_inherited++;
3089#endif
3090 }
3091
3092out_locked:
3093
3094 /* If found inherit and donating, reflect that in the task externcnt */
3095 if (III_NULL != inherit && donating) {
3096 to_task_imp->iit_externcnt++;
3097 /* take the internal assertion */
3098 ipc_importance_task_hold_internal_assertion_locked(to_task_imp, 1);
3099 /* may have dropped and retaken importance lock */
3100 }
3101
3102 /* If we didn't create a new inherit, we have some resources to release */
3103 if (III_NULL == inherit || inherit != alloc) {
3104 if (IIE_NULL != from_elem) {
3105 if (III_NULL != inherit) {
3106 incr_ref_counter(from_elem->iie_kmsg_refs_coalesced);
3107 } else {
3108 incr_ref_counter(from_elem->iie_kmsg_refs_dropped);
3109 }
3110 ipc_importance_release_locked(from_elem);
3111 /* importance unlocked */
3112 } else {
3113 ipc_importance_unlock();
3114 }
3115
3116 if (IIT_NULL != to_task_imp) {
3117 if (III_NULL != inherit) {
3118 incr_ref_counter(to_task_imp->iit_elem.iie_task_refs_coalesced);
3119 }
3120 ipc_importance_task_release(to_task_imp);
3121 }
3122
3123 if (III_NULL != alloc) {
3124 zfree(ipc_importance_inherit_zone, alloc);
3125 }
3126 } else {
3127 /* from_elem and to_task_imp references transferred to new inherit */
3128 ipc_importance_unlock();
3129 }
3130
3131 return inherit;
3132}
3133
3134/*
3135 * Routine: ipc_importance_receive
3136 * Purpose:
3137 * Process importance attributes in a received message.
3138 *
3139 * If an importance voucher attribute was sent, transform
3140 * that into an attribute value reflecting the inheritance
3141 * from the sender to the receiver.
3142 *
3143 * If a static boost is received (or the incoming voucher does not
3144 * carry an importance-based boost), export a static boost.
3145 * Conditions:
3146 * Nothing locked.
3147 */
3148void
3149ipc_importance_receive(
3150 ipc_kmsg_t kmsg,
3151 mach_msg_option_t option)
3152{
3153 int impresult = -1;
3154
3155#if IMPORTANCE_TRACE || LEGACY_IMPORTANCE_DELIVERY
3156 task_t task_self = current_task();
3157 unsigned int sender_pid = ipc_kmsg_get_trailer(kmsg, false)->msgh_audit.val[5];
3158#endif
3159 mach_msg_header_t *hdr = ikm_header(kmsg);
3160
3161 /* convert to a voucher with an inherit importance attribute? */
3162 if ((option & MACH_RCV_VOUCHER) != 0) {
3163 uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) +
3164 sizeof(mach_voucher_attr_value_handle_t)];
3165 ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0;
3166 ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes;
3167 ipc_port_t voucher_port = ipc_kmsg_get_voucher_port(kmsg);
3168 ipc_voucher_t recv_voucher;
3169 mach_voucher_attr_value_handle_t handle;
3170 ipc_importance_inherit_t inherit;
3171 kern_return_t kr;
3172
3173 /* set up recipe to copy the old voucher */
3174 if (IP_VALID(voucher_port)) {
3175 ipc_voucher_t sent_voucher;
3176
3177 sent_voucher = (ipc_voucher_t)ipc_kobject_get_raw(voucher_port,
3178 IKOT_VOUCHER);
3179
3180 recipe->key = MACH_VOUCHER_ATTR_KEY_ALL;
3181 recipe->command = MACH_VOUCHER_ATTR_COPY;
3182 recipe->previous_voucher = sent_voucher;
3183 recipe->content_size = 0;
3184 recipe_size += sizeof(*recipe);
3185 }
3186
3187 /*
3188 * create an inheritance attribute from the kmsg (may be NULL)
3189 * transferring any boosts from the kmsg linkage through the
3190 * port directly to the new inheritance object.
3191 */
3192 inherit = ipc_importance_inherit_from_kmsg(kmsg);
3193 handle = (mach_voucher_attr_value_handle_t)inherit;
3194
3195 assert(IIE_NULL == kmsg->ikm_importance);
3196
3197 /*
3198 * Only create a new voucher if we have an inherit object
3199 * (from the ikm_importance field of the incoming message), OR
3200 * we have a valid incoming voucher. If we have neither of
3201 * these things then there is no need to create a new voucher.
3202 */
3203 if (IP_VALID(voucher_port) || inherit != III_NULL) {
3204 /* replace the importance attribute with the handle we created */
3205 /* our made reference on the inherit is donated to the voucher */
3206 recipe = (ipc_voucher_attr_recipe_t)&recipes[recipe_size];
3207 recipe->key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE;
3208 recipe->command = MACH_VOUCHER_ATTR_SET_VALUE_HANDLE;
3209 recipe->previous_voucher = IPC_VOUCHER_NULL;
3210 recipe->content_size = sizeof(mach_voucher_attr_value_handle_t);
3211 *(mach_voucher_attr_value_handle_t *)(void *)recipe->content = handle;
3212 recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t);
3213
3214 kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control,
3215 recipes,
3216 recipe_size,
3217 &recv_voucher);
3218 assert(KERN_SUCCESS == kr);
3219
3220 /* swap the voucher port (and set voucher bits in case it didn't already exist) */
3221 hdr->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16);
3222 ipc_port_release_send(voucher_port);
3223 voucher_port = convert_voucher_to_port(recv_voucher);
3224 ipc_kmsg_set_voucher_port(kmsg, voucher_port, MACH_MSG_TYPE_MOVE_SEND);
3225 if (III_NULL != inherit) {
3226 impresult = 2;
3227 }
3228 }
3229 } else { /* Don't want a voucher */
3230 /* got linked importance? have to drop */
3231 if (IIE_NULL != kmsg->ikm_importance) {
3232 ipc_importance_elem_t elem;
3233
3234 ipc_importance_lock();
3235 elem = ipc_importance_kmsg_unlink(kmsg);
3236#if IIE_REF_DEBUG
3237 elem->iie_kmsg_refs_dropped++;
3238#endif
3239 ipc_importance_release_locked(elem);
3240 /* importance unlocked */
3241 }
3242
3243 /* With kmsg unlinked, can safely examine message importance attribute. */
3244 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
3245 ipc_port_t port = hdr->msgh_remote_port;
3246#if LEGACY_IMPORTANCE_DELIVERY
3247 ipc_importance_task_t task_imp = task_self->task_imp_base;
3248
3249 /* The owner of receive right might have changed, take the internal assertion */
3250 if (KERN_SUCCESS == ipc_importance_task_hold_internal_assertion(task_imp, 1)) {
3251 ipc_importance_task_externalize_legacy_assertion(task_imp, 1, sender_pid);
3252 impresult = 1;
3253 } else
3254#endif
3255 {
3256 /* The importance boost never applied to task (clear the bit) */
3257 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3258 impresult = 0;
3259 }
3260
3261 /* Drop the boost on the port and the owner of the receive right */
3262 ip_mq_lock(port);
3263 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3264 ip_mq_unlock(port);
3265 }
3266 }
3267 }
3268
3269#if IMPORTANCE_TRACE
3270 if (-1 < impresult) {
3271 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE,
3272 sender_pid, task_pid(task_self),
3273 hdr->msgh_id, impresult, 0);
3274 }
3275 if (impresult == 2) {
3276 /*
3277 * This probe only covers new voucher-based path. Legacy importance
3278 * will trigger the probe in ipc_importance_task_externalize_legacy_assertion()
3279 * above and have impresult==1 here.
3280 */
3281 DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self),
3282 int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt);
3283 }
3284#endif /* IMPORTANCE_TRACE */
3285}
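/*
 * Worked example of the recipe buffer above (informal): the on-stack
 * `recipes` array is sized for two recipe headers plus one value handle.
 * With a valid sent voucher, recipe_size grows by sizeof(*recipe) for the
 * COPY recipe, then by sizeof(*recipe) +
 * sizeof(mach_voucher_attr_value_handle_t) for the SET_VALUE_HANDLE recipe
 * carrying the inherit handle - exactly the declared capacity, with no
 * room to spare.
 */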
3286
3287/*
3288 * Routine: ipc_importance_unreceive
3289 * Purpose:
3290 * Undo receive of importance attributes in a message.
3291 *
3292 * Conditions:
3293 * Nothing locked.
3294 */
3295void
3296ipc_importance_unreceive(
3297 ipc_kmsg_t kmsg,
3298 mach_msg_option_t __unused option)
3299{
3300 /* importance should already be in the voucher and out of the kmsg */
3301 assert(IIE_NULL == kmsg->ikm_importance);
3302 mach_msg_header_t *hdr = ikm_header(kmsg);
3303
3304 /* See if there is a legacy boost to be dropped from receiver */
3305 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
3306 ipc_importance_task_t task_imp;
3307
3308 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3309 task_imp = current_task()->task_imp_base;
3310
3311 if (!IP_VALID(ipc_kmsg_get_voucher_port(kmsg)) && IIT_NULL != task_imp) {
3312 ipc_importance_task_drop_legacy_external_assertion(task_imp, 1);
3313 }
3314 /*
3315 * ipc_kmsg_copyout_dest_to_user() will consume the voucher
3316 * and any contained importance.
3317 */
3318 }
3319}
3320
3321/*
3322 * Routine: ipc_importance_clean
3323 * Purpose:
3324 * Clean up importance state in a kmsg that is being cleaned.
3325 * Unlink the importance chain if one was set up, and drop
3326 * the reference this kmsg held on the donor. Then check to see
3327 * if importance was carried to the port, and remove that if
3328 * needed.
3329 * Conditions:
3330 * Nothing locked.
3331 */
3332void
3333ipc_importance_clean(
3334 ipc_kmsg_t kmsg)
3335{
3336 ipc_port_t port;
3337 mach_msg_header_t *hdr = ikm_header(kmsg);
3338
3339 /* Is the kmsg still linked? If so, remove that first */
3340 if (IIE_NULL != kmsg->ikm_importance) {
3341 ipc_importance_elem_t elem;
3342
3343 ipc_importance_lock();
3344 elem = ipc_importance_kmsg_unlink(kmsg);
3345 assert(IIE_NULL != elem);
3346 ipc_importance_release_locked(elem);
3347 /* importance unlocked */
3348 }
3349
3350 /* See if there is a legacy importance boost to be dropped from port */
3351 if (MACH_MSGH_BITS_RAISED_IMPORTANCE(hdr->msgh_bits)) {
3352 hdr->msgh_bits &= ~MACH_MSGH_BITS_RAISEIMP;
3353 port = hdr->msgh_remote_port;
3354 if (IP_VALID(port)) {
3355 ip_mq_lock(port);
3356 /* inactive ports already had their importance boosts dropped */
3357 if (!ip_active(port) ||
3358 ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) {
3359 ip_mq_unlock(port);
3360 }
3361 }
3362 }
3363}

void
ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg)
{
	assert(IIE_NULL == kmsg->ikm_importance);
	assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(ikm_header(kmsg)->msgh_bits));
}

/*
 * IPC Importance Attribute Manager definition
 */

static kern_return_t
ipc_importance_release_value(
	ipc_voucher_attr_manager_t manager,
	mach_voucher_attr_key_t key,
	mach_voucher_attr_value_handle_t value,
	mach_voucher_attr_value_reference_t sync);

static kern_return_t
ipc_importance_get_value(
	ipc_voucher_attr_manager_t manager,
	mach_voucher_attr_key_t key,
	mach_voucher_attr_recipe_command_t command,
	mach_voucher_attr_value_handle_array_t prev_values,
	mach_voucher_attr_value_handle_array_size_t prev_value_count,
	mach_voucher_attr_content_t content,
	mach_voucher_attr_content_size_t content_size,
	mach_voucher_attr_value_handle_t *out_value,
	mach_voucher_attr_value_flags_t *out_flags,
	ipc_voucher_t *out_value_voucher);

static kern_return_t
ipc_importance_extract_content(
	ipc_voucher_attr_manager_t manager,
	mach_voucher_attr_key_t key,
	mach_voucher_attr_value_handle_array_t values,
	mach_voucher_attr_value_handle_array_size_t value_count,
	mach_voucher_attr_recipe_command_t *out_command,
	mach_voucher_attr_content_t out_content,
	mach_voucher_attr_content_size_t *in_out_content_size);

static kern_return_t
ipc_importance_command(
	ipc_voucher_attr_manager_t manager,
	mach_voucher_attr_key_t key,
	mach_voucher_attr_value_handle_array_t values,
	mach_msg_type_number_t value_count,
	mach_voucher_attr_command_t command,
	mach_voucher_attr_content_t in_content,
	mach_voucher_attr_content_size_t in_content_size,
	mach_voucher_attr_content_t out_content,
	mach_voucher_attr_content_size_t *out_content_size);

const struct ipc_voucher_attr_manager ipc_importance_manager = {
	.ivam_release_value = ipc_importance_release_value,
	.ivam_get_value = ipc_importance_get_value,
	.ivam_extract_content = ipc_importance_extract_content,
	.ivam_command = ipc_importance_command,
	.ivam_flags = IVAM_FLAGS_NONE,
};

#define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key))
#define IMPORTANCE_ASSERT_MANAGER(manager) assert(&ipc_importance_manager == (manager))

/*
 * Routine:	ipc_importance_release_value [Voucher Attribute Manager Interface]
 * Purpose:
 *	Release what the voucher system believes is the last "made" reference
 *	on an importance attribute value handle. The sync parameter is used to
 *	avoid races with new made references concurrently being returned to the
 *	voucher system in other threads.
 * Conditions:
 *	Nothing locked on entry. May block.
 */
static kern_return_t
ipc_importance_release_value(
	ipc_voucher_attr_manager_t __assert_only manager,
	mach_voucher_attr_key_t __assert_only key,
	mach_voucher_attr_value_handle_t value,
	mach_voucher_attr_value_reference_t sync)
{
	ipc_importance_elem_t elem;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);
	assert(0 < sync);

	elem = (ipc_importance_elem_t)value;

	ipc_importance_lock();

	/* Any outstanding made refs? */
	if (sync != elem->iie_made) {
		assert(sync < elem->iie_made);
		ipc_importance_unlock();
		return KERN_FAILURE;
	}

	/* clear made */
	elem->iie_made = 0;

	/*
	 * If there are pending external boosts represented by this attribute,
	 * drop them from the appropriate task.
	 */
	if (IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
		ipc_importance_inherit_t inherit = (ipc_importance_inherit_t)elem;

		assert(inherit->iii_externcnt >= inherit->iii_externdrop);

		if (inherit->iii_donating) {
			ipc_importance_task_t imp_task = inherit->iii_to_task;
			uint32_t assertcnt = III_EXTERN(inherit);

			assert(ipc_importance_task_is_any_receiver_type(imp_task));
			assert(imp_task->iit_externcnt >= inherit->iii_externcnt);
			assert(imp_task->iit_externdrop >= inherit->iii_externdrop);
			imp_task->iit_externcnt -= inherit->iii_externcnt;
			imp_task->iit_externdrop -= inherit->iii_externdrop;
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
			inherit->iii_donating = FALSE;

			/* adjust the internal assertions - and propagate if needed */
			if (ipc_importance_task_check_transition(imp_task, IIT_UPDATE_DROP, assertcnt)) {
				ipc_importance_task_propagate_assertion_locked(imp_task, IIT_UPDATE_DROP, TRUE);
			}
		} else {
			inherit->iii_externcnt = 0;
			inherit->iii_externdrop = 0;
		}
	}

	/* drop the made reference on elem */
	ipc_importance_release_locked(elem);
	/* returns unlocked */

	return KERN_SUCCESS;
}


/*
 * Routine:	ipc_importance_get_value [Voucher Attribute Manager Interface]
 * Purpose:
 *	Convert command and content data into a reference on a [potentially new]
 *	attribute value. The importance attribute manager will only allow the
 *	caller to get a value for the current task's importance, or to redeem
 *	an importance attribute from an existing voucher.
 * Conditions:
 *	Nothing locked on entry. May block.
 */
static kern_return_t
ipc_importance_get_value(
	ipc_voucher_attr_manager_t __assert_only manager,
	mach_voucher_attr_key_t __assert_only key,
	mach_voucher_attr_recipe_command_t command,
	mach_voucher_attr_value_handle_array_t prev_values,
	mach_voucher_attr_value_handle_array_size_t prev_value_count,
	mach_voucher_attr_content_t __unused content,
	mach_voucher_attr_content_size_t content_size,
	mach_voucher_attr_value_handle_t *out_value,
	mach_voucher_attr_value_flags_t *out_flags,
	ipc_voucher_t *out_value_voucher)
{
	ipc_importance_elem_t elem;
	task_t self;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	if (0 != content_size) {
		return KERN_INVALID_ARGUMENT;
	}

	*out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE;
	/* never an out voucher */

	switch (command) {
	case MACH_VOUCHER_ATTR_REDEEM:

		/* redeem of previous values is the value */
		if (0 < prev_value_count) {
			elem = (ipc_importance_elem_t)prev_values[0];
			assert(IIE_NULL != elem);

			ipc_importance_lock();
			assert(0 < elem->iie_made);
			elem->iie_made++;
			ipc_importance_unlock();

			*out_value = prev_values[0];
			return KERN_SUCCESS;
		}

		/* redeem of default is default */
		*out_value = 0;
		*out_value_voucher = IPC_VOUCHER_NULL;
		return KERN_SUCCESS;

	case MACH_VOUCHER_ATTR_IMPORTANCE_SELF:
		self = current_task();

		elem = (ipc_importance_elem_t)ipc_importance_for_task(self, TRUE);
		/* made reference added (or IIE_NULL which isn't referenced) */

		*out_value = (mach_voucher_attr_value_handle_t)elem;
		*out_value_voucher = IPC_VOUCHER_NULL;
		return KERN_SUCCESS;

	default:
		/*
		 * every other command is unknown
		 *
		 * Specifically, there is no mechanism provided to construct an
		 * importance attribute for a task/process from just a pid or
		 * task port. It has to be copied (or redeemed) from a previous
		 * voucher that has it.
		 */
		return KERN_INVALID_ARGUMENT;
	}
}
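
/*
 * Illustrative sketch (not part of the kernel build): roughly how a
 * user-space client could exercise the MACH_VOUCHER_ATTR_IMPORTANCE_SELF
 * path above by minting a voucher that carries its own task importance.
 * The user-level headers and the availability of this recipe command to
 * non-kernel callers are assumptions here; only the recipe layout (the
 * importance key with empty content) is dictated by the handler above.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_voucher.h>
 *
 *	static mach_voucher_t
 *	voucher_with_self_importance(void)
 *	{
 *		mach_voucher_attr_recipe_data_t recipe = {
 *			.key          = MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 *			.command      = MACH_VOUCHER_ATTR_IMPORTANCE_SELF,
 *			.content_size = 0,
 *		};
 *		mach_voucher_t voucher = MACH_VOUCHER_NULL;
 *
 *		kern_return_t kr = host_create_mach_voucher(mach_host_self(),
 *		    (mach_voucher_attr_raw_recipe_array_t)&recipe,
 *		    sizeof(recipe), &voucher);
 *		return (kr == KERN_SUCCESS) ? voucher : MACH_VOUCHER_NULL;
 *	}
 *
 * A non-zero content_size would make ipc_importance_get_value() return
 * KERN_INVALID_ARGUMENT, per the check at the top of the routine.
 */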

/*
 * Routine:	ipc_importance_extract_content [Voucher Attribute Manager Interface]
 * Purpose:
 *	Extract meaning from the attribute value present in a voucher. While
 *	the real goal is to provide commands and data that can reproduce the
 *	voucher's value "out of thin air", this isn't possible with importance
 *	attribute values. Instead, return debug info to help track down dependencies.
 * Conditions:
 *	Nothing locked on entry. May block.
 */
static kern_return_t
ipc_importance_extract_content(
	ipc_voucher_attr_manager_t __assert_only manager,
	mach_voucher_attr_key_t __assert_only key,
	mach_voucher_attr_value_handle_array_t values,
	mach_voucher_attr_value_handle_array_size_t value_count,
	mach_voucher_attr_recipe_command_t *out_command,
	mach_voucher_attr_content_t out_content,
	mach_voucher_attr_content_size_t *in_out_content_size)
{
	ipc_importance_elem_t elem;
	unsigned int i;

	char *buf = (char *)out_content;
	mach_voucher_attr_content_size_t size = *in_out_content_size;
	mach_voucher_attr_content_size_t pos = 0;
	__unused int pid;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	if (size < 1) {
		/* rdar://110276886 we need space for the terminating NUL */
		return KERN_NO_SPACE;
	}

	/* the first non-default value provides the data */
	for (i = 0; i < value_count; i++) {
		elem = (ipc_importance_elem_t)values[i];
		if (IIE_NULL == elem) {
			continue;
		}

		pos += scnprintf(buf + pos, size - pos, "Importance for ");

		for (;;) {
			ipc_importance_inherit_t inherit = III_NULL;
			ipc_importance_task_t task_imp;

			if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
				task_imp = (ipc_importance_task_t)elem;
			} else {
				inherit = (ipc_importance_inherit_t)elem;
				task_imp = inherit->iii_to_task;
			}
#if DEVELOPMENT || DEBUG
			pos += scnprintf(buf + pos, size - pos, "%s[%d]",
			    task_imp->iit_procname, task_imp->iit_bsd_pid);
#else
			ipc_importance_lock();
			pid = task_importance_task_get_pid(task_imp);
			ipc_importance_unlock();
			pos += scnprintf(buf + pos, size - pos, "pid %d", pid);
#endif /* DEVELOPMENT || DEBUG */

			if (III_NULL == inherit) {
				break;
			}
			pos += scnprintf(buf + pos, size - pos,
			    " (%d of %d boosts) %s from ",
			    III_EXTERN(inherit), inherit->iii_externcnt,
			    (inherit->iii_donating) ? "donated" : "linked");
			elem = inherit->iii_from_elem;
		}

		pos++; /* account for terminating \0 */
		break;
	}
	*out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */
	*in_out_content_size = pos;
	return KERN_SUCCESS;
}
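
/*
 * Illustrative sketch (assumed user-level usage, not compiled here): the
 * debug string built above can be retrieved with the standard voucher
 * attribute content call. The buffer size and error handling are
 * placeholders chosen for the example.
 *
 *	char desc[256];
 *	mach_voucher_attr_content_size_t len = sizeof(desc);
 *	kern_return_t kr = mach_voucher_extract_attr_content(voucher,
 *	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 *	    (mach_voucher_attr_content_t)desc, &len);
 *	if (kr == KERN_SUCCESS) {
 *		printf("%.*s\n", (int)len, desc);
 *	}
 *
 * Because *out_command is set to MACH_VOUCHER_ATTR_NOOP, the returned
 * string is purely informational and cannot be replayed to rebuild the
 * attribute value.
 */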

/*
 * Routine:	ipc_importance_command [Voucher Attribute Manager Interface]
 * Purpose:
 *	Run commands against the importance attribute value found in a voucher.
 *	Only the MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL command is currently
 *	supported; it drops external boost references held by an inherit that is
 *	donating to a denap receiver.
 * Conditions:
 *	Nothing locked on entry. May block.
 */
static kern_return_t
ipc_importance_command(
	ipc_voucher_attr_manager_t __assert_only manager,
	mach_voucher_attr_key_t __assert_only key,
	mach_voucher_attr_value_handle_array_t values,
	mach_msg_type_number_t value_count,
	mach_voucher_attr_command_t command,
	mach_voucher_attr_content_t in_content,
	mach_voucher_attr_content_size_t in_content_size,
	mach_voucher_attr_content_t out_content,
	mach_voucher_attr_content_size_t *out_content_size)
{
	ipc_importance_inherit_t inherit;
	ipc_importance_task_t to_task;
	uint32_t refs, *outrefsp;
	mach_msg_type_number_t i;
	uint32_t externcnt;

	IMPORTANCE_ASSERT_MANAGER(manager);
	IMPORTANCE_ASSERT_KEY(key);

	if (in_content_size != sizeof(refs) ||
	    (*out_content_size != 0 && *out_content_size != sizeof(refs))) {
		return KERN_INVALID_ARGUMENT;
	}
	refs = *(uint32_t *)(void *)in_content;
	outrefsp = (*out_content_size != 0) ? (uint32_t *)(void *)out_content : NULL;

	if (MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL != command) {
		return KERN_NOT_SUPPORTED;
	}

	/* the first non-default value of the apropos type provides the data */
	inherit = III_NULL;
	for (i = 0; i < value_count; i++) {
		ipc_importance_elem_t elem = (ipc_importance_elem_t)values[i];

		if (IIE_NULL != elem && IIE_TYPE_INHERIT == IIE_TYPE(elem)) {
			inherit = (ipc_importance_inherit_t)elem;
			break;
		}
	}
	if (III_NULL == inherit) {
		return KERN_INVALID_ARGUMENT;
	}

	ipc_importance_lock();

	if (0 == refs) {
		if (NULL != outrefsp) {
			*outrefsp = III_EXTERN(inherit);
		}
		ipc_importance_unlock();
		return KERN_SUCCESS;
	}

	to_task = inherit->iii_to_task;
	assert(ipc_importance_task_is_any_receiver_type(to_task));

	/* if not donating to a denap receiver, it was called incorrectly */
	if (!ipc_importance_task_is_marked_denap_receiver(to_task)) {
		ipc_importance_unlock();
		return KERN_INVALID_TASK; /* keeps dispatch happy */
	}

	/* Enough external references left to drop? */
	if (III_EXTERN(inherit) < refs) {
		ipc_importance_unlock();
		return KERN_FAILURE;
	}

	/* re-base external and internal counters at the inherit and the to-task (if apropos) */
	if (inherit->iii_donating) {
		assert(IIT_EXTERN(to_task) >= III_EXTERN(inherit));
		assert(to_task->iit_externcnt >= inherit->iii_externcnt);
		assert(to_task->iit_externdrop >= inherit->iii_externdrop);
		inherit->iii_externdrop += refs;
		to_task->iit_externdrop += refs;
		externcnt = III_EXTERN(inherit);
		if (0 == externcnt) {
			inherit->iii_donating = FALSE;
			to_task->iit_externcnt -= inherit->iii_externcnt;
			to_task->iit_externdrop -= inherit->iii_externdrop;

			/* Start AppNap delay hysteresis - even if not the last boost for the task. */
			if (ipc_importance_delayed_drop_call != NULL &&
			    ipc_importance_task_is_marked_denap_receiver(to_task)) {
				ipc_importance_task_delayed_drop(to_task);
			}

			/* drop task assertions associated with the dropped boosts */
			if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) {
				ipc_importance_task_propagate_assertion_locked(to_task, IIT_UPDATE_DROP, TRUE);
				/* may have dropped and retaken importance lock */
			}
		} else {
			/* assert(to_task->iit_assertcnt >= refs + externcnt); */
			/* defensive deduction in case of assertcnt underflow */
			if (to_task->iit_assertcnt > refs + externcnt) {
				to_task->iit_assertcnt -= refs;
			} else {
				to_task->iit_assertcnt = externcnt;
			}
		}
	} else {
		inherit->iii_externdrop += refs;
		externcnt = III_EXTERN(inherit);
	}

	/* capture result (if requested) */
	if (NULL != outrefsp) {
		*outrefsp = externcnt;
	}

	ipc_importance_unlock();
	return KERN_SUCCESS;
}
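
/*
 * Illustrative sketch (assumed user-level usage, not compiled here): this is
 * the shape of the call a dispatch-style client would make to relinquish
 * external boosts. The voucher handle and reference count are placeholders;
 * the content layout (a single uint32_t in and, optionally, out) matches the
 * size checks at the top of ipc_importance_command().
 *
 *	uint32_t drop = 1;
 *	uint32_t remaining = 0;
 *	mach_voucher_attr_content_size_t out_size = sizeof(remaining);
 *
 *	kern_return_t kr = mach_voucher_attr_command(voucher,
 *	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
 *	    MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL,
 *	    (mach_voucher_attr_content_t)&drop, sizeof(drop),
 *	    (mach_voucher_attr_content_t)&remaining, &out_size);
 *
 * Passing drop == 0 simply reports the current external count; asking to
 * drop more boosts than remain fails with KERN_FAILURE.
 */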

/*
 * Routine:	ipc_importance_init
 * Purpose:
 *	Initialize the IPC importance manager.
 * Conditions:
 *	Zones and Vouchers are already initialized.
 */
__startup_func
static void
ipc_importance_init(void)
{
	ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager,
	    (mach_voucher_attr_value_handle_t)0,
	    MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
	    &ipc_importance_control);
}
STARTUP(MACH_IPC, STARTUP_RANK_LAST, ipc_importance_init);

/*
 * Routine:	ipc_importance_thread_call_init
 * Purpose:
 *	Initialize the IPC importance code dependent upon
 *	thread-call support being available.
 * Conditions:
 *	Thread-call mechanism is already initialized.
 */
__startup_func
static void
ipc_importance_thread_call_init(void)
{
	/* initialize delayed drop queue and thread-call */
	queue_init(&ipc_importance_delayed_drop_queue);
	ipc_importance_delayed_drop_call =
	    thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL);
	if (NULL == ipc_importance_delayed_drop_call) {
		panic("ipc_importance_init");
	}
}
STARTUP(THREAD_CALL, STARTUP_RANK_MIDDLE, ipc_importance_thread_call_init);

/*
 * Routine:	task_importance_list_pids
 * Purpose:	List the pids to which this task is donating importance.
 * Conditions:	To be called only from kdp stackshot code.
 *		Will panic the system otherwise.
 */
extern int
task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count)
{
	if (kdp_lck_spin_is_acquired(&ipc_importance_lock_data) ||
	    max_count < 1 ||
	    task->task_imp_base == IIT_NULL ||
	    pid_list == NULL ||
	    flags != TASK_IMP_LIST_DONATING_PIDS) {
		return 0;
	}
	unsigned int pidcount = 0;
	ipc_importance_task_t task_imp = task->task_imp_base;
	ipc_kmsg_t temp_kmsg;
	mach_msg_header_t *temp_hdr;
	ipc_importance_inherit_t temp_inherit;
	ipc_importance_elem_t elem;
	int target_pid = 0, previous_pid;

	queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) {
		/* check space in buffer */
		if (pidcount >= max_count) {
			break;
		}
		previous_pid = target_pid;
		target_pid = -1;

		if (temp_inherit->iii_donating) {
			target_pid = task_importance_task_get_pid(temp_inherit->iii_to_task);
		}

		if (target_pid != -1 && previous_pid != target_pid) {
			memcpy(pid_list, &target_pid, sizeof(target_pid));
			pid_list += sizeof(target_pid);
			pidcount++;
		}
	}

	target_pid = 0;
	queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) {
		if (pidcount >= max_count) {
			break;
		}
		previous_pid = target_pid;
		target_pid = -1;
		elem = temp_kmsg->ikm_importance;

		if (elem == IIE_NULL) {
			continue;
		}

		temp_hdr = ikm_header(temp_kmsg);

		if (!(temp_hdr &&
		    MACH_MSGH_BITS_RAISED_IMPORTANCE(temp_hdr->msgh_bits))) {
			continue;
		}

		if (IIE_TYPE_TASK == IIE_TYPE(elem)) {
			ipc_importance_task_t temp_iit = (ipc_importance_task_t)elem;
			target_pid = task_importance_task_get_pid(temp_iit);
		} else {
			temp_inherit = (ipc_importance_inherit_t)elem;
			target_pid = task_importance_task_get_pid(temp_inherit->iii_to_task);
		}

		if (target_pid != -1 && previous_pid != target_pid) {
			memcpy(pid_list, &target_pid, sizeof(target_pid));
			pid_list += sizeof(target_pid);
			pidcount++;
		}
	}

	return pidcount;
}
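
/*
 * Illustrative sketch (hypothetical caller, kdp/stackshot context only):
 * each entry written to pid_list is a raw int, so a caller sized for
 * max_count pids might look like the following. The array size is an
 * arbitrary example value.
 *
 *	int donating_pids[32];
 *	int count = task_importance_list_pids(task,
 *	    TASK_IMP_LIST_DONATING_PIDS, (char *)donating_pids,
 *	    sizeof(donating_pids) / sizeof(donating_pids[0]));
 *
 * On return, donating_pids[0..count-1] hold the pids currently receiving
 * importance donated by the task, with adjacent duplicates suppressed.
 */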