1/*
2 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <kern/kern_types.h>
30#include <mach/mach_types.h>
31#include <mach/boolean.h>
32
33#include <kern/coalition.h>
34#include <kern/host.h>
35#include <kern/kalloc.h>
36#include <kern/ledger.h>
37#include <kern/mach_param.h> /* for TASK_CHUNK */
38#include <kern/task.h>
39#include <kern/thread_group.h>
40#include <kern/zalloc.h>
41
42#include <libkern/OSAtomic.h>
43
44#include <mach/coalition_notification_server.h>
45#include <mach/host_priv.h>
46#include <mach/host_special_ports.h>
47
48#include <sys/errno.h>
49
50/*
51 * BSD interface functions
52 */
53int coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_sz);
54boolean_t coalition_is_leader(task_t task, int coal_type, coalition_t *coal);
55task_t coalition_get_leader(coalition_t coal);
56int coalition_get_task_count(coalition_t coal);
57uint64_t coalition_get_page_count(coalition_t coal, int *ntasks);
58int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
59 int *pid_list, int list_sz);
60
61/* defined in task.c */
62extern ledger_template_t task_ledger_template;
63
64/*
65 * Coalition zone needs limits. We expect there will be as many coalitions as
66 * tasks (same order of magnitude), so use the task zone's limits.
67 * */
68#define CONFIG_COALITION_MAX CONFIG_TASK_MAX
69#define COALITION_CHUNK TASK_CHUNK
70
71int unrestrict_coalition_syscalls;
72int merge_adaptive_coalitions;
73
74lck_attr_t coalitions_lck_attr;
75lck_grp_t coalitions_lck_grp;
76lck_grp_attr_t coalitions_lck_grp_attr;
77
78/* coalitions_list_lock protects coalition_count, coalitions queue, next_coalition_id. */
79decl_lck_mtx_data(static,coalitions_list_lock);
80static uint64_t coalition_count;
81static uint64_t coalition_next_id = 1;
82static queue_head_t coalitions_q;
83
84coalition_t init_coalition[COALITION_NUM_TYPES];
85coalition_t corpse_coalition[COALITION_NUM_TYPES];
86
87zone_t coalition_zone;
88
89static const char *coal_type_str(int type)
90{
91 switch(type) {
92 case COALITION_TYPE_RESOURCE:
93 return "RESOURCE";
94 case COALITION_TYPE_JETSAM:
95 return "JETSAM";
96 default:
97 return "<unknown>";
98 }
99}
100
101struct coalition_type {
102 int type;
103 int has_default;
104 /*
105 * init
106 * pre-condition: coalition just allocated (unlocked), unreferenced,
107 * type field set
108 */
109 kern_return_t (*init)(coalition_t coal, boolean_t privileged);
110
111 /*
112 * dealloc
113 * pre-condition: coalition unlocked
114 * pre-condition: coalition refcount=0, active_count=0,
115 * termrequested=1, terminated=1, reaped=1
116 */
117 void (*dealloc)(coalition_t coal);
118
119 /*
120 * adopt_task
121 * pre-condition: coalition locked
122 * pre-condition: coalition !reaped and !terminated
123 */
124 kern_return_t (*adopt_task)(coalition_t coal, task_t task);
125
126 /*
127 * remove_task
128 * pre-condition: coalition locked
129 * pre-condition: task has been removed from coalition's task list
130 */
131 kern_return_t (*remove_task)(coalition_t coal, task_t task);
132
133 /*
134 * set_taskrole
135 * pre-condition: coalition locked
136 * pre-condition: task added to coalition's task list,
137 * active_count >= 1 (at least the given task is active)
138 */
139 kern_return_t (*set_taskrole)(coalition_t coal, task_t task, int role);
140
141 /*
142 * get_taskrole
143 * pre-condition: coalition locked
144 * pre-condition: task added to coalition's task list,
145 * active_count >= 1 (at least the given task is active)
146 */
147 int (*get_taskrole)(coalition_t coal, task_t task);
148
149 /*
150 * iterate_tasks
151 * pre-condition: coalition locked
152 */
153 void (*iterate_tasks)(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t));
154};
155
156/*
157 * COALITION_TYPE_RESOURCE
158 */
159
160static kern_return_t i_coal_resource_init(coalition_t coal, boolean_t privileged);
161static void i_coal_resource_dealloc(coalition_t coal);
162static kern_return_t i_coal_resource_adopt_task(coalition_t coal, task_t task);
163static kern_return_t i_coal_resource_remove_task(coalition_t coal, task_t task);
164static kern_return_t i_coal_resource_set_taskrole(coalition_t coal,
165 task_t task, int role);
166static int i_coal_resource_get_taskrole(coalition_t coal, task_t task);
167static void i_coal_resource_iterate_tasks(coalition_t coal, void *ctx,
168 void (*callback)(coalition_t, void *, task_t));
169
170/*
171 * Ensure COALITION_NUM_THREAD_QOS_TYPES defined in mach/coalition.h still
172 * matches THREAD_QOS_LAST defined in mach/thread_policy.h
173 */
174static_assert(COALITION_NUM_THREAD_QOS_TYPES == THREAD_QOS_LAST);
175
176struct i_resource_coalition {
177 ledger_t ledger;
178 uint64_t bytesread;
179 uint64_t byteswritten;
180 uint64_t energy;
181 uint64_t gpu_time;
182 uint64_t logical_immediate_writes;
183 uint64_t logical_deferred_writes;
184 uint64_t logical_invalidated_writes;
185 uint64_t logical_metadata_writes;
186 uint64_t cpu_ptime;
187 uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per effective QoS class */
188 uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES]; /* cpu time per requested QoS class */
189
190 uint64_t task_count; /* tasks that have started in this coalition */
191 uint64_t dead_task_count; /* tasks that have exited in this coalition;
192 subtract from task_count to get count
193 of "active" tasks */
194 /*
195 * Count the length of time this coalition had at least one active task.
196 * This can be a 'denominator' to turn e.g. cpu_time to %cpu.
197 * */
198 uint64_t last_became_nonempty_time;
199 uint64_t time_nonempty;
200
201 queue_head_t tasks; /* List of active tasks in the coalition */
202};
203
204/*
205 * COALITION_TYPE_JETSAM
206 */
207
208static kern_return_t i_coal_jetsam_init(coalition_t coal, boolean_t privileged);
209static void i_coal_jetsam_dealloc(coalition_t coal);
210static kern_return_t i_coal_jetsam_adopt_task(coalition_t coal, task_t task);
211static kern_return_t i_coal_jetsam_remove_task(coalition_t coal, task_t task);
212static kern_return_t i_coal_jetsam_set_taskrole(coalition_t coal,
213 task_t task, int role);
214static int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task);
215static void i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx,
216 void (*callback)(coalition_t, void *, task_t));
217
218struct i_jetsam_coalition {
219 task_t leader;
220 queue_head_t extensions;
221 queue_head_t services;
222 queue_head_t other;
223 struct thread_group *thread_group;
224};
225
226
227/*
228 * main coalition structure
229 */
230struct coalition {
231 uint64_t id; /* monotonically increasing */
232 uint32_t type;
233 uint32_t role; /* default task role (background, adaptive, interactive, etc) */
234 uint32_t ref_count; /* Number of references to the memory containing this struct */
235 uint32_t active_count; /* Number of members of (tasks in) the
236 coalition, plus vouchers referring
237 to the coalition */
238 uint32_t focal_task_count; /* Number of TASK_FOREGROUND_APPLICATION tasks in the coalition */
239 uint32_t nonfocal_task_count; /* Number of TASK_BACKGROUND_APPLICATION tasks in the coalition */
240
241 /* coalition flags */
242 uint32_t privileged : 1; /* Members of this coalition may create
243 and manage coalitions and may posix_spawn
244 processes into selected coalitions */
245 /* ast? */
246 /* voucher */
247 uint32_t termrequested : 1; /* launchd has requested termination when coalition becomes empty */
248 uint32_t terminated : 1; /* coalition became empty and spawns are now forbidden */
249 uint32_t reaped : 1; /* reaped, invisible to userspace, but waiting for ref_count to go to zero */
250 uint32_t notified : 1; /* no-more-processes notification was sent via special port */
251 uint32_t efficient : 1; /* launchd has marked the coalition as efficient */
252#if DEVELOPMENT || DEBUG
253 uint32_t should_notify : 1; /* should this coalition send notifications (default: yes) */
254#endif
255
256 queue_chain_t coalitions; /* global list of coalitions */
257
258 decl_lck_mtx_data(,lock) /* Coalition lock. */
259
260 /* put coalition type-specific structures here */
261 union {
262 struct i_resource_coalition r;
263 struct i_jetsam_coalition j;
264 };
265};
266
267/*
268 * register different coalition types:
269 * these must be kept in the order specified in coalition.h
270 */
271static const struct coalition_type
272s_coalition_types[COALITION_NUM_TYPES] = {
273 {
274 COALITION_TYPE_RESOURCE,
275 1,
276 i_coal_resource_init,
277 i_coal_resource_dealloc,
278 i_coal_resource_adopt_task,
279 i_coal_resource_remove_task,
280 i_coal_resource_set_taskrole,
281 i_coal_resource_get_taskrole,
282 i_coal_resource_iterate_tasks,
283 },
284 {
285 COALITION_TYPE_JETSAM,
286 1,
287 i_coal_jetsam_init,
288 i_coal_jetsam_dealloc,
289 i_coal_jetsam_adopt_task,
290 i_coal_jetsam_remove_task,
291 i_coal_jetsam_set_taskrole,
292 i_coal_jetsam_get_taskrole,
293 i_coal_jetsam_iterate_tasks,
294 },
295};
296
297#define coal_call(coal, func, ...) \
298 (s_coalition_types[(coal)->type].func)(coal, ## __VA_ARGS__)
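
/*
 * For clarity: coal_call() simply indexes the per-type dispatch table
 * s_coalition_types[] above. The illustrative helper below (kept under
 * #if 0 so it is not compiled) shows what a call such as
 * coal_call(coal, adopt_task, task) amounts to.
 */
#if 0
static kern_return_t
coal_call_expanded_example(coalition_t coal, task_t task)
{
	/* equivalent to: coal_call(coal, adopt_task, task) */
	return (s_coalition_types[coal->type].adopt_task)(coal, task);
}
#endif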
299
300
301#define coalition_lock(c) do{ lck_mtx_lock(&c->lock); }while(0)
302#define coalition_unlock(c) do{ lck_mtx_unlock(&c->lock); }while(0)
303
304/*
305 * Define the coalition type to track focal tasks.
306 * On embedded, track them using jetsam coalitions since they have associated thread
307 * groups which reflect this property as a flag (and pass it down to CLPC).
308 * On non-embedded platforms, since not all coalitions have jetsam coalitions,
309 * track focal counts on the resource coalition.
310 */
311#if CONFIG_EMBEDDED
312#define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_JETSAM
313#else /* CONFIG_EMBEDDED */
314#define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_RESOURCE
315#endif /* CONFIG_EMBEDDED */
316
317
318static void
319coalition_notify_user(uint64_t id, uint32_t flags)
320{
321 mach_port_t user_port;
322 kern_return_t kr;
323
324 kr = host_get_coalition_port(host_priv_self(), &user_port);
325 if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
326 return;
327 }
328
329 coalition_notification(user_port, id, flags);
330 ipc_port_release_send(user_port);
331}
332
333/*
334 *
335 * COALITION_TYPE_RESOURCE
336 *
337 */
338static kern_return_t
339i_coal_resource_init(coalition_t coal, boolean_t privileged)
340{
341 (void)privileged;
342 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
343 coal->r.ledger = ledger_instantiate(task_ledger_template,
344 LEDGER_CREATE_ACTIVE_ENTRIES);
345 if (coal->r.ledger == NULL)
346 return KERN_RESOURCE_SHORTAGE;
347
348 queue_init(&coal->r.tasks);
349
350 return KERN_SUCCESS;
351}
352
353static void
354i_coal_resource_dealloc(coalition_t coal)
355{
356 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
357 ledger_dereference(coal->r.ledger);
358}
359
360static kern_return_t
361i_coal_resource_adopt_task(coalition_t coal, task_t task)
362{
363 struct i_resource_coalition *cr;
364
365 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
366 assert(queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));
367
368 cr = &coal->r;
369 cr->task_count++;
370
371 if (cr->task_count < cr->dead_task_count) {
372 panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
373 __func__, coal, coal->id, coal_type_str(coal->type),
374 cr->task_count, cr->dead_task_count);
375 }
376
377 /* If moving from 0->1 active tasks */
378 if (cr->task_count - cr->dead_task_count == 1) {
379 cr->last_became_nonempty_time = mach_absolute_time();
380 }
381
382 /* put the task on the coalition's list of tasks */
383 enqueue_tail(&cr->tasks, &task->task_coalition[COALITION_TYPE_RESOURCE]);
384
385 coal_dbg("Added PID:%d to id:%llu, task_count:%llu, dead_count:%llu, nonempty_time:%llu",
386 task_pid(task), coal->id, cr->task_count, cr->dead_task_count,
387 cr->last_became_nonempty_time);
388
389 return KERN_SUCCESS;
390}
391
392static kern_return_t
393i_coal_resource_remove_task(coalition_t coal, task_t task)
394{
395 struct i_resource_coalition *cr;
396
397 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
398 assert(task->coalition[COALITION_TYPE_RESOURCE] == coal);
399 assert(!queue_empty(&task->task_coalition[COALITION_TYPE_RESOURCE]));
400
401 /*
402 * handle resource coalition accounting rollup for dead tasks
403 */
404 cr = &coal->r;
405
406 cr->dead_task_count++;
407
408 if (cr->task_count < cr->dead_task_count) {
409 panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)",
410 __func__, coal, coal->id, coal_type_str(coal->type), cr->task_count, cr->dead_task_count);
411 }
412
413 /* If moving from 1->0 active tasks */
414 if (cr->task_count - cr->dead_task_count == 0) {
415 uint64_t last_time_nonempty = mach_absolute_time() - cr->last_became_nonempty_time;
416 cr->last_became_nonempty_time = 0;
417 cr->time_nonempty += last_time_nonempty;
418 }
419
420 /* Do not roll up for exec'd task or exec copy task */
421 if (!task_is_exec_copy(task) && !task_did_exec(task)) {
422 ledger_rollup(cr->ledger, task->ledger);
423 cr->bytesread += task->task_io_stats->disk_reads.size;
424 cr->byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
425#if !CONFIG_EMBEDDED
426 cr->gpu_time += task_gpu_utilisation(task);
427#else
428 cr->energy += task_energy(task);
429#endif
430 cr->logical_immediate_writes += task->task_immediate_writes;
431 cr->logical_deferred_writes += task->task_deferred_writes;
432 cr->logical_invalidated_writes += task->task_invalidated_writes;
433 cr->logical_metadata_writes += task->task_metadata_writes;
434 cr->cpu_ptime += task_cpu_ptime(task);
435 task_update_cpu_time_qos_stats(task, cr->cpu_time_eqos, cr->cpu_time_rqos);
436 }
437
438 /* remove the task from the coalition's list */
439 remqueue(&task->task_coalition[COALITION_TYPE_RESOURCE]);
440 queue_chain_init(task->task_coalition[COALITION_TYPE_RESOURCE]);
441
442 coal_dbg("removed PID:%d from id:%llu, task_count:%llu, dead_count:%llu",
443 task_pid(task), coal->id, cr->task_count, cr->dead_task_count);
444
445 return KERN_SUCCESS;
446}
447
448static kern_return_t
449i_coal_resource_set_taskrole(__unused coalition_t coal,
450 __unused task_t task, __unused int role)
451{
452 return KERN_SUCCESS;
453}
454
455static int
456i_coal_resource_get_taskrole(__unused coalition_t coal, __unused task_t task)
457{
458 task_t t;
459
460 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
461
462 qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
463 if (t == task)
464 return COALITION_TASKROLE_UNDEF;
465 }
466
467 return -1;
468}
469
470static void
471i_coal_resource_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
472{
473 task_t t;
474 assert(coal && coal->type == COALITION_TYPE_RESOURCE);
475
476 qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE])
477 callback(coal, ctx, t);
478}
479
480kern_return_t
481coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_usage *cru_out)
482{
483 kern_return_t kr;
484 ledger_amount_t credit, debit;
485 int i;
486
487 if (coal->type != COALITION_TYPE_RESOURCE)
488 return KERN_INVALID_ARGUMENT;
489
490 /* Return KERN_INVALID_ARGUMENT for Corpse coalition */
491 for (i = 0; i < COALITION_NUM_TYPES; i++) {
492 if (coal == corpse_coalition[i]) {
493 return KERN_INVALID_ARGUMENT;
494 }
495 }
496
497 ledger_t sum_ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
498 if (sum_ledger == LEDGER_NULL)
499 return KERN_RESOURCE_SHORTAGE;
500
501 coalition_lock(coal);
502
503 /*
504 * Start with the coalition's ledger, which holds the totals from all
505 * the dead tasks.
506 */
507 ledger_rollup(sum_ledger, coal->r.ledger);
508 uint64_t bytesread = coal->r.bytesread;
509 uint64_t byteswritten = coal->r.byteswritten;
510 uint64_t gpu_time = coal->r.gpu_time;
511 uint64_t energy = coal->r.energy;
512 uint64_t logical_immediate_writes = coal->r.logical_immediate_writes;
513 uint64_t logical_deferred_writes = coal->r.logical_deferred_writes;
514 uint64_t logical_invalidated_writes = coal->r.logical_invalidated_writes;
515 uint64_t logical_metadata_writes = coal->r.logical_metadata_writes;
516 int64_t cpu_time_billed_to_me = 0;
517 int64_t cpu_time_billed_to_others = 0;
518 int64_t energy_billed_to_me = 0;
519 int64_t energy_billed_to_others = 0;
520 uint64_t cpu_ptime = coal->r.cpu_ptime;
521 uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES];
522 memcpy(cpu_time_eqos, coal->r.cpu_time_eqos, sizeof(cpu_time_eqos));
523 uint64_t cpu_time_rqos[COALITION_NUM_THREAD_QOS_TYPES];
524 memcpy(cpu_time_rqos, coal->r.cpu_time_rqos, sizeof(cpu_time_rqos));
525 /*
526 * Add to that all the active tasks' ledgers. Tasks cannot deallocate
527 * out from under us, since we hold the coalition lock.
528 */
529 task_t task;
530 qe_foreach_element(task, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) {
531 /*
532 * Rolling up stats for exec copy task or exec'd task will lead to double accounting.
533	 * Cannot take task lock after taking coalition lock
534 */
535 if (task_is_exec_copy(task) || task_did_exec(task)) {
536 continue;
537 }
538
539 ledger_rollup(sum_ledger, task->ledger);
540 bytesread += task->task_io_stats->disk_reads.size;
541 byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size;
542#if !CONFIG_EMBEDDED
543 gpu_time += task_gpu_utilisation(task);
544#else
545 energy += task_energy(task);
546#endif
547 logical_immediate_writes += task->task_immediate_writes;
548 logical_deferred_writes += task->task_deferred_writes;
549 logical_invalidated_writes += task->task_invalidated_writes;
550 logical_metadata_writes += task->task_metadata_writes;
551 cpu_ptime += task_cpu_ptime(task);
552 task_update_cpu_time_qos_stats(task, cpu_time_eqos, cpu_time_rqos);
553 }
554
555 kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_me, (int64_t *)&cpu_time_billed_to_me);
556 if (kr != KERN_SUCCESS || cpu_time_billed_to_me < 0) {
557 cpu_time_billed_to_me = 0;
558 }
559
560 kr = ledger_get_balance(sum_ledger, task_ledgers.cpu_time_billed_to_others, (int64_t *)&cpu_time_billed_to_others);
561 if (kr != KERN_SUCCESS || cpu_time_billed_to_others < 0) {
562 cpu_time_billed_to_others = 0;
563 }
564
565 kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_me, (int64_t *)&energy_billed_to_me);
566 if (kr != KERN_SUCCESS || energy_billed_to_me < 0) {
567 energy_billed_to_me = 0;
568 }
569
570 kr = ledger_get_balance(sum_ledger, task_ledgers.energy_billed_to_others, (int64_t *)&energy_billed_to_others);
571 if (kr != KERN_SUCCESS || energy_billed_to_others < 0) {
572 energy_billed_to_others = 0;
573 }
574
575 /* collect information from the coalition itself */
576 cru_out->tasks_started = coal->r.task_count;
577 cru_out->tasks_exited = coal->r.dead_task_count;
578
579 uint64_t time_nonempty = coal->r.time_nonempty;
580 uint64_t last_became_nonempty_time = coal->r.last_became_nonempty_time;
581
582 coalition_unlock(coal);
583
584 /* Copy the totals out of sum_ledger */
585 kr = ledger_get_entries(sum_ledger, task_ledgers.cpu_time,
586 &credit, &debit);
587 if (kr != KERN_SUCCESS) {
588 credit = 0;
589 }
590 cru_out->cpu_time = credit;
591 cru_out->cpu_time_billed_to_me = (uint64_t)cpu_time_billed_to_me;
592 cru_out->cpu_time_billed_to_others = (uint64_t)cpu_time_billed_to_others;
593 cru_out->energy_billed_to_me = (uint64_t)energy_billed_to_me;
594 cru_out->energy_billed_to_others = (uint64_t)energy_billed_to_others;
595
596 kr = ledger_get_entries(sum_ledger, task_ledgers.interrupt_wakeups,
597 &credit, &debit);
598 if (kr != KERN_SUCCESS) {
599 credit = 0;
600 }
601 cru_out->interrupt_wakeups = credit;
602
603 kr = ledger_get_entries(sum_ledger, task_ledgers.platform_idle_wakeups,
604 &credit, &debit);
605 if (kr != KERN_SUCCESS) {
606 credit = 0;
607 }
608 cru_out->platform_idle_wakeups = credit;
609
610 cru_out->bytesread = bytesread;
611 cru_out->byteswritten = byteswritten;
612 cru_out->gpu_time = gpu_time;
613 cru_out->energy = energy;
614 cru_out->logical_immediate_writes = logical_immediate_writes;
615 cru_out->logical_deferred_writes = logical_deferred_writes;
616 cru_out->logical_invalidated_writes = logical_invalidated_writes;
617 cru_out->logical_metadata_writes = logical_metadata_writes;
618 cru_out->cpu_ptime = cpu_ptime;
619 cru_out->cpu_time_eqos_len = COALITION_NUM_THREAD_QOS_TYPES;
620 memcpy(cru_out->cpu_time_eqos, cpu_time_eqos, sizeof(cru_out->cpu_time_eqos));
621 ledger_dereference(sum_ledger);
622 sum_ledger = LEDGER_NULL;
623
624 if (last_became_nonempty_time) {
625 time_nonempty += mach_absolute_time() - last_became_nonempty_time;
626 }
627 absolutetime_to_nanoseconds(time_nonempty, &cru_out->time_nonempty);
628
629 return KERN_SUCCESS;
630}
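
/*
 * Example (illustrative sketch, kept under #if 0 and not compiled): turning a
 * coalition_resource_usage snapshot into an average %CPU figure, using
 * time_nonempty as the denominator as suggested by the comment on
 * struct i_resource_coalition. This assumes both cpu_time and time_nonempty
 * are reported in nanoseconds; coalition_avg_cpu_percent_example is a
 * hypothetical helper, not an existing interface.
 */
#if 0
static uint64_t
coalition_avg_cpu_percent_example(coalition_t coal)
{
	struct coalition_resource_usage cru;

	if (coalition_resource_usage_internal(coal, &cru) != KERN_SUCCESS)
		return 0;
	if (cru.time_nonempty == 0)
		return 0;

	/* both values are in the same units, so the ratio is unitless */
	return (cru.cpu_time * 100) / cru.time_nonempty;
}
#endif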
631
632/*
633 *
634 * COALITION_TYPE_JETSAM
635 *
636 */
637static kern_return_t
638i_coal_jetsam_init(coalition_t coal, boolean_t privileged)
639{
640 assert(coal && coal->type == COALITION_TYPE_JETSAM);
641 (void)privileged;
642
643	coal->j.leader = TASK_NULL;
644 queue_head_init(coal->j.extensions);
645 queue_head_init(coal->j.services);
646 queue_head_init(coal->j.other);
647
648 return KERN_SUCCESS;
649}
650
651static void
652i_coal_jetsam_dealloc(__unused coalition_t coal)
653{
654 assert(coal && coal->type == COALITION_TYPE_JETSAM);
655
656 /* the coalition should be completely clear at this point */
657 assert(queue_empty(&coal->j.extensions));
658 assert(queue_empty(&coal->j.services));
659 assert(queue_empty(&coal->j.other));
660 assert(coal->j.leader == TASK_NULL);
661
662}
663
664static kern_return_t
665i_coal_jetsam_adopt_task(coalition_t coal, task_t task)
666{
667 struct i_jetsam_coalition *cj;
668 assert(coal && coal->type == COALITION_TYPE_JETSAM);
669
670 cj = &coal->j;
671
672 assert(queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));
673
674 /* put each task initially in the "other" list */
675 enqueue_tail(&cj->other, &task->task_coalition[COALITION_TYPE_JETSAM]);
676 coal_dbg("coalition %lld adopted PID:%d as UNDEF",
677 coal->id, task_pid(task));
678
679 return KERN_SUCCESS;
680}
681
682static kern_return_t
683i_coal_jetsam_remove_task(coalition_t coal, task_t task)
684{
685 assert(coal && coal->type == COALITION_TYPE_JETSAM);
686 assert(task->coalition[COALITION_TYPE_JETSAM] == coal);
687
688 coal_dbg("removing PID:%d from coalition id:%lld",
689 task_pid(task), coal->id);
690
691 if (task == coal->j.leader) {
692 coal->j.leader = NULL;
693 coal_dbg(" PID:%d was the leader!", task_pid(task));
694 } else {
695 assert(!queue_empty(&task->task_coalition[COALITION_TYPE_JETSAM]));
696 }
697
698 /* remove the task from the specific coalition role queue */
699 remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
700	queue_chain_init(task->task_coalition[COALITION_TYPE_JETSAM]);
701
702 return KERN_SUCCESS;
703}
704
705static kern_return_t
706i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role)
707{
708 struct i_jetsam_coalition *cj;
709 queue_t q = NULL;
710 assert(coal && coal->type == COALITION_TYPE_JETSAM);
711 assert(task->coalition[COALITION_TYPE_JETSAM] == coal);
712
713 cj = &coal->j;
714
715 switch (role) {
716 case COALITION_TASKROLE_LEADER:
717 coal_dbg("setting PID:%d as LEADER of %lld",
718 task_pid(task), coal->id);
719 if (cj->leader != TASK_NULL) {
720			/* re-queue the existing leader onto the "other" list */
721 coal_dbg(" re-queue existing leader (%d) as OTHER",
722 task_pid(cj->leader));
723 re_queue_tail(&cj->other, &cj->leader->task_coalition[COALITION_TYPE_JETSAM]);
724 }
725 /*
726 * remove the task from the "other" list
727 * (where it was put by default)
728 */
729 remqueue(&task->task_coalition[COALITION_TYPE_JETSAM]);
730 queue_chain_init(task->task_coalition[COALITION_TYPE_JETSAM]);
731
732 /* set the coalition leader */
733 cj->leader = task;
734 break;
735 case COALITION_TASKROLE_XPC:
736 coal_dbg("setting PID:%d as XPC in %lld",
737 task_pid(task), coal->id);
738 q = (queue_t)&cj->services;
739 break;
740 case COALITION_TASKROLE_EXT:
741 coal_dbg("setting PID:%d as EXT in %lld",
742 task_pid(task), coal->id);
743 q = (queue_t)&cj->extensions;
744 break;
745 case COALITION_TASKROLE_NONE:
746 /*
747 * Tasks with a role of "none" should fall through to an
748 * undefined role so long as the task is currently a member
749 * of the coalition. This scenario can happen if a task is
750 * killed (usually via jetsam) during exec.
751 */
752 if (task->coalition[COALITION_TYPE_JETSAM] != coal) {
753 panic("%s: task %p attempting to set role %d "
754 "in coalition %p to which it does not belong!", __func__, task, role, coal);
755 }
756 /* fall through */
757 case COALITION_TASKROLE_UNDEF:
758 coal_dbg("setting PID:%d as UNDEF in %lld",
759 task_pid(task), coal->id);
760 q = (queue_t)&cj->other;
761 break;
762 default:
763 panic("%s: invalid role(%d) for task", __func__, role);
764 return KERN_INVALID_ARGUMENT;
765 }
766
767 if (q != NULL)
768 re_queue_tail(q, &task->task_coalition[COALITION_TYPE_JETSAM]);
769
770 return KERN_SUCCESS;
771}
772
773static int
774i_coal_jetsam_get_taskrole(coalition_t coal, task_t task)
775{
776 struct i_jetsam_coalition *cj;
777 task_t t;
778
779 assert(coal && coal->type == COALITION_TYPE_JETSAM);
780 assert(task->coalition[COALITION_TYPE_JETSAM] == coal);
781
782 cj = &coal->j;
783
784 if (task == cj->leader)
785 return COALITION_TASKROLE_LEADER;
786
787 qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM]) {
788 if (t == task)
789 return COALITION_TASKROLE_XPC;
790 }
791
792 qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM]) {
793 if (t == task)
794 return COALITION_TASKROLE_EXT;
795 }
796
797 qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM]) {
798 if (t == task)
799 return COALITION_TASKROLE_UNDEF;
800 }
801
802 /* task not in the coalition?! */
803 return COALITION_TASKROLE_NONE;
804}
805
806static void
807i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalition_t, void *, task_t))
808{
809 struct i_jetsam_coalition *cj;
810 task_t t;
811
812 assert(coal && coal->type == COALITION_TYPE_JETSAM);
813
814 cj = &coal->j;
815
816 if (cj->leader)
817 callback(coal, ctx, cj->leader);
818
819 qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM])
820 callback(coal, ctx, t);
821
822 qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM])
823 callback(coal, ctx, t);
824
825 qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM])
826 callback(coal, ctx, t);
827}
828
829
830/*
831 *
832 * Main Coalition implementation
833 *
834 */
835
836/*
837 * coalition_create_internal
838 * Returns: New coalition object, referenced for the caller and unlocked.
839 * Condition: coalitions_list_lock must be UNLOCKED.
840 */
841kern_return_t
842coalition_create_internal(int type, int role, boolean_t privileged, coalition_t *out)
843{
844 kern_return_t kr;
845 struct coalition *new_coal;
846
847 if (type < 0 || type > COALITION_TYPE_MAX)
848 return KERN_INVALID_ARGUMENT;
849
850 new_coal = (struct coalition *)zalloc(coalition_zone);
851 if (new_coal == COALITION_NULL)
852 return KERN_RESOURCE_SHORTAGE;
853 bzero(new_coal, sizeof(*new_coal));
854
855 new_coal->type = type;
856 new_coal->role = role;
857
858 /* initialize type-specific resources */
859 kr = coal_call(new_coal, init, privileged);
860 if (kr != KERN_SUCCESS) {
861 zfree(coalition_zone, new_coal);
862 return kr;
863 }
864
865 /* One for caller, one for coalitions list */
866 new_coal->ref_count = 2;
867
868 new_coal->privileged = privileged ? TRUE : FALSE;
869#if DEVELOPMENT || DEBUG
870 new_coal->should_notify = 1;
871#endif
872
873 lck_mtx_init(&new_coal->lock, &coalitions_lck_grp, &coalitions_lck_attr);
874
875 lck_mtx_lock(&coalitions_list_lock);
876 new_coal->id = coalition_next_id++;
877 coalition_count++;
878 enqueue_tail(&coalitions_q, &new_coal->coalitions);
879
880 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW),
881 new_coal->id, new_coal->type);
882 lck_mtx_unlock(&coalitions_list_lock);
883
884 coal_dbg("id:%llu, type:%s", new_coal->id, coal_type_str(new_coal->type));
885
886 *out = new_coal;
887 return KERN_SUCCESS;
888}
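
/*
 * Example (illustrative sketch, kept under #if 0 and not compiled): the rough
 * lifetime of a dynamically created coalition -- create, adopt tasks, request
 * termination, reap, then drop the creator's reference. In practice these
 * steps are driven from the coalition syscalls; error handling and the task
 * adoption itself are omitted here.
 */
#if 0
static void
coalition_lifecycle_example(void)
{
	coalition_t coal = COALITION_NULL;
	coalition_t found;

	/* creation leaves two references: one for us, one for the global list */
	if (coalition_create_internal(COALITION_TYPE_JETSAM, COALITION_ROLE_SYSTEM,
	    TRUE, &coal) != KERN_SUCCESS)
		return;

	/* ... tasks are adopted via coalitions_adopt_task() at spawn time ... */

	/* once empty, ask for the no-more-processes notification */
	(void)coalition_request_terminate_internal(coal);

	/* the reap path looks the coalition up again, taking its own reference */
	found = coalition_find_by_id(coalition_id(coal));
	if (found != COALITION_NULL)
		(void)coalition_reap_internal(found); /* drops the list + lookup refs */

	/* finally drop the reference handed back by coalition_create_internal() */
	coalition_release(coal);
}
#endif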
889
890/*
891 * coalition_release
892 * Condition: coalition must be UNLOCKED.
893 * */
894void
895coalition_release(coalition_t coal)
896{
897 /* TODO: This can be done with atomics. */
898 coalition_lock(coal);
899 coal->ref_count--;
900
901#if COALITION_DEBUG
902 uint32_t rc = coal->ref_count;
903 uint32_t ac = coal->active_count;
904#endif /* COALITION_DEBUG */
905
906 coal_dbg("id:%llu type:%s ref_count:%u active_count:%u%s",
907 coal->id, coal_type_str(coal->type), rc, ac,
908 rc <= 0 ? ", will deallocate now" : "");
909
910 if (coal->ref_count > 0) {
911 coalition_unlock(coal);
912 return;
913 }
914
915 assert(coal->termrequested);
916 assert(coal->terminated);
917 assert(coal->active_count == 0);
918 assert(coal->reaped);
919 assert(coal->focal_task_count == 0);
920 assert(coal->nonfocal_task_count == 0);
921 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE),
922 coal->id, coal->type);
923
924 coal_call(coal, dealloc);
925
926 coalition_unlock(coal);
927
928 lck_mtx_destroy(&coal->lock, &coalitions_lck_grp);
929
930 zfree(coalition_zone, coal);
931}
932
933/*
934 * coalition_find_by_id_internal
935 * Returns: Coalition object with specified id, NOT referenced.
936 * If not found, returns COALITION_NULL.
937 * Condition: coalitions_list_lock must be LOCKED.
938 */
939static coalition_t
940coalition_find_by_id_internal(uint64_t coal_id)
941{
942 if (coal_id == 0) {
943 return COALITION_NULL;
944 }
945
946 lck_mtx_assert(&coalitions_list_lock, LCK_MTX_ASSERT_OWNED);
947 coalition_t coal;
948 qe_foreach_element(coal, &coalitions_q, coalitions) {
949 if (coal->id == coal_id) {
950 return coal;
951 }
952 }
953 return COALITION_NULL;
954}
955
956/*
957 * coalition_find_by_id
958 * Returns: Coalition object with specified id, referenced.
959 * Condition: coalitions_list_lock must be UNLOCKED.
960 */
961coalition_t
962coalition_find_by_id(uint64_t cid)
963{
964 if (cid == 0) {
965 return COALITION_NULL;
966 }
967
968 lck_mtx_lock(&coalitions_list_lock);
969
970 coalition_t coal = coalition_find_by_id_internal(cid);
971 if (coal == COALITION_NULL) {
972 lck_mtx_unlock(&coalitions_list_lock);
973 return COALITION_NULL;
974 }
975
976 coalition_lock(coal);
977
978 if (coal->reaped) {
979 coalition_unlock(coal);
980 lck_mtx_unlock(&coalitions_list_lock);
981 return COALITION_NULL;
982 }
983
984 if (coal->ref_count == 0) {
985 panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n",
986 coal, coal->id, coal_type_str(coal->type), coal->active_count);
987 }
988 coal->ref_count++;
989#if COALITION_DEBUG
990 uint32_t rc = coal->ref_count;
991#endif
992
993 coalition_unlock(coal);
994 lck_mtx_unlock(&coalitions_list_lock);
995
996 coal_dbg("id:%llu type:%s ref_count:%u",
997 coal->id, coal_type_str(coal->type), rc);
998
999 return coal;
1000}
1001
1002/*
1003 * coalition_find_and_activate_by_id
1004 * Returns: Coalition object with specified id, referenced, and activated.
1005 * Condition: coalitions_list_lock must be UNLOCKED.
1006 * This is the function to use when putting a 'new' thing into a coalition,
1007 * like posix_spawn of an XPC service by launchd.
1008 * See also coalition_extend_active.
1009 */
1010coalition_t
1011coalition_find_and_activate_by_id(uint64_t cid)
1012{
1013 if (cid == 0) {
1014 return COALITION_NULL;
1015 }
1016
1017 lck_mtx_lock(&coalitions_list_lock);
1018
1019 coalition_t coal = coalition_find_by_id_internal(cid);
1020 if (coal == COALITION_NULL) {
1021 lck_mtx_unlock(&coalitions_list_lock);
1022 return COALITION_NULL;
1023 }
1024
1025 coalition_lock(coal);
1026
1027 if (coal->reaped || coal->terminated) {
1028 /* Too late to put something new into this coalition, it's
1029 * already on its way out the door */
1030 coalition_unlock(coal);
1031 lck_mtx_unlock(&coalitions_list_lock);
1032 return COALITION_NULL;
1033 }
1034
1035 if (coal->ref_count == 0) {
1036 panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n",
1037 coal, coal->id, coal_type_str(coal->type), coal->active_count);
1038 }
1039
1040 coal->ref_count++;
1041 coal->active_count++;
1042
1043#if COALITION_DEBUG
1044 uint32_t rc = coal->ref_count;
1045 uint32_t ac = coal->active_count;
1046#endif
1047
1048 coalition_unlock(coal);
1049 lck_mtx_unlock(&coalitions_list_lock);
1050
1051 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u",
1052 coal->id, coal_type_str(coal->type), rc, ac);
1053
1054 return coal;
1055}
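
/*
 * Example (illustrative sketch, kept under #if 0 and not compiled): how a
 * spawn-like path might pin a coalition while launching something new into
 * it. The activation taken by coalition_find_and_activate_by_id() must be
 * paired with coalition_remove_active(), and the reference with
 * coalition_release(). coalition_spawn_pin_example is a hypothetical helper.
 */
#if 0
static kern_return_t
coalition_spawn_pin_example(uint64_t cid, task_t new_task)
{
	coalition_t coal = coalition_find_and_activate_by_id(cid);

	if (coal == COALITION_NULL)
		return KERN_TERMINATED; /* missing, reaped, or already terminating */

	/* ... adopt new_task here, e.g. via coalitions_adopt_task() ... */
	(void)new_task;

	/* drop the activation and the reference taken by the lookup above */
	coalition_remove_active(coal);
	coalition_release(coal);

	return KERN_SUCCESS;
}
#endif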
1056
1057uint64_t
1058coalition_id(coalition_t coal)
1059{
1060 return coal->id;
1061}
1062
1063void
1064task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES])
1065{
1066 int i;
1067 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1068 if (task->coalition[i])
1069 ids[i] = task->coalition[i]->id;
1070 else
1071 ids[i] = 0;
1072 }
1073}
1074
1075void
1076task_coalition_roles(task_t task, int roles[COALITION_NUM_TYPES])
1077{
1078 int i;
1079 memset(roles, 0, COALITION_NUM_TYPES * sizeof(roles[0]));
1080
1081 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1082 if (task->coalition[i]) {
1083 coalition_lock(task->coalition[i]);
1084 roles[i] = coal_call(task->coalition[i],
1085 get_taskrole, task);
1086 coalition_unlock(task->coalition[i]);
1087 } else {
1088 roles[i] = COALITION_TASKROLE_NONE;
1089 }
1090 }
1091}
1092
1093
1094int
1095coalition_type(coalition_t coal)
1096{
1097 return coal->type;
1098}
1099
1100boolean_t
1101coalition_term_requested(coalition_t coal)
1102{
1103 return coal->termrequested;
1104}
1105
1106boolean_t
1107coalition_is_terminated(coalition_t coal)
1108{
1109 return coal->terminated;
1110}
1111
1112boolean_t
1113coalition_is_reaped(coalition_t coal)
1114{
1115 return coal->reaped;
1116}
1117
1118boolean_t
1119coalition_is_privileged(coalition_t coal)
1120{
1121 return coal->privileged || unrestrict_coalition_syscalls;
1122}
1123
1124boolean_t
1125task_is_in_privileged_coalition(task_t task, int type)
1126{
1127 if (type < 0 || type > COALITION_TYPE_MAX)
1128 return FALSE;
1129 if (unrestrict_coalition_syscalls)
1130 return TRUE;
1131 if (!task->coalition[type])
1132 return FALSE;
1133 return task->coalition[type]->privileged;
1134}
1135
1136void task_coalition_update_gpu_stats(task_t task, uint64_t gpu_ns_delta)
1137{
1138 coalition_t coal;
1139
1140 assert(task != TASK_NULL);
1141 if (gpu_ns_delta == 0)
1142 return;
1143
1144 coal = task->coalition[COALITION_TYPE_RESOURCE];
1145 assert(coal != COALITION_NULL);
1146
1147 coalition_lock(coal);
1148 coal->r.gpu_time += gpu_ns_delta;
1149 coalition_unlock(coal);
1150}
1151
1152boolean_t task_coalition_adjust_focal_count(task_t task, int count, uint32_t *new_count)
1153{
1154 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1155 if (coal == COALITION_NULL)
1156 return FALSE;
1157
1158 *new_count = hw_atomic_add(&coal->focal_task_count, count);
1159 assert(*new_count != UINT32_MAX);
1160 return TRUE;
1161}
1162
1163uint32_t task_coalition_focal_count(task_t task)
1164{
1165 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1166 if (coal == COALITION_NULL)
1167 return 0;
1168
1169 return coal->focal_task_count;
1170}
1171
1172boolean_t task_coalition_adjust_nonfocal_count(task_t task, int count, uint32_t *new_count)
1173{
1174 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1175 if (coal == COALITION_NULL)
1176 return FALSE;
1177
1178 *new_count = hw_atomic_add(&coal->nonfocal_task_count, count);
1179 assert(*new_count != UINT32_MAX);
1180 return TRUE;
1181}
1182
1183uint32_t task_coalition_nonfocal_count(task_t task)
1184{
1185 coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING];
1186 if (coal == COALITION_NULL)
1187 return 0;
1188
1189 return coal->nonfocal_task_count;
1190}
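
/*
 * Example (illustrative sketch, kept under #if 0 and not compiled): keeping
 * the focal/nonfocal counters balanced when a task moves between foreground
 * and background. The real bookkeeping lives in the task policy code; this
 * only shows the shape of the counter API on the
 * COALITION_FOCAL_TASKS_ACCOUNTING coalition.
 */
#if 0
static void
task_role_transition_example(task_t task, boolean_t to_foreground)
{
	uint32_t new_count;

	if (to_foreground) {
		/* one more focal task, one fewer nonfocal task */
		(void)task_coalition_adjust_focal_count(task, 1, &new_count);
		(void)task_coalition_adjust_nonfocal_count(task, -1, &new_count);
	} else {
		(void)task_coalition_adjust_focal_count(task, -1, &new_count);
		(void)task_coalition_adjust_nonfocal_count(task, 1, &new_count);
	}
}
#endif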
1191
1192void coalition_set_efficient(coalition_t coal)
1193{
1194 coalition_lock(coal);
1195 coal->efficient = TRUE;
1196 coalition_unlock(coal);
1197}
1198
1199
1200void coalition_for_each_task(coalition_t coal, void *ctx,
1201 void (*callback)(coalition_t, void *, task_t))
1202{
1203 assert(coal != COALITION_NULL);
1204
1205 coal_dbg("iterating tasks in coalition %p id:%llu type:%s, active_count:%u",
1206 coal, coal->id, coal_type_str(coal->type), coal->active_count);
1207
1208 coalition_lock(coal);
1209
1210 coal_call(coal, iterate_tasks, ctx, callback);
1211
1212 coalition_unlock(coal);
1213}
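
/*
 * Example (illustrative sketch, kept under #if 0 and not compiled): using
 * coalition_for_each_task() with a trivial callback. The callback runs with
 * the coalition lock held, so it should be short and must not take locks
 * ordered before the coalition lock (such as the task lock).
 */
#if 0
static void
count_task_callback(coalition_t coal, void *ctx, task_t task)
{
	(void)coal;
	(void)task;
	*(int *)ctx += 1;
}

static int
coalition_count_tasks_example(coalition_t coal)
{
	int count = 0;

	coalition_for_each_task(coal, &count, count_task_callback);
	return count;
}
#endif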
1214
1215
1216void
1217coalition_remove_active(coalition_t coal)
1218{
1219 coalition_lock(coal);
1220
1221 assert(!coal->reaped);
1222 assert(coal->active_count > 0);
1223
1224 coal->active_count--;
1225
1226 boolean_t do_notify = FALSE;
1227 uint64_t notify_id = 0;
1228 uint32_t notify_flags = 0;
1229 if (coal->termrequested && coal->active_count == 0) {
1230 /* We only notify once, when active_count reaches zero.
1231 * We just decremented, so if it reached zero, we mustn't have
1232 * notified already.
1233 */
1234 assert(!coal->terminated);
1235 coal->terminated = TRUE;
1236
1237 assert(!coal->notified);
1238
1239 coal->notified = TRUE;
1240#if DEVELOPMENT || DEBUG
1241 do_notify = coal->should_notify;
1242#else
1243 do_notify = TRUE;
1244#endif
1245 notify_id = coal->id;
1246 notify_flags = 0;
1247 }
1248
1249#if COALITION_DEBUG
1250 uint64_t cid = coal->id;
1251 uint32_t rc = coal->ref_count;
1252 int ac = coal->active_count;
1253 int ct = coal->type;
1254#endif
1255 coalition_unlock(coal);
1256
1257 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u,%s",
1258 cid, coal_type_str(ct), rc, ac, do_notify ? " NOTIFY" : " ");
1259
1260 if (do_notify) {
1261 coalition_notify_user(notify_id, notify_flags);
1262 }
1263}
1264
1265/* Used for kernel_task, launchd, launchd's early boot tasks... */
1266kern_return_t
1267coalitions_adopt_init_task(task_t task)
1268{
1269 kern_return_t kr;
1270 kr = coalitions_adopt_task(init_coalition, task);
1271 if (kr != KERN_SUCCESS) {
1272 panic("failed to adopt task %p into default coalition: %d", task, kr);
1273 }
1274 return kr;
1275}
1276
1277/* Used for forked corpses. */
1278kern_return_t
1279coalitions_adopt_corpse_task(task_t task)
1280{
1281 kern_return_t kr;
1282 kr = coalitions_adopt_task(corpse_coalition, task);
1283 if (kr != KERN_SUCCESS) {
1284 panic("failed to adopt task %p into corpse coalition: %d", task, kr);
1285 }
1286 return kr;
1287}
1288
1289/*
1290 * coalition_adopt_task_internal
1291 * Condition: Coalition must be referenced and unlocked. Will fail if coalition
1292 * is already terminated.
1293 */
1294static kern_return_t
1295coalition_adopt_task_internal(coalition_t coal, task_t task)
1296{
1297 kern_return_t kr;
1298
1299 if (task->coalition[coal->type]) {
1300 return KERN_ALREADY_IN_SET;
1301 }
1302
1303 coalition_lock(coal);
1304
1305 if (coal->reaped || coal->terminated) {
1306 coalition_unlock(coal);
1307 return KERN_TERMINATED;
1308 }
1309
1310 kr = coal_call(coal, adopt_task, task);
1311 if (kr != KERN_SUCCESS)
1312 goto out_unlock;
1313
1314 coal->active_count++;
1315
1316 coal->ref_count++;
1317
1318 task->coalition[coal->type] = coal;
1319
1320out_unlock:
1321#if COALITION_DEBUG
1322 (void)coal; /* need expression after label */
1323 uint64_t cid = coal->id;
1324 uint32_t rc = coal->ref_count;
1325 uint32_t ct = coal->type;
1326#endif
1327 if (get_task_uniqueid(task) != UINT64_MAX) {
1328 /* On 32-bit targets, uniqueid will get truncated to 32 bits */
1329 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_ADOPT),
1330 coal->id, get_task_uniqueid(task));
1331 }
1332
1333 coalition_unlock(coal);
1334
1335 coal_dbg("task:%d, id:%llu type:%s ref_count:%u, kr=%d",
1336 task_pid(task), cid, coal_type_str(ct), rc, kr);
1337 return kr;
1338}
1339
1340static kern_return_t
1341coalition_remove_task_internal(task_t task, int type)
1342{
1343 kern_return_t kr;
1344
1345 coalition_t coal = task->coalition[type];
1346
1347 if (!coal)
1348 return KERN_SUCCESS;
1349
1350 assert(coal->type == (uint32_t)type);
1351
1352 coalition_lock(coal);
1353
1354 kr = coal_call(coal, remove_task, task);
1355
1356#if COALITION_DEBUG
1357 uint64_t cid = coal->id;
1358 uint32_t rc = coal->ref_count;
1359 int ac = coal->active_count;
1360 int ct = coal->type;
1361#endif
1362 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_REMOVE),
1363 coal->id, get_task_uniqueid(task));
1364 coalition_unlock(coal);
1365
1366 coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u, kr=%d",
1367 cid, coal_type_str(ct), rc, ac, kr);
1368
1369 coalition_remove_active(coal);
1370
1371 return kr;
1372}
1373
1374/*
1375 * coalitions_adopt_task
1376 * Condition: All coalitions must be referenced and unlocked.
1377 * Will fail if any coalition is already terminated.
1378 */
1379kern_return_t
1380coalitions_adopt_task(coalition_t *coals, task_t task)
1381{
1382 int i;
1383 kern_return_t kr;
1384
1385 if (!coals || coals[COALITION_TYPE_RESOURCE] == COALITION_NULL)
1386 return KERN_INVALID_ARGUMENT;
1387
1388 /* verify that the incoming coalitions are what they say they are */
1389 for (i = 0; i < COALITION_NUM_TYPES; i++)
1390 if (coals[i] && coals[i]->type != (uint32_t)i)
1391 return KERN_INVALID_ARGUMENT;
1392
1393 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1394 kr = KERN_SUCCESS;
1395 if (coals[i])
1396 kr = coalition_adopt_task_internal(coals[i], task);
1397 if (kr != KERN_SUCCESS) {
1398 /* dis-associate any coalitions that just adopted this task */
1399 while (--i >= 0) {
1400 if (task->coalition[i])
1401 coalition_remove_task_internal(task, i);
1402 }
1403 break;
1404 }
1405 }
1406 return kr;
1407}
1408
1409/*
1410 * coalitions_remove_task
1411 * Condition: task must be referenced and UNLOCKED; all task's coalitions must be UNLOCKED
1412 */
1413kern_return_t
1414coalitions_remove_task(task_t task)
1415{
1416 kern_return_t kr;
1417 int i;
1418
1419 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1420 kr = coalition_remove_task_internal(task, i);
1421 assert(kr == KERN_SUCCESS);
1422 }
1423
1424 return kr;
1425}
1426
1427/*
1428 * task_release_coalitions
1429 * helper function to release references to all coalitions in which
1430 * 'task' is a member.
1431 */
1432void
1433task_release_coalitions(task_t task)
1434{
1435 int i;
1436 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1437 if (task->coalition[i]) {
1438 coalition_release(task->coalition[i]);
1439 } else if (i == COALITION_TYPE_RESOURCE) {
1440 panic("deallocating task %p was not a member of a resource coalition", task);
1441 }
1442 }
1443}
1444
1445/*
1446 * coalitions_set_roles
1447 * for each type of coalition, if the task is a member of a coalition of
1448 * that type (given in the coalitions parameter) then set the role of
1449 * the task within that coalition.
1450 */
1451kern_return_t coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES],
1452 task_t task, int roles[COALITION_NUM_TYPES])
1453{
1454 kern_return_t kr = KERN_SUCCESS;
1455 int i;
1456
1457 for (i = 0; i < COALITION_NUM_TYPES; i++) {
1458 if (!coalitions[i])
1459 continue;
1460 coalition_lock(coalitions[i]);
1461 kr = coal_call(coalitions[i], set_taskrole, task, roles[i]);
1462 coalition_unlock(coalitions[i]);
1463 assert(kr == KERN_SUCCESS);
1464 }
1465
1466 return kr;
1467}
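
/*
 * Example (illustrative sketch, kept under #if 0 and not compiled): marking a
 * task that has already been adopted into its coalitions as the leader of its
 * jetsam coalition, while leaving its resource-coalition role undefined.
 * set_jetsam_leader_example is a hypothetical helper.
 */
#if 0
static void
set_jetsam_leader_example(task_t task)
{
	int roles[COALITION_NUM_TYPES];

	roles[COALITION_TYPE_RESOURCE] = COALITION_TASKROLE_UNDEF;
	roles[COALITION_TYPE_JETSAM] = COALITION_TASKROLE_LEADER;

	(void)coalitions_set_roles(task->coalition, task, roles);
}
#endif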
1468
1469/*
1470 * coalition_terminate_internal
1471 * Condition: Coalition must be referenced and UNLOCKED.
1472 */
1473kern_return_t
1474coalition_request_terminate_internal(coalition_t coal)
1475{
1476 assert(coal->type >= 0 && coal->type <= COALITION_TYPE_MAX);
1477
1478 if (coal == init_coalition[coal->type]) {
1479 return KERN_DEFAULT_SET;
1480 }
1481
1482 coalition_lock(coal);
1483
1484 if (coal->reaped) {
1485 coalition_unlock(coal);
1486 return KERN_INVALID_NAME;
1487 }
1488
1489 if (coal->terminated || coal->termrequested) {
1490 coalition_unlock(coal);
1491 return KERN_TERMINATED;
1492 }
1493
1494 coal->termrequested = TRUE;
1495
1496 boolean_t do_notify = FALSE;
1497 uint64_t note_id = 0;
1498 uint32_t note_flags = 0;
1499
1500 if (coal->active_count == 0) {
1501 /*
1502 * We only notify once, when active_count reaches zero.
1503 * We just set termrequested. If the active count
1504 * was already at zero (tasks died before we could request
1505 * a termination notification), we should notify.
1506 */
1507 assert(!coal->terminated);
1508 coal->terminated = TRUE;
1509
1510 assert(!coal->notified);
1511
1512 coal->notified = TRUE;
1513#if DEVELOPMENT || DEBUG
1514 do_notify = coal->should_notify;
1515#else
1516 do_notify = TRUE;
1517#endif
1518 note_id = coal->id;
1519 note_flags = 0;
1520 }
1521
1522 coalition_unlock(coal);
1523
1524 if (do_notify) {
1525 coalition_notify_user(note_id, note_flags);
1526 }
1527
1528 return KERN_SUCCESS;
1529}
1530
1531/*
1532 * coalition_reap_internal
1533 * Condition: Coalition must be referenced and UNLOCKED.
1534 */
1535kern_return_t
1536coalition_reap_internal(coalition_t coal)
1537{
1538 assert(coal->type <= COALITION_TYPE_MAX);
1539
1540 if (coal == init_coalition[coal->type]) {
1541 return KERN_DEFAULT_SET;
1542 }
1543
1544 coalition_lock(coal);
1545 if (coal->reaped) {
1546 coalition_unlock(coal);
1547 return KERN_TERMINATED;
1548 }
1549 if (!coal->terminated) {
1550 coalition_unlock(coal);
1551 return KERN_FAILURE;
1552 }
1553 assert(coal->termrequested);
1554 if (coal->active_count > 0) {
1555 coalition_unlock(coal);
1556 return KERN_FAILURE;
1557 }
1558
1559 coal->reaped = TRUE;
1560
1561 /* Caller, launchd, and coalitions list should each have a reference */
1562 assert(coal->ref_count > 2);
1563
1564 coalition_unlock(coal);
1565
1566 lck_mtx_lock(&coalitions_list_lock);
1567 coalition_count--;
1568 remqueue(&coal->coalitions);
1569 lck_mtx_unlock(&coalitions_list_lock);
1570
1571 /* Release the list's reference and launchd's reference. */
1572 coalition_release(coal);
1573 coalition_release(coal);
1574
1575 return KERN_SUCCESS;
1576}
1577
1578#if DEVELOPMENT || DEBUG
1579int coalition_should_notify(coalition_t coal)
1580{
1581 int should;
1582 if (!coal)
1583 return -1;
1584 coalition_lock(coal);
1585 should = coal->should_notify;
1586 coalition_unlock(coal);
1587
1588 return should;
1589}
1590
1591void coalition_set_notify(coalition_t coal, int notify)
1592{
1593 if (!coal)
1594 return;
1595 coalition_lock(coal);
1596 coal->should_notify = !!notify;
1597 coalition_unlock(coal);
1598}
1599#endif
1600
1601void
1602coalitions_init(void)
1603{
1604 kern_return_t kr;
1605 int i;
1606 const struct coalition_type *ctype;
1607
1608 coalition_zone = zinit(
1609 sizeof(struct coalition),
1610 CONFIG_COALITION_MAX * sizeof(struct coalition),
1611 COALITION_CHUNK * sizeof(struct coalition),
1612 "coalitions");
1613 zone_change(coalition_zone, Z_NOENCRYPT, TRUE);
1614 queue_head_init(coalitions_q);
1615
1616 if (!PE_parse_boot_argn("unrestrict_coalition_syscalls", &unrestrict_coalition_syscalls,
1617 sizeof (unrestrict_coalition_syscalls))) {
1618 unrestrict_coalition_syscalls = 0;
1619 }
1620
1621 if (!PE_parse_boot_argn("tg_adaptive", &merge_adaptive_coalitions,
1622 sizeof (merge_adaptive_coalitions))) {
1623 merge_adaptive_coalitions = 0;
1624 }
1625
1626 lck_grp_attr_setdefault(&coalitions_lck_grp_attr);
1627 lck_grp_init(&coalitions_lck_grp, "coalition", &coalitions_lck_grp_attr);
1628 lck_attr_setdefault(&coalitions_lck_attr);
1629 lck_mtx_init(&coalitions_list_lock, &coalitions_lck_grp, &coalitions_lck_attr);
1630
1631 init_task_ledgers();
1632
1633 for (i = 0, ctype = &s_coalition_types[0]; i < COALITION_NUM_TYPES; ctype++, i++) {
1634 /* verify the entry in the global coalition types array */
1635 if (ctype->type != i ||
1636 !ctype->init ||
1637 !ctype->dealloc ||
1638 !ctype->adopt_task ||
1639 !ctype->remove_task) {
1640 panic("%s: Malformed coalition type %s(%d) in slot for type:%s(%d)",
1641 __func__, coal_type_str(ctype->type), ctype->type, coal_type_str(i), i);
1642 }
1643 if (!ctype->has_default)
1644 continue;
1645 kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, TRUE, &init_coalition[ctype->type]);
1646 if (kr != KERN_SUCCESS)
1647 panic("%s: could not create init %s coalition: kr:%d",
1648 __func__, coal_type_str(i), kr);
1649 kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, FALSE, &corpse_coalition[ctype->type]);
1650 if (kr != KERN_SUCCESS)
1651 panic("%s: could not create corpse %s coalition: kr:%d",
1652 __func__, coal_type_str(i), kr);
1653 }
1654
1655 /* "Leak" our reference to the global object */
1656}
1657
1658/*
1659 * BSD Kernel interface functions
1660 *
1661 */
1662static void coalition_fill_procinfo(struct coalition *coal,
1663 struct procinfo_coalinfo *coalinfo)
1664{
1665 coalinfo->coalition_id = coal->id;
1666 coalinfo->coalition_type = coal->type;
1667 coalinfo->coalition_tasks = coalition_get_task_count(coal);
1668}
1669
1670
1671int coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_sz)
1672{
1673 int ncoals = 0;
1674 struct coalition *coal;
1675
1676 lck_mtx_lock(&coalitions_list_lock);
1677 qe_foreach_element(coal, &coalitions_q, coalitions) {
1678 if (!coal->reaped && (type < 0 || type == (int)coal->type)) {
1679 if (coal_list && ncoals < list_sz)
1680 coalition_fill_procinfo(coal, &coal_list[ncoals]);
1681 ++ncoals;
1682 }
1683 }
1684 lck_mtx_unlock(&coalitions_list_lock);
1685
1686 return ncoals;
1687}
1688
1689/*
1690 * Jetsam coalition interface
1691 *
1692 */
1693boolean_t coalition_is_leader(task_t task, int coal_type, coalition_t *coal)
1694{
1695 coalition_t c;
1696 boolean_t ret;
1697
1698 if (coal) /* handle the error cases gracefully */
1699 *coal = COALITION_NULL;
1700
1701 if (!task)
1702 return FALSE;
1703
1704 if (coal_type > COALITION_TYPE_MAX)
1705 return FALSE;
1706
1707 c = task->coalition[coal_type];
1708 if (!c)
1709 return FALSE;
1710
1711 assert((int)c->type == coal_type);
1712
1713 coalition_lock(c);
1714
1715 if (coal)
1716 *coal = c;
1717
1718 ret = FALSE;
1719 if (c->type == COALITION_TYPE_JETSAM && c->j.leader == task)
1720 ret = TRUE;
1721
1722 coalition_unlock(c);
1723
1724 return ret;
1725}
1726
1727kern_return_t coalition_iterate_stackshot(coalition_iterate_fn_t callout, void *arg, uint32_t coalition_type)
1728{
1729 coalition_t coal;
1730 int i = 0;
1731
1732 qe_foreach_element(coal, &coalitions_q, coalitions) {
1733 if (coal == NULL || !ml_validate_nofault((vm_offset_t)coal, sizeof(struct coalition)))
1734 return KERN_FAILURE;
1735
1736 if (coalition_type == coal->type)
1737 callout(arg, i++, coal);
1738 }
1739
1740 return KERN_SUCCESS;
1741}
1742
1743task_t kdp_coalition_get_leader(coalition_t coal)
1744{
1745 if (!coal)
1746 return TASK_NULL;
1747
1748 if (coal->type == COALITION_TYPE_JETSAM) {
1749 return coal->j.leader;
1750 }
1751 return TASK_NULL;
1752}
1753
1754task_t coalition_get_leader(coalition_t coal)
1755{
1756 task_t leader = TASK_NULL;
1757
1758 if (!coal)
1759 return TASK_NULL;
1760
1761 coalition_lock(coal);
1762 if (coal->type != COALITION_TYPE_JETSAM)
1763 goto out_unlock;
1764
1765 leader = coal->j.leader;
1766 if (leader != TASK_NULL)
1767 task_reference(leader);
1768
1769out_unlock:
1770 coalition_unlock(coal);
1771 return leader;
1772}
1773
1774
1775int coalition_get_task_count(coalition_t coal)
1776{
1777 int ntasks = 0;
1778 struct queue_entry *qe;
1779 if (!coal)
1780 return 0;
1781
1782 coalition_lock(coal);
1783 switch (coal->type) {
1784 case COALITION_TYPE_RESOURCE:
1785 qe_foreach(qe, &coal->r.tasks)
1786 ntasks++;
1787 break;
1788 case COALITION_TYPE_JETSAM:
1789 if (coal->j.leader)
1790 ntasks++;
1791 qe_foreach(qe, &coal->j.other)
1792 ntasks++;
1793 qe_foreach(qe, &coal->j.extensions)
1794 ntasks++;
1795 qe_foreach(qe, &coal->j.services)
1796 ntasks++;
1797 break;
1798 default:
1799 break;
1800 }
1801 coalition_unlock(coal);
1802
1803 return ntasks;
1804}
1805
1806
1807static uint64_t i_get_list_footprint(queue_t list, int type, int *ntasks)
1808{
1809 task_t task;
1810 uint64_t bytes = 0;
1811
1812 qe_foreach_element(task, list, task_coalition[type]) {
1813 bytes += get_task_phys_footprint(task);
1814 coal_dbg(" [%d] task_pid:%d, type:%d, footprint:%lld",
1815 *ntasks, task_pid(task), type, bytes);
1816 *ntasks += 1;
1817 }
1818
1819 return bytes;
1820}
1821
1822uint64_t coalition_get_page_count(coalition_t coal, int *ntasks)
1823{
1824 uint64_t bytes = 0;
1825 int num_tasks = 0;
1826
1827 if (ntasks)
1828 *ntasks = 0;
1829 if (!coal)
1830 return bytes;
1831
1832 coalition_lock(coal);
1833
1834 switch (coal->type) {
1835 case COALITION_TYPE_RESOURCE:
1836 bytes += i_get_list_footprint(&coal->r.tasks, COALITION_TYPE_RESOURCE, &num_tasks);
1837 break;
1838 case COALITION_TYPE_JETSAM:
1839 if (coal->j.leader) {
1840 bytes += get_task_phys_footprint(coal->j.leader);
1841 num_tasks = 1;
1842 }
1843 bytes += i_get_list_footprint(&coal->j.extensions, COALITION_TYPE_JETSAM, &num_tasks);
1844 bytes += i_get_list_footprint(&coal->j.services, COALITION_TYPE_JETSAM, &num_tasks);
1845 bytes += i_get_list_footprint(&coal->j.other, COALITION_TYPE_JETSAM, &num_tasks);
1846 break;
1847 default:
1848 break;
1849 }
1850
1851 coalition_unlock(coal);
1852
1853 if (ntasks)
1854 *ntasks = num_tasks;
1855
1856 return bytes / PAGE_SIZE_64;
1857}
1858
1859struct coal_sort_s {
1860 int pid;
1861 int usr_order;
1862 uint64_t bytes;
1863};
1864
1865/*
1866 * return < 0 for a < b
1867 * 0 for a == b
1868 * > 0 for a > b
1869 */
1870typedef int (*cmpfunc_t)(const void *a, const void *b);
1871
1872extern void
1873qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
1874
1875static int dflt_cmp(const void *a, const void *b)
1876{
1877 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
1878 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
1879
1880 /*
1881 * if both A and B are equal, use a memory descending sort
1882 */
1883 if (csA->usr_order == csB->usr_order)
1884 return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
1885
1886 /* otherwise, return the relationship between user specified orders */
1887 return (csA->usr_order - csB->usr_order);
1888}
1889
1890static int mem_asc_cmp(const void *a, const void *b)
1891{
1892 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
1893 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
1894
1895 return (int)((int64_t)csA->bytes - (int64_t)csB->bytes);
1896}
1897
1898static int mem_dec_cmp(const void *a, const void *b)
1899{
1900 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
1901 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
1902
1903 return (int)((int64_t)csB->bytes - (int64_t)csA->bytes);
1904}
1905
1906static int usr_asc_cmp(const void *a, const void *b)
1907{
1908 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
1909 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
1910
1911 return (csA->usr_order - csB->usr_order);
1912}
1913
1914static int usr_dec_cmp(const void *a, const void *b)
1915{
1916 const struct coal_sort_s *csA = (const struct coal_sort_s *)a;
1917 const struct coal_sort_s *csB = (const struct coal_sort_s *)b;
1918
1919 return (csB->usr_order - csA->usr_order);
1920}
1921
1922/* avoid dynamic allocation in this path */
1923#define MAX_SORTED_PIDS 80
1924
1925static int coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list,
1926 struct coal_sort_s *sort_array, int array_sz)
1927{
1928 int ntasks = 0;
1929 task_t task;
1930
1931 assert(sort_array != NULL);
1932
1933 if (array_sz <= 0)
1934 return 0;
1935
1936 if (!list) {
1937 /*
1938 * this function will only be called with a NULL
1939 * list for JETSAM-type coalitions, and is intended
1940 * to investigate the leader process
1941 */
1942 if (coal->type != COALITION_TYPE_JETSAM ||
1943 coal->j.leader == TASK_NULL)
1944 return 0;
1945 sort_array[0].pid = task_pid(coal->j.leader);
1946 switch (sort_order) {
1947 case COALITION_SORT_DEFAULT:
1948 sort_array[0].usr_order = 0;
1949 /* fall-through */
1950 case COALITION_SORT_MEM_ASC:
1951 case COALITION_SORT_MEM_DEC:
1952 sort_array[0].bytes = get_task_phys_footprint(coal->j.leader);
1953 break;
1954 case COALITION_SORT_USER_ASC:
1955 case COALITION_SORT_USER_DEC:
1956 sort_array[0].usr_order = 0;
1957 break;
1958 default:
1959 break;
1960 }
1961 return 1;
1962 }
1963
1964 qe_foreach_element(task, list, task_coalition[coal->type]) {
1965 if (ntasks >= array_sz) {
1966 printf("WARNING: more than %d pids in coalition %llu\n",
1967 MAX_SORTED_PIDS, coal->id);
1968 break;
1969 }
1970
1971 sort_array[ntasks].pid = task_pid(task);
1972
1973 switch (sort_order) {
1974 case COALITION_SORT_DEFAULT:
1975 sort_array[ntasks].usr_order = 0;
1976 /* fall-through */
1977 case COALITION_SORT_MEM_ASC:
1978 case COALITION_SORT_MEM_DEC:
1979 sort_array[ntasks].bytes = get_task_phys_footprint(task);
1980 break;
1981 case COALITION_SORT_USER_ASC:
1982 case COALITION_SORT_USER_DEC:
1983 sort_array[ntasks].usr_order = 0;
1984 break;
1985 default:
1986 break;
1987 }
1988
1989 ntasks++;
1990 }
1991
1992 return ntasks;
1993}
1994
1995int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order,
1996 int *pid_list, int list_sz)
1997{
1998 struct i_jetsam_coalition *cj;
1999 int ntasks = 0;
2000 cmpfunc_t cmp_func = NULL;
2001 struct coal_sort_s sort_array[MAX_SORTED_PIDS] = { {0,0,0} }; /* keep to < 2k */
2002
2003 if (!coal ||
2004 !(rolemask & COALITION_ROLEMASK_ALLROLES) ||
2005 !pid_list || list_sz < 1) {
2006 coal_dbg("Invalid parameters: coal:%p, type:%d, rolemask:0x%x, "
2007 "pid_list:%p, list_sz:%d", coal, coal ? coal->type : -1,
2008 rolemask, pid_list, list_sz);
2009 return -EINVAL;
2010 }
2011
2012 switch (sort_order) {
2013 case COALITION_SORT_NOSORT:
2014 cmp_func = NULL;
2015 break;
2016 case COALITION_SORT_DEFAULT:
2017 cmp_func = dflt_cmp;
2018 break;
2019 case COALITION_SORT_MEM_ASC:
2020 cmp_func = mem_asc_cmp;
2021 break;
2022 case COALITION_SORT_MEM_DEC:
2023 cmp_func = mem_dec_cmp;
2024 break;
2025 case COALITION_SORT_USER_ASC:
2026 cmp_func = usr_asc_cmp;
2027 break;
2028 case COALITION_SORT_USER_DEC:
2029 cmp_func = usr_dec_cmp;
2030 break;
2031 default:
2032 return -ENOTSUP;
2033 }
2034
2035 coalition_lock(coal);
2036
2037 if (coal->type == COALITION_TYPE_RESOURCE) {
2038 ntasks += coalition_get_sort_list(coal, sort_order, &coal->r.tasks,
2039 sort_array, MAX_SORTED_PIDS);
2040 goto unlock_coal;
2041 }
2042
2043 cj = &coal->j;
2044
2045 if (rolemask & COALITION_ROLEMASK_UNDEF)
2046 ntasks += coalition_get_sort_list(coal, sort_order, &cj->other,
2047 sort_array + ntasks,
2048 MAX_SORTED_PIDS - ntasks);
2049
2050 if (rolemask & COALITION_ROLEMASK_XPC)
2051 ntasks += coalition_get_sort_list(coal, sort_order, &cj->services,
2052 sort_array + ntasks,
2053 MAX_SORTED_PIDS - ntasks);
2054
2055 if (rolemask & COALITION_ROLEMASK_EXT)
2056 ntasks += coalition_get_sort_list(coal, sort_order, &cj->extensions,
2057 sort_array + ntasks,
2058 MAX_SORTED_PIDS - ntasks);
2059
2060 if (rolemask & COALITION_ROLEMASK_LEADER)
2061 ntasks += coalition_get_sort_list(coal, sort_order, NULL,
2062 sort_array + ntasks,
2063 MAX_SORTED_PIDS - ntasks);
2064
2065unlock_coal:
2066 coalition_unlock(coal);
2067
2068 /* sort based on the chosen criterion (no sense sorting 1 item) */
2069 if (cmp_func && ntasks > 1)
2070 qsort(sort_array, ntasks, sizeof(struct coal_sort_s), cmp_func);
2071
2072 for (int i = 0; i < ntasks; i++) {
2073 if (i >= list_sz)
2074 break;
2075 coal_dbg(" [%d] PID:%d, footprint:%lld, usr_order:%d",
2076 i, sort_array[i].pid, sort_array[i].bytes,
2077 sort_array[i].usr_order);
2078 pid_list[i] = sort_array[i].pid;
2079 }
2080
2081 return ntasks;
2082}
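
/*
 * Example (illustrative sketch, kept under #if 0 and not compiled): how a
 * BSD-side consumer (e.g. the proc_info path) might pull a memory-sorted pid
 * list for a coalition. MAX_PIDS_EXAMPLE and dump_coalition_pids_example are
 * hypothetical names used only for illustration.
 */
#if 0
#define MAX_PIDS_EXAMPLE 16

static void
dump_coalition_pids_example(coalition_t coal)
{
	int pids[MAX_PIDS_EXAMPLE];
	int npids;

	npids = coalition_get_pid_list(coal, COALITION_ROLEMASK_ALLROLES,
	    COALITION_SORT_MEM_DEC, pids, MAX_PIDS_EXAMPLE);

	/* npids may exceed the buffer size; only the first entries were copied */
	for (int i = 0; i < npids && i < MAX_PIDS_EXAMPLE; i++)
		printf("coalition %llu pid[%d] = %d\n",
		    coalition_id(coal), i, pids[i]);
}
#endif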
2083