1 | /* |
2 | * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <kern/policy_internal.h> |
30 | #include <mach/task_policy.h> |
31 | |
32 | #include <mach/mach_types.h> |
33 | #include <mach/task_server.h> |
34 | |
35 | #include <kern/host.h> /* host_priv_self() */ |
36 | #include <mach/host_priv.h> /* host_get_special_port() */ |
37 | #include <mach/host_special_ports.h> /* RESOURCE_NOTIFY_PORT */ |
38 | #include <kern/sched.h> |
39 | #include <kern/task.h> |
40 | #include <mach/thread_policy.h> |
41 | #include <sys/errno.h> |
42 | #include <sys/resource.h> |
43 | #include <machine/limits.h> |
44 | #include <kern/ledger.h> |
45 | #include <kern/thread_call.h> |
46 | #include <kern/sfi.h> |
47 | #include <kern/coalition.h> |
48 | #if CONFIG_TELEMETRY |
49 | #include <kern/telemetry.h> |
50 | #endif |
51 | #if CONFIG_EMBEDDED |
52 | #include <kern/kalloc.h> |
53 | #include <sys/errno.h> |
54 | #endif /* CONFIG_EMBEDDED */ |
55 | |
56 | #if IMPORTANCE_INHERITANCE |
57 | #include <ipc/ipc_importance.h> |
58 | #if IMPORTANCE_TRACE |
59 | #include <mach/machine/sdt.h> |
60 | #endif /* IMPORTANCE_TRACE */ |
61 | #endif /* IMPORTANCE_INHERITACE */ |
62 | |
63 | #include <sys/kdebug.h> |
64 | |
65 | /* |
66 | * Task Policy |
67 | * |
68 | * This subsystem manages task and thread IO priority and backgrounding, |
69 | * as well as importance inheritance, process suppression, task QoS, and apptype. |
70 | * These properties have a suprising number of complex interactions, so they are |
71 | * centralized here in one state machine to simplify the implementation of those interactions. |
72 | * |
73 | * Architecture: |
74 | * Threads and tasks have two policy fields: requested, effective. |
75 | * Requested represents the wishes of each interface that influences task policy. |
76 | * Effective represents the distillation of that policy into a set of behaviors. |
77 | * |
78 | * Each thread making a modification in the policy system passes a 'pending' struct, |
79 | * which tracks updates that will be applied after dropping the policy engine lock. |
80 | * |
81 | * Each interface that has an input into the task policy state machine controls a field in requested. |
82 | * If the interface has a getter, it returns what is in the field in requested, but that is |
83 | * not necessarily what is actually in effect. |
84 | * |
85 | * All kernel subsystems that behave differently based on task policy call into |
86 | * the proc_get_effective_(task|thread)_policy functions, which return the decision of the task policy state machine |
87 | * for that subsystem by querying only the 'effective' field. |
88 | * |
89 | * Policy change operations: |
90 | * Here are the steps to change a policy on a task or thread: |
91 | * 1) Lock task |
92 | * 2) Change requested field for the relevant policy |
93 | * 3) Run a task policy update, which recalculates effective based on requested, |
94 | * then takes a diff between the old and new versions of requested and calls the relevant |
95 | * other subsystems to apply these changes, and updates the pending field. |
96 | * 4) Unlock task |
97 | * 5) Run task policy update complete, which looks at the pending field to update |
98 | * subsystems which cannot be touched while holding the task lock. |
99 | * |
100 | * To add a new requested policy, add the field in the requested struct, the flavor in task.h, |
101 | * the setter and getter in proc_(set|get)_task_policy*, |
102 | * then set up the effects of that behavior in task_policy_update*. If the policy manifests |
103 | * itself as a distinct effective policy, add it to the effective struct and add it to the |
104 | * proc_get_effective_task_policy accessor. |
105 | * |
106 | * Most policies are set via proc_set_task_policy, but policies that don't fit that interface |
107 | * roll their own lock/set/update/unlock/complete code inside this file. |
108 | * |
109 | * |
110 | * Suppression policy |
111 | * |
112 | * These are a set of behaviors that can be requested for a task. They currently have specific |
113 | * implied actions when they're enabled, but they may be made customizable in the future. |
114 | * |
115 | * When the affected task is boosted, we temporarily disable the suppression behaviors |
116 | * so that the affected process has a chance to run so it can call the API to permanently |
117 | * disable the suppression behaviors. |
118 | * |
119 | * Locking |
120 | * |
121 | * Changing task policy on a task takes the task lock. |
122 | * Changing task policy on a thread takes the thread mutex. |
123 | * Task policy changes that affect threads will take each thread's mutex to update it if necessary. |
124 | * |
125 | * Querying the effective policy does not take a lock, because callers |
126 | * may run in interrupt context or other place where locks are not OK. |
127 | * |
128 | * This means that any notification of state change needs to be externally synchronized. |
129 | * We do this by idempotent callouts after the state has changed to ask |
130 | * other subsystems to update their view of the world. |
131 | * |
132 | * TODO: Move all cpu/wakes/io monitor code into a separate file |
133 | * TODO: Move all importance code over to importance subsystem |
134 | * TODO: Move all taskwatch code into a separate file |
135 | * TODO: Move all VM importance code into a separate file |
136 | */ |
137 | |
138 | /* Task policy related helper functions */ |
139 | static void proc_set_task_policy_locked(task_t task, int category, int flavor, int value, int value2); |
140 | |
141 | static void task_policy_update_locked(task_t task, task_pend_token_t pend_token); |
142 | static void task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_token_t pend_token); |
143 | |
144 | /* For attributes that have two scalars as input/output */ |
145 | static void proc_set_task_policy2(task_t task, int category, int flavor, int value1, int value2); |
146 | static void proc_get_task_policy2(task_t task, int category, int flavor, int *value1, int *value2); |
147 | |
148 | static boolean_t task_policy_update_coalition_focal_tasks(task_t task, int prev_role, int next_role, task_pend_token_t pend_token); |
149 | |
150 | static uint64_t task_requested_bitfield(task_t task); |
151 | static uint64_t task_effective_bitfield(task_t task); |
152 | |
153 | /* Convenience functions for munging a policy bitfield into a tracepoint */ |
154 | static uintptr_t trequested_0(task_t task); |
155 | static uintptr_t trequested_1(task_t task); |
156 | static uintptr_t teffective_0(task_t task); |
157 | static uintptr_t teffective_1(task_t task); |
158 | |
159 | /* CPU limits helper functions */ |
160 | static int task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int entitled); |
161 | static int task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope); |
162 | static int task_enable_cpumon_locked(task_t task); |
163 | static int task_disable_cpumon(task_t task); |
164 | static int task_clear_cpuusage_locked(task_t task, int cpumon_entitled); |
165 | static int task_apply_resource_actions(task_t task, int type); |
166 | static void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t param1); |
167 | |
168 | #ifdef MACH_BSD |
169 | typedef struct proc * proc_t; |
170 | int proc_pid(void *proc); |
171 | extern int proc_selfpid(void); |
172 | extern char * proc_name_address(void *p); |
173 | extern char * proc_best_name(proc_t proc); |
174 | |
175 | extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, |
176 | char *buffer, uint32_t buffersize, |
177 | int32_t *retval); |
178 | #endif /* MACH_BSD */ |
179 | |
180 | |
181 | #if CONFIG_EMBEDDED |
182 | /* TODO: make CONFIG_TASKWATCH */ |
183 | /* Taskwatch related helper functions */ |
184 | static void set_thread_appbg(thread_t thread, int setbg,int importance); |
185 | static void add_taskwatch_locked(task_t task, task_watch_t * twp); |
186 | static void remove_taskwatch_locked(task_t task, task_watch_t * twp); |
187 | static void task_watch_lock(void); |
188 | static void task_watch_unlock(void); |
189 | static void apply_appstate_watchers(task_t task); |
190 | |
191 | typedef struct task_watcher { |
192 | queue_chain_t tw_links; /* queueing of threads */ |
193 | task_t tw_task; /* task that is being watched */ |
194 | thread_t tw_thread; /* thread that is watching the watch_task */ |
195 | int tw_state; /* the current app state of the thread */ |
196 | int tw_importance; /* importance prior to backgrounding */ |
197 | } task_watch_t; |
198 | |
199 | typedef struct thread_watchlist { |
200 | thread_t thread; /* thread being worked on for taskwatch action */ |
201 | int importance; /* importance to be restored if thread is being made active */ |
202 | } thread_watchlist_t; |
203 | |
204 | #endif /* CONFIG_EMBEDDED */ |
205 | |
206 | extern int memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap); |
207 | |
208 | /* Importance Inheritance related helper functions */ |
209 | |
210 | #if IMPORTANCE_INHERITANCE |
211 | |
212 | static void task_importance_mark_live_donor(task_t task, boolean_t donating); |
213 | static void task_importance_mark_receiver(task_t task, boolean_t receiving); |
214 | static void task_importance_mark_denap_receiver(task_t task, boolean_t denap); |
215 | |
216 | static boolean_t task_is_marked_live_importance_donor(task_t task); |
217 | static boolean_t task_is_importance_receiver(task_t task); |
218 | static boolean_t task_is_importance_denap_receiver(task_t task); |
219 | |
220 | static int task_importance_hold_internal_assertion(task_t target_task, uint32_t count); |
221 | |
222 | static void task_add_importance_watchport(task_t task, mach_port_t port, int *boostp); |
223 | static void task_importance_update_live_donor(task_t target_task); |
224 | |
225 | static void task_set_boost_locked(task_t task, boolean_t boost_active); |
226 | |
227 | #endif /* IMPORTANCE_INHERITANCE */ |
228 | |
229 | #if IMPORTANCE_TRACE |
230 | #define __imptrace_only |
231 | #else /* IMPORTANCE_TRACE */ |
232 | #define __imptrace_only __unused |
233 | #endif /* !IMPORTANCE_TRACE */ |
234 | |
235 | #if IMPORTANCE_INHERITANCE |
236 | #define __imp_only |
237 | #else |
238 | #define __imp_only __unused |
239 | #endif |
240 | |
241 | /* |
242 | * Default parameters for certain policies |
243 | */ |
244 | |
245 | int proc_standard_daemon_tier = THROTTLE_LEVEL_TIER1; |
246 | int proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER1; |
247 | int proc_tal_disk_tier = THROTTLE_LEVEL_TIER1; |
248 | |
249 | int proc_graphics_timer_qos = (LATENCY_QOS_TIER_0 & 0xFF); |
250 | |
251 | const int proc_default_bg_iotier = THROTTLE_LEVEL_TIER2; |
252 | |
253 | /* Latency/throughput QoS fields remain zeroed, i.e. TIER_UNSPECIFIED at creation */ |
254 | const struct task_requested_policy default_task_requested_policy = { |
255 | .trp_bg_iotier = proc_default_bg_iotier |
256 | }; |
257 | const struct task_effective_policy default_task_effective_policy = {}; |
258 | |
259 | /* |
260 | * Default parameters for CPU usage monitor. |
261 | * |
262 | * Default setting is 50% over 3 minutes. |
263 | */ |
264 | #define DEFAULT_CPUMON_PERCENTAGE 50 |
265 | #define DEFAULT_CPUMON_INTERVAL (3 * 60) |
266 | |
267 | uint8_t proc_max_cpumon_percentage; |
268 | uint64_t proc_max_cpumon_interval; |
269 | |
270 | |
271 | kern_return_t |
272 | qos_latency_policy_validate(task_latency_qos_t ltier) { |
273 | if ((ltier != LATENCY_QOS_TIER_UNSPECIFIED) && |
274 | ((ltier > LATENCY_QOS_TIER_5) || (ltier < LATENCY_QOS_TIER_0))) |
275 | return KERN_INVALID_ARGUMENT; |
276 | |
277 | return KERN_SUCCESS; |
278 | } |
279 | |
280 | kern_return_t |
281 | qos_throughput_policy_validate(task_throughput_qos_t ttier) { |
282 | if ((ttier != THROUGHPUT_QOS_TIER_UNSPECIFIED) && |
283 | ((ttier > THROUGHPUT_QOS_TIER_5) || (ttier < THROUGHPUT_QOS_TIER_0))) |
284 | return KERN_INVALID_ARGUMENT; |
285 | |
286 | return KERN_SUCCESS; |
287 | } |
288 | |
289 | static kern_return_t |
290 | task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count) { |
291 | if (count < TASK_QOS_POLICY_COUNT) |
292 | return KERN_INVALID_ARGUMENT; |
293 | |
294 | task_latency_qos_t ltier = qosinfo->task_latency_qos_tier; |
295 | task_throughput_qos_t ttier = qosinfo->task_throughput_qos_tier; |
296 | |
297 | kern_return_t kr = qos_latency_policy_validate(ltier); |
298 | |
299 | if (kr != KERN_SUCCESS) |
300 | return kr; |
301 | |
302 | kr = qos_throughput_policy_validate(ttier); |
303 | |
304 | return kr; |
305 | } |
306 | |
307 | uint32_t |
308 | (uint32_t qv) { |
309 | return (qv & 0xFF); |
310 | } |
311 | |
312 | uint32_t |
313 | qos_latency_policy_package(uint32_t qv) { |
314 | return (qv == LATENCY_QOS_TIER_UNSPECIFIED) ? LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | qv); |
315 | } |
316 | |
317 | uint32_t |
318 | qos_throughput_policy_package(uint32_t qv) { |
319 | return (qv == THROUGHPUT_QOS_TIER_UNSPECIFIED) ? THROUGHPUT_QOS_TIER_UNSPECIFIED : ((0xFE << 16) | qv); |
320 | } |
321 | |
322 | #define TASK_POLICY_SUPPRESSION_DISABLE 0x1 |
323 | #define TASK_POLICY_SUPPRESSION_IOTIER2 0x2 |
324 | #define TASK_POLICY_SUPPRESSION_NONDONOR 0x4 |
325 | /* TEMPORARY boot-arg controlling task_policy suppression (App Nap) */ |
326 | static boolean_t task_policy_suppression_flags = TASK_POLICY_SUPPRESSION_IOTIER2 | |
327 | TASK_POLICY_SUPPRESSION_NONDONOR; |
328 | |
329 | kern_return_t |
330 | task_policy_set( |
331 | task_t task, |
332 | task_policy_flavor_t flavor, |
333 | task_policy_t policy_info, |
334 | mach_msg_type_number_t count) |
335 | { |
336 | kern_return_t result = KERN_SUCCESS; |
337 | |
338 | if (task == TASK_NULL || task == kernel_task) |
339 | return (KERN_INVALID_ARGUMENT); |
340 | |
341 | switch (flavor) { |
342 | |
343 | case TASK_CATEGORY_POLICY: { |
344 | task_category_policy_t info = (task_category_policy_t)policy_info; |
345 | |
346 | if (count < TASK_CATEGORY_POLICY_COUNT) |
347 | return (KERN_INVALID_ARGUMENT); |
348 | |
349 | #if CONFIG_EMBEDDED |
350 | /* On embedded, you can't modify your own role. */ |
351 | if (current_task() == task) |
352 | return (KERN_INVALID_ARGUMENT); |
353 | #endif |
354 | |
355 | switch(info->role) { |
356 | case TASK_FOREGROUND_APPLICATION: |
357 | case TASK_BACKGROUND_APPLICATION: |
358 | case TASK_DEFAULT_APPLICATION: |
359 | proc_set_task_policy(task, |
360 | TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, |
361 | info->role); |
362 | break; |
363 | |
364 | case TASK_CONTROL_APPLICATION: |
365 | if (task != current_task() || task->sec_token.val[0] != 0) |
366 | result = KERN_INVALID_ARGUMENT; |
367 | else |
368 | proc_set_task_policy(task, |
369 | TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, |
370 | info->role); |
371 | break; |
372 | |
373 | case TASK_GRAPHICS_SERVER: |
374 | /* TODO: Restrict this role to FCFS <rdar://problem/12552788> */ |
375 | if (task != current_task() || task->sec_token.val[0] != 0) |
376 | result = KERN_INVALID_ARGUMENT; |
377 | else |
378 | proc_set_task_policy(task, |
379 | TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, |
380 | info->role); |
381 | break; |
382 | default: |
383 | result = KERN_INVALID_ARGUMENT; |
384 | break; |
385 | } /* switch (info->role) */ |
386 | |
387 | break; |
388 | } |
389 | |
390 | /* Desired energy-efficiency/performance "quality-of-service" */ |
391 | case TASK_BASE_QOS_POLICY: |
392 | case TASK_OVERRIDE_QOS_POLICY: |
393 | { |
394 | task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info; |
395 | kern_return_t kr = task_qos_policy_validate(qosinfo, count); |
396 | |
397 | if (kr != KERN_SUCCESS) |
398 | return kr; |
399 | |
400 | |
401 | uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier); |
402 | uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier); |
403 | |
404 | proc_set_task_policy2(task, TASK_POLICY_ATTRIBUTE, |
405 | flavor == TASK_BASE_QOS_POLICY ? TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS : TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS, |
406 | lqos, tqos); |
407 | } |
408 | break; |
409 | |
410 | case TASK_BASE_LATENCY_QOS_POLICY: |
411 | { |
412 | task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info; |
413 | kern_return_t kr = task_qos_policy_validate(qosinfo, count); |
414 | |
415 | if (kr != KERN_SUCCESS) |
416 | return kr; |
417 | |
418 | uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier); |
419 | |
420 | proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_LATENCY_QOS_POLICY, lqos); |
421 | } |
422 | break; |
423 | |
424 | case TASK_BASE_THROUGHPUT_QOS_POLICY: |
425 | { |
426 | task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info; |
427 | kern_return_t kr = task_qos_policy_validate(qosinfo, count); |
428 | |
429 | if (kr != KERN_SUCCESS) |
430 | return kr; |
431 | |
432 | uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier); |
433 | |
434 | proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_THROUGHPUT_QOS_POLICY, tqos); |
435 | } |
436 | break; |
437 | |
438 | case TASK_SUPPRESSION_POLICY: |
439 | { |
440 | #if CONFIG_EMBEDDED |
441 | /* |
442 | * Suppression policy is not enabled for embedded |
443 | * because apps aren't marked as denap receivers |
444 | */ |
445 | result = KERN_INVALID_ARGUMENT; |
446 | break; |
447 | #else /* CONFIG_EMBEDDED */ |
448 | |
449 | task_suppression_policy_t info = (task_suppression_policy_t)policy_info; |
450 | |
451 | if (count < TASK_SUPPRESSION_POLICY_COUNT) |
452 | return (KERN_INVALID_ARGUMENT); |
453 | |
454 | struct task_qos_policy qosinfo; |
455 | |
456 | qosinfo.task_latency_qos_tier = info->timer_throttle; |
457 | qosinfo.task_throughput_qos_tier = info->throughput_qos; |
458 | |
459 | kern_return_t kr = task_qos_policy_validate(&qosinfo, TASK_QOS_POLICY_COUNT); |
460 | |
461 | if (kr != KERN_SUCCESS) |
462 | return kr; |
463 | |
464 | /* TEMPORARY disablement of task suppression */ |
465 | if (info->active && |
466 | (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_DISABLE)) |
467 | return KERN_SUCCESS; |
468 | |
469 | struct task_pend_token pend_token = {}; |
470 | |
471 | task_lock(task); |
472 | |
473 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
474 | (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_START, |
475 | proc_selfpid(), task_pid(task), trequested_0(task), |
476 | trequested_1(task), 0); |
477 | |
478 | task->requested_policy.trp_sup_active = (info->active) ? 1 : 0; |
479 | task->requested_policy.trp_sup_lowpri_cpu = (info->lowpri_cpu) ? 1 : 0; |
480 | task->requested_policy.trp_sup_timer = qos_extract(info->timer_throttle); |
481 | task->requested_policy.trp_sup_disk = (info->disk_throttle) ? 1 : 0; |
482 | task->requested_policy.trp_sup_throughput = qos_extract(info->throughput_qos); |
483 | task->requested_policy.trp_sup_cpu = (info->suppressed_cpu) ? 1 : 0; |
484 | task->requested_policy.trp_sup_bg_sockets = (info->background_sockets) ? 1 : 0; |
485 | |
486 | task_policy_update_locked(task, &pend_token); |
487 | |
488 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
489 | (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_END, |
490 | proc_selfpid(), task_pid(task), trequested_0(task), |
491 | trequested_1(task), 0); |
492 | |
493 | task_unlock(task); |
494 | |
495 | task_policy_update_complete_unlocked(task, &pend_token); |
496 | |
497 | break; |
498 | |
499 | #endif /* CONFIG_EMBEDDED */ |
500 | } |
501 | |
502 | default: |
503 | result = KERN_INVALID_ARGUMENT; |
504 | break; |
505 | } |
506 | |
507 | return (result); |
508 | } |
509 | |
510 | /* Sets BSD 'nice' value on the task */ |
511 | kern_return_t |
512 | task_importance( |
513 | task_t task, |
514 | integer_t importance) |
515 | { |
516 | if (task == TASK_NULL || task == kernel_task) |
517 | return (KERN_INVALID_ARGUMENT); |
518 | |
519 | task_lock(task); |
520 | |
521 | if (!task->active) { |
522 | task_unlock(task); |
523 | |
524 | return (KERN_TERMINATED); |
525 | } |
526 | |
527 | if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) >= TASK_CONTROL_APPLICATION) { |
528 | task_unlock(task); |
529 | |
530 | return (KERN_INVALID_ARGUMENT); |
531 | } |
532 | |
533 | task->importance = importance; |
534 | |
535 | struct task_pend_token pend_token = {}; |
536 | |
537 | task_policy_update_locked(task, &pend_token); |
538 | |
539 | task_unlock(task); |
540 | |
541 | task_policy_update_complete_unlocked(task, &pend_token); |
542 | |
543 | return (KERN_SUCCESS); |
544 | } |
545 | |
546 | kern_return_t |
547 | task_policy_get( |
548 | task_t task, |
549 | task_policy_flavor_t flavor, |
550 | task_policy_t policy_info, |
551 | mach_msg_type_number_t *count, |
552 | boolean_t *get_default) |
553 | { |
554 | if (task == TASK_NULL || task == kernel_task) |
555 | return (KERN_INVALID_ARGUMENT); |
556 | |
557 | switch (flavor) { |
558 | |
559 | case TASK_CATEGORY_POLICY: |
560 | { |
561 | task_category_policy_t info = (task_category_policy_t)policy_info; |
562 | |
563 | if (*count < TASK_CATEGORY_POLICY_COUNT) |
564 | return (KERN_INVALID_ARGUMENT); |
565 | |
566 | if (*get_default) |
567 | info->role = TASK_UNSPECIFIED; |
568 | else |
569 | info->role = proc_get_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE); |
570 | break; |
571 | } |
572 | |
573 | case TASK_BASE_QOS_POLICY: /* FALLTHRU */ |
574 | case TASK_OVERRIDE_QOS_POLICY: |
575 | { |
576 | task_qos_policy_t info = (task_qos_policy_t)policy_info; |
577 | |
578 | if (*count < TASK_QOS_POLICY_COUNT) |
579 | return (KERN_INVALID_ARGUMENT); |
580 | |
581 | if (*get_default) { |
582 | info->task_latency_qos_tier = LATENCY_QOS_TIER_UNSPECIFIED; |
583 | info->task_throughput_qos_tier = THROUGHPUT_QOS_TIER_UNSPECIFIED; |
584 | } else if (flavor == TASK_BASE_QOS_POLICY) { |
585 | int value1, value2; |
586 | |
587 | proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2); |
588 | |
589 | info->task_latency_qos_tier = qos_latency_policy_package(value1); |
590 | info->task_throughput_qos_tier = qos_throughput_policy_package(value2); |
591 | |
592 | } else if (flavor == TASK_OVERRIDE_QOS_POLICY) { |
593 | int value1, value2; |
594 | |
595 | proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2); |
596 | |
597 | info->task_latency_qos_tier = qos_latency_policy_package(value1); |
598 | info->task_throughput_qos_tier = qos_throughput_policy_package(value2); |
599 | } |
600 | |
601 | break; |
602 | } |
603 | |
604 | case TASK_POLICY_STATE: |
605 | { |
606 | task_policy_state_t info = (task_policy_state_t)policy_info; |
607 | |
608 | if (*count < TASK_POLICY_STATE_COUNT) |
609 | return (KERN_INVALID_ARGUMENT); |
610 | |
611 | /* Only root can get this info */ |
612 | if (current_task()->sec_token.val[0] != 0) |
613 | return KERN_PROTECTION_FAILURE; |
614 | |
615 | if (*get_default) { |
616 | info->requested = 0; |
617 | info->effective = 0; |
618 | info->pending = 0; |
619 | info->imp_assertcnt = 0; |
620 | info->imp_externcnt = 0; |
621 | info->flags = 0; |
622 | info->imp_transitions = 0; |
623 | } else { |
624 | task_lock(task); |
625 | |
626 | info->requested = task_requested_bitfield(task); |
627 | info->effective = task_effective_bitfield(task); |
628 | info->pending = 0; |
629 | |
630 | info->tps_requested_policy = *(uint64_t*)(&task->requested_policy); |
631 | info->tps_effective_policy = *(uint64_t*)(&task->effective_policy); |
632 | |
633 | info->flags = 0; |
634 | if (task->task_imp_base != NULL) { |
635 | info->imp_assertcnt = task->task_imp_base->iit_assertcnt; |
636 | info->imp_externcnt = IIT_EXTERN(task->task_imp_base); |
637 | info->flags |= (task_is_marked_importance_receiver(task) ? TASK_IMP_RECEIVER : 0); |
638 | info->flags |= (task_is_marked_importance_denap_receiver(task) ? TASK_DENAP_RECEIVER : 0); |
639 | info->flags |= (task_is_marked_importance_donor(task) ? TASK_IMP_DONOR : 0); |
640 | info->flags |= (task_is_marked_live_importance_donor(task) ? TASK_IMP_LIVE_DONOR : 0); |
641 | info->imp_transitions = task->task_imp_base->iit_transitions; |
642 | } else { |
643 | info->imp_assertcnt = 0; |
644 | info->imp_externcnt = 0; |
645 | info->imp_transitions = 0; |
646 | } |
647 | task_unlock(task); |
648 | } |
649 | |
650 | break; |
651 | } |
652 | |
653 | case TASK_SUPPRESSION_POLICY: |
654 | { |
655 | task_suppression_policy_t info = (task_suppression_policy_t)policy_info; |
656 | |
657 | if (*count < TASK_SUPPRESSION_POLICY_COUNT) |
658 | return (KERN_INVALID_ARGUMENT); |
659 | |
660 | task_lock(task); |
661 | |
662 | if (*get_default) { |
663 | info->active = 0; |
664 | info->lowpri_cpu = 0; |
665 | info->timer_throttle = LATENCY_QOS_TIER_UNSPECIFIED; |
666 | info->disk_throttle = 0; |
667 | info->cpu_limit = 0; |
668 | info->suspend = 0; |
669 | info->throughput_qos = 0; |
670 | info->suppressed_cpu = 0; |
671 | } else { |
672 | info->active = task->requested_policy.trp_sup_active; |
673 | info->lowpri_cpu = task->requested_policy.trp_sup_lowpri_cpu; |
674 | info->timer_throttle = qos_latency_policy_package(task->requested_policy.trp_sup_timer); |
675 | info->disk_throttle = task->requested_policy.trp_sup_disk; |
676 | info->cpu_limit = 0; |
677 | info->suspend = 0; |
678 | info->throughput_qos = qos_throughput_policy_package(task->requested_policy.trp_sup_throughput); |
679 | info->suppressed_cpu = task->requested_policy.trp_sup_cpu; |
680 | info->background_sockets = task->requested_policy.trp_sup_bg_sockets; |
681 | } |
682 | |
683 | task_unlock(task); |
684 | break; |
685 | } |
686 | |
687 | default: |
688 | return (KERN_INVALID_ARGUMENT); |
689 | } |
690 | |
691 | return (KERN_SUCCESS); |
692 | } |
693 | |
694 | /* |
695 | * Called at task creation |
696 | * We calculate the correct effective but don't apply it to anything yet. |
697 | * The threads, etc will inherit from the task as they get created. |
698 | */ |
699 | void |
700 | task_policy_create(task_t task, task_t parent_task) |
701 | { |
702 | task->requested_policy.trp_apptype = parent_task->requested_policy.trp_apptype; |
703 | |
704 | task->requested_policy.trp_int_darwinbg = parent_task->requested_policy.trp_int_darwinbg; |
705 | task->requested_policy.trp_ext_darwinbg = parent_task->requested_policy.trp_ext_darwinbg; |
706 | task->requested_policy.trp_int_iotier = parent_task->requested_policy.trp_int_iotier; |
707 | task->requested_policy.trp_ext_iotier = parent_task->requested_policy.trp_ext_iotier; |
708 | task->requested_policy.trp_int_iopassive = parent_task->requested_policy.trp_int_iopassive; |
709 | task->requested_policy.trp_ext_iopassive = parent_task->requested_policy.trp_ext_iopassive; |
710 | task->requested_policy.trp_bg_iotier = parent_task->requested_policy.trp_bg_iotier; |
711 | task->requested_policy.trp_terminated = parent_task->requested_policy.trp_terminated; |
712 | task->requested_policy.trp_qos_clamp = parent_task->requested_policy.trp_qos_clamp; |
713 | |
714 | if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && !task_is_exec_copy(task)) { |
715 | /* Do not update the apptype for exec copy task */ |
716 | if (parent_task->requested_policy.trp_boosted) { |
717 | task->requested_policy.trp_apptype = TASK_APPTYPE_DAEMON_INTERACTIVE; |
718 | task_importance_mark_donor(task, TRUE); |
719 | } else { |
720 | task->requested_policy.trp_apptype = TASK_APPTYPE_DAEMON_BACKGROUND; |
721 | task_importance_mark_receiver(task, FALSE); |
722 | } |
723 | } |
724 | |
725 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
726 | (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_START, |
727 | task_pid(task), teffective_0(task), |
728 | teffective_1(task), task->priority, 0); |
729 | |
730 | task_policy_update_internal_locked(task, TRUE, NULL); |
731 | |
732 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
733 | (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END, |
734 | task_pid(task), teffective_0(task), |
735 | teffective_1(task), task->priority, 0); |
736 | |
737 | task_importance_update_live_donor(task); |
738 | } |
739 | |
740 | |
741 | static void |
742 | task_policy_update_locked(task_t task, task_pend_token_t pend_token) |
743 | { |
744 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
745 | (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK) | DBG_FUNC_START), |
746 | task_pid(task), teffective_0(task), |
747 | teffective_1(task), task->priority, 0); |
748 | |
749 | task_policy_update_internal_locked(task, FALSE, pend_token); |
750 | |
751 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
752 | (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK)) | DBG_FUNC_END, |
753 | task_pid(task), teffective_0(task), |
754 | teffective_1(task), task->priority, 0); |
755 | } |
756 | |
757 | /* |
758 | * One state update function TO RULE THEM ALL |
759 | * |
760 | * This function updates the task or thread effective policy fields |
761 | * and pushes the results to the relevant subsystems. |
762 | * |
763 | * Must call update_complete after unlocking the task, |
764 | * as some subsystems cannot be updated while holding the task lock. |
765 | * |
766 | * Called with task locked, not thread |
767 | */ |
768 | |
769 | static void |
770 | task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_token_t pend_token) |
771 | { |
772 | /* |
773 | * Step 1: |
774 | * Gather requested policy |
775 | */ |
776 | |
777 | struct task_requested_policy requested = task->requested_policy; |
778 | |
779 | /* |
780 | * Step 2: |
781 | * Calculate new effective policies from requested policy and task state |
782 | * Rules: |
783 | * Don't change requested, it won't take effect |
784 | */ |
785 | |
786 | struct task_effective_policy next = {}; |
787 | |
788 | /* Update task role */ |
789 | next.tep_role = requested.trp_role; |
790 | |
791 | /* Set task qos clamp and ceiling */ |
792 | next.tep_qos_clamp = requested.trp_qos_clamp; |
793 | |
794 | if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT || |
795 | requested.trp_apptype == TASK_APPTYPE_APP_TAL) { |
796 | |
797 | switch (next.tep_role) { |
798 | case TASK_FOREGROUND_APPLICATION: |
799 | /* Foreground apps get urgent scheduler priority */ |
800 | next.tep_qos_ui_is_urgent = 1; |
801 | next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; |
802 | break; |
803 | |
804 | case TASK_BACKGROUND_APPLICATION: |
805 | /* This is really 'non-focal but on-screen' */ |
806 | next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; |
807 | break; |
808 | |
809 | case TASK_DEFAULT_APPLICATION: |
810 | /* This is 'may render UI but we don't know if it's focal/nonfocal' */ |
811 | next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; |
812 | break; |
813 | |
814 | case TASK_NONUI_APPLICATION: |
815 | /* i.e. 'off-screen' */ |
816 | next.tep_qos_ceiling = THREAD_QOS_LEGACY; |
817 | break; |
818 | |
819 | case TASK_CONTROL_APPLICATION: |
820 | case TASK_GRAPHICS_SERVER: |
821 | next.tep_qos_ui_is_urgent = 1; |
822 | next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; |
823 | break; |
824 | |
825 | case TASK_THROTTLE_APPLICATION: |
826 | /* i.e. 'TAL launch' */ |
827 | next.tep_qos_ceiling = THREAD_QOS_UTILITY; |
828 | break; |
829 | |
830 | case TASK_DARWINBG_APPLICATION: |
831 | /* i.e. 'DARWIN_BG throttled background application' */ |
832 | next.tep_qos_ceiling = THREAD_QOS_BACKGROUND; |
833 | break; |
834 | |
835 | case TASK_UNSPECIFIED: |
836 | default: |
837 | /* Apps that don't have an application role get |
838 | * USER_INTERACTIVE and USER_INITIATED squashed to LEGACY */ |
839 | next.tep_qos_ceiling = THREAD_QOS_LEGACY; |
840 | break; |
841 | } |
842 | } else { |
843 | /* Daemons get USER_INTERACTIVE squashed to USER_INITIATED */ |
844 | next.tep_qos_ceiling = THREAD_QOS_USER_INITIATED; |
845 | } |
846 | |
847 | /* Calculate DARWIN_BG */ |
848 | boolean_t wants_darwinbg = FALSE; |
849 | boolean_t wants_all_sockets_bg = FALSE; /* Do I want my existing sockets to be bg */ |
850 | boolean_t wants_watchersbg = FALSE; /* Do I want my pidbound threads to be bg */ |
851 | |
852 | /* |
853 | * If DARWIN_BG has been requested at either level, it's engaged. |
854 | * Only true DARWIN_BG changes cause watchers to transition. |
855 | * |
856 | * Backgrounding due to apptype does. |
857 | */ |
858 | if (requested.trp_int_darwinbg || requested.trp_ext_darwinbg || |
859 | next.tep_role == TASK_DARWINBG_APPLICATION) |
860 | wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = TRUE; |
861 | |
862 | /* |
863 | * Deprecated TAL implementation for TAL apptype |
864 | * Background TAL apps are throttled when TAL is enabled |
865 | */ |
866 | if (requested.trp_apptype == TASK_APPTYPE_APP_TAL && |
867 | requested.trp_role == TASK_BACKGROUND_APPLICATION && |
868 | requested.trp_tal_enabled == 1) { |
869 | next.tep_tal_engaged = 1; |
870 | } |
871 | |
872 | /* New TAL implementation based on TAL role alone, works for all apps */ |
873 | if ((requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT || |
874 | requested.trp_apptype == TASK_APPTYPE_APP_TAL) && |
875 | requested.trp_role == TASK_THROTTLE_APPLICATION) { |
876 | next.tep_tal_engaged = 1; |
877 | } |
878 | |
879 | /* Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled. */ |
880 | if (requested.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && |
881 | requested.trp_boosted == 0) |
882 | wants_darwinbg = TRUE; |
883 | |
884 | /* Background daemons are always DARWIN_BG, no exceptions, and don't get network throttled. */ |
885 | if (requested.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) |
886 | wants_darwinbg = TRUE; |
887 | |
888 | if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND || next.tep_qos_clamp == THREAD_QOS_MAINTENANCE) |
889 | wants_darwinbg = TRUE; |
890 | |
891 | /* Calculate side effects of DARWIN_BG */ |
892 | |
893 | if (wants_darwinbg) { |
894 | next.tep_darwinbg = 1; |
895 | /* darwinbg tasks always create bg sockets, but we don't always loop over all sockets */ |
896 | next.tep_new_sockets_bg = 1; |
897 | next.tep_lowpri_cpu = 1; |
898 | } |
899 | |
900 | if (wants_all_sockets_bg) |
901 | next.tep_all_sockets_bg = 1; |
902 | |
903 | if (wants_watchersbg) |
904 | next.tep_watchers_bg = 1; |
905 | |
906 | /* Calculate low CPU priority */ |
907 | |
908 | boolean_t wants_lowpri_cpu = FALSE; |
909 | |
910 | if (wants_darwinbg) |
911 | wants_lowpri_cpu = TRUE; |
912 | |
913 | if (next.tep_tal_engaged) |
914 | wants_lowpri_cpu = TRUE; |
915 | |
916 | if (requested.trp_sup_lowpri_cpu && requested.trp_boosted == 0) |
917 | wants_lowpri_cpu = TRUE; |
918 | |
919 | if (wants_lowpri_cpu) |
920 | next.tep_lowpri_cpu = 1; |
921 | |
922 | /* Calculate IO policy */ |
923 | |
924 | /* Update BG IO policy (so we can see if it has changed) */ |
925 | next.tep_bg_iotier = requested.trp_bg_iotier; |
926 | |
927 | int iopol = THROTTLE_LEVEL_TIER0; |
928 | |
929 | if (wants_darwinbg) |
930 | iopol = MAX(iopol, requested.trp_bg_iotier); |
931 | |
932 | if (requested.trp_apptype == TASK_APPTYPE_DAEMON_STANDARD) |
933 | iopol = MAX(iopol, proc_standard_daemon_tier); |
934 | |
935 | if (requested.trp_sup_disk && requested.trp_boosted == 0) |
936 | iopol = MAX(iopol, proc_suppressed_disk_tier); |
937 | |
938 | if (next.tep_tal_engaged) |
939 | iopol = MAX(iopol, proc_tal_disk_tier); |
940 | |
941 | if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) |
942 | iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.tep_qos_clamp]); |
943 | |
944 | iopol = MAX(iopol, requested.trp_int_iotier); |
945 | iopol = MAX(iopol, requested.trp_ext_iotier); |
946 | |
947 | next.tep_io_tier = iopol; |
948 | |
949 | /* Calculate Passive IO policy */ |
950 | |
951 | if (requested.trp_ext_iopassive || requested.trp_int_iopassive) |
952 | next.tep_io_passive = 1; |
953 | |
954 | /* Calculate suppression-active flag */ |
955 | boolean_t appnap_transition = FALSE; |
956 | |
957 | if (requested.trp_sup_active && requested.trp_boosted == 0) |
958 | next.tep_sup_active = 1; |
959 | |
960 | if (task->effective_policy.tep_sup_active != next.tep_sup_active) |
961 | appnap_transition = TRUE; |
962 | |
963 | /* Calculate timer QOS */ |
964 | int latency_qos = requested.trp_base_latency_qos; |
965 | |
966 | if (requested.trp_sup_timer && requested.trp_boosted == 0) |
967 | latency_qos = requested.trp_sup_timer; |
968 | |
969 | if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) |
970 | latency_qos = MAX(latency_qos, (int)thread_qos_policy_params.qos_latency_qos[next.tep_qos_clamp]); |
971 | |
972 | if (requested.trp_over_latency_qos != 0) |
973 | latency_qos = requested.trp_over_latency_qos; |
974 | |
975 | /* Treat the windowserver special */ |
976 | if (requested.trp_role == TASK_GRAPHICS_SERVER) |
977 | latency_qos = proc_graphics_timer_qos; |
978 | |
979 | next.tep_latency_qos = latency_qos; |
980 | |
981 | /* Calculate throughput QOS */ |
982 | int through_qos = requested.trp_base_through_qos; |
983 | |
984 | if (requested.trp_sup_throughput && requested.trp_boosted == 0) |
985 | through_qos = requested.trp_sup_throughput; |
986 | |
987 | if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) |
988 | through_qos = MAX(through_qos, (int)thread_qos_policy_params.qos_through_qos[next.tep_qos_clamp]); |
989 | |
990 | if (requested.trp_over_through_qos != 0) |
991 | through_qos = requested.trp_over_through_qos; |
992 | |
993 | next.tep_through_qos = through_qos; |
994 | |
995 | /* Calculate suppressed CPU priority */ |
996 | if (requested.trp_sup_cpu && requested.trp_boosted == 0) |
997 | next.tep_suppressed_cpu = 1; |
998 | |
999 | /* |
1000 | * Calculate background sockets |
1001 | * Don't take into account boosting to limit transition frequency. |
1002 | */ |
1003 | if (requested.trp_sup_bg_sockets){ |
1004 | next.tep_all_sockets_bg = 1; |
1005 | next.tep_new_sockets_bg = 1; |
1006 | } |
1007 | |
1008 | /* Apply SFI Managed class bit */ |
1009 | next.tep_sfi_managed = requested.trp_sfi_managed; |
1010 | |
1011 | /* Calculate 'live donor' status for live importance */ |
1012 | switch (requested.trp_apptype) { |
1013 | case TASK_APPTYPE_APP_TAL: |
1014 | case TASK_APPTYPE_APP_DEFAULT: |
1015 | if (requested.trp_ext_darwinbg == 1 || |
1016 | (next.tep_sup_active == 1 && |
1017 | (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_NONDONOR)) || |
1018 | next.tep_role == TASK_DARWINBG_APPLICATION) { |
1019 | next.tep_live_donor = 0; |
1020 | } else { |
1021 | next.tep_live_donor = 1; |
1022 | } |
1023 | break; |
1024 | |
1025 | case TASK_APPTYPE_DAEMON_INTERACTIVE: |
1026 | case TASK_APPTYPE_DAEMON_STANDARD: |
1027 | case TASK_APPTYPE_DAEMON_ADAPTIVE: |
1028 | case TASK_APPTYPE_DAEMON_BACKGROUND: |
1029 | default: |
1030 | next.tep_live_donor = 0; |
1031 | break; |
1032 | } |
1033 | |
1034 | if (requested.trp_terminated) { |
1035 | /* |
1036 | * Shoot down the throttles that slow down exit or response to SIGTERM |
1037 | * We don't need to shoot down: |
1038 | * passive (don't want to cause others to throttle) |
1039 | * all_sockets_bg (don't need to iterate FDs on every exit) |
1040 | * new_sockets_bg (doesn't matter for exiting process) |
1041 | * pidsuspend (jetsam-ed BG process shouldn't run again) |
1042 | * watchers_bg (watcher threads don't need to be unthrottled) |
1043 | * latency_qos (affects userspace timers only) |
1044 | */ |
1045 | |
1046 | next.tep_terminated = 1; |
1047 | next.tep_darwinbg = 0; |
1048 | next.tep_lowpri_cpu = 0; |
1049 | next.tep_io_tier = THROTTLE_LEVEL_TIER0; |
1050 | next.tep_tal_engaged = 0; |
1051 | next.tep_role = TASK_UNSPECIFIED; |
1052 | next.tep_suppressed_cpu = 0; |
1053 | } |
1054 | |
1055 | /* |
1056 | * Step 3: |
1057 | * Swap out old policy for new policy |
1058 | */ |
1059 | |
1060 | struct task_effective_policy prev = task->effective_policy; |
1061 | |
1062 | /* This is the point where the new values become visible to other threads */ |
1063 | task->effective_policy = next; |
1064 | |
1065 | /* Don't do anything further to a half-formed task */ |
1066 | if (in_create) |
1067 | return; |
1068 | |
1069 | if (task == kernel_task) |
1070 | panic("Attempting to set task policy on kernel_task" ); |
1071 | |
1072 | /* |
1073 | * Step 4: |
1074 | * Pend updates that can't be done while holding the task lock |
1075 | */ |
1076 | |
1077 | if (prev.tep_all_sockets_bg != next.tep_all_sockets_bg) |
1078 | pend_token->tpt_update_sockets = 1; |
1079 | |
1080 | /* Only re-scan the timer list if the qos level is getting less strong */ |
1081 | if (prev.tep_latency_qos > next.tep_latency_qos) |
1082 | pend_token->tpt_update_timers = 1; |
1083 | |
1084 | #if CONFIG_EMBEDDED |
1085 | if (prev.tep_watchers_bg != next.tep_watchers_bg) |
1086 | pend_token->tpt_update_watchers = 1; |
1087 | #endif /* CONFIG_EMBEDDED */ |
1088 | |
1089 | if (prev.tep_live_donor != next.tep_live_donor) |
1090 | pend_token->tpt_update_live_donor = 1; |
1091 | |
1092 | /* |
1093 | * Step 5: |
1094 | * Update other subsystems as necessary if something has changed |
1095 | */ |
1096 | |
1097 | boolean_t update_threads = FALSE, update_sfi = FALSE; |
1098 | |
1099 | /* |
1100 | * Check for the attributes that thread_policy_update_internal_locked() consults, |
1101 | * and trigger thread policy re-evaluation. |
1102 | */ |
1103 | if (prev.tep_io_tier != next.tep_io_tier || |
1104 | prev.tep_bg_iotier != next.tep_bg_iotier || |
1105 | prev.tep_io_passive != next.tep_io_passive || |
1106 | prev.tep_darwinbg != next.tep_darwinbg || |
1107 | prev.tep_qos_clamp != next.tep_qos_clamp || |
1108 | prev.tep_qos_ceiling != next.tep_qos_ceiling || |
1109 | prev.tep_qos_ui_is_urgent != next.tep_qos_ui_is_urgent || |
1110 | prev.tep_latency_qos != next.tep_latency_qos || |
1111 | prev.tep_through_qos != next.tep_through_qos || |
1112 | prev.tep_lowpri_cpu != next.tep_lowpri_cpu || |
1113 | prev.tep_new_sockets_bg != next.tep_new_sockets_bg || |
1114 | prev.tep_terminated != next.tep_terminated ) |
1115 | update_threads = TRUE; |
1116 | |
1117 | /* |
1118 | * Check for the attributes that sfi_thread_classify() consults, |
1119 | * and trigger SFI re-evaluation. |
1120 | */ |
1121 | if (prev.tep_latency_qos != next.tep_latency_qos || |
1122 | prev.tep_role != next.tep_role || |
1123 | prev.tep_sfi_managed != next.tep_sfi_managed ) |
1124 | update_sfi = TRUE; |
1125 | |
1126 | /* Reflect task role transitions into the coalition role counters */ |
1127 | if (prev.tep_role != next.tep_role) { |
1128 | if (task_policy_update_coalition_focal_tasks(task, prev.tep_role, next.tep_role, pend_token)) |
1129 | update_sfi = TRUE; |
1130 | } |
1131 | |
1132 | boolean_t update_priority = FALSE; |
1133 | |
1134 | int priority = BASEPRI_DEFAULT; |
1135 | int max_priority = MAXPRI_USER; |
1136 | |
1137 | if (next.tep_lowpri_cpu) { |
1138 | priority = MAXPRI_THROTTLE; |
1139 | max_priority = MAXPRI_THROTTLE; |
1140 | } else if (next.tep_suppressed_cpu) { |
1141 | priority = MAXPRI_SUPPRESSED; |
1142 | max_priority = MAXPRI_SUPPRESSED; |
1143 | } else { |
1144 | switch (next.tep_role) { |
1145 | case TASK_CONTROL_APPLICATION: |
1146 | priority = BASEPRI_CONTROL; |
1147 | break; |
1148 | case TASK_GRAPHICS_SERVER: |
1149 | priority = BASEPRI_GRAPHICS; |
1150 | max_priority = MAXPRI_RESERVED; |
1151 | break; |
1152 | default: |
1153 | break; |
1154 | } |
1155 | |
1156 | /* factor in 'nice' value */ |
1157 | priority += task->importance; |
1158 | |
1159 | if (task->effective_policy.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) { |
1160 | int qos_clamp_priority = thread_qos_policy_params.qos_pri[task->effective_policy.tep_qos_clamp]; |
1161 | |
1162 | priority = MIN(priority, qos_clamp_priority); |
1163 | max_priority = MIN(max_priority, qos_clamp_priority); |
1164 | } |
1165 | |
1166 | if (priority > max_priority) |
1167 | priority = max_priority; |
1168 | else if (priority < MINPRI) |
1169 | priority = MINPRI; |
1170 | } |
1171 | |
1172 | assert(priority <= max_priority); |
1173 | |
1174 | /* avoid extra work if priority isn't changing */ |
1175 | if (priority != task->priority || |
1176 | max_priority != task->max_priority ) { |
1177 | /* update the scheduling priority for the task */ |
1178 | task->max_priority = max_priority; |
1179 | task->priority = priority; |
1180 | update_priority = TRUE; |
1181 | } |
1182 | |
1183 | /* Loop over the threads in the task: |
1184 | * only once |
1185 | * only if necessary |
1186 | * with one thread mutex hold per thread |
1187 | */ |
1188 | if (update_threads || update_priority || update_sfi) { |
1189 | thread_t thread; |
1190 | |
1191 | queue_iterate(&task->threads, thread, thread_t, task_threads) { |
1192 | struct task_pend_token thread_pend_token = {}; |
1193 | |
1194 | if (update_sfi) |
1195 | thread_pend_token.tpt_update_thread_sfi = 1; |
1196 | |
1197 | if (update_priority || update_threads) |
1198 | thread_policy_update_tasklocked(thread, |
1199 | task->priority, task->max_priority, |
1200 | &thread_pend_token); |
1201 | |
1202 | assert(!thread_pend_token.tpt_update_sockets); |
1203 | |
1204 | // Slightly risky, as we still hold the task lock... |
1205 | thread_policy_update_complete_unlocked(thread, &thread_pend_token); |
1206 | } |
1207 | } |
1208 | |
1209 | /* |
1210 | * Use the app-nap transitions to influence the |
1211 | * transition of the process within the jetsam band |
1212 | * [and optionally its live-donor status] |
1213 | * On macOS only. |
1214 | */ |
1215 | if (appnap_transition == TRUE) { |
1216 | if (task->effective_policy.tep_sup_active == 1) { |
1217 | |
1218 | memorystatus_update_priority_for_appnap(((proc_t) task->bsd_info), TRUE); |
1219 | } else { |
1220 | memorystatus_update_priority_for_appnap(((proc_t) task->bsd_info), FALSE); |
1221 | } |
1222 | } |
1223 | } |
1224 | |
1225 | |
1226 | /* |
1227 | * Yet another layering violation. We reach out and bang on the coalition directly. |
1228 | */ |
1229 | static boolean_t |
1230 | task_policy_update_coalition_focal_tasks(task_t task, |
1231 | int prev_role, |
1232 | int next_role, |
1233 | task_pend_token_t pend_token) |
1234 | { |
1235 | boolean_t sfi_transition = FALSE; |
1236 | uint32_t new_count = 0; |
1237 | |
1238 | /* task moving into/out-of the foreground */ |
1239 | if (prev_role != TASK_FOREGROUND_APPLICATION && next_role == TASK_FOREGROUND_APPLICATION) { |
1240 | if (task_coalition_adjust_focal_count(task, 1, &new_count) && (new_count == 1)) { |
1241 | sfi_transition = TRUE; |
1242 | pend_token->tpt_update_tg_ui_flag = TRUE; |
1243 | } |
1244 | } else if (prev_role == TASK_FOREGROUND_APPLICATION && next_role != TASK_FOREGROUND_APPLICATION) { |
1245 | if (task_coalition_adjust_focal_count(task, -1, &new_count) && (new_count == 0)) { |
1246 | sfi_transition = TRUE; |
1247 | pend_token->tpt_update_tg_ui_flag = TRUE; |
1248 | } |
1249 | } |
1250 | |
1251 | /* task moving into/out-of background */ |
1252 | if (prev_role != TASK_BACKGROUND_APPLICATION && next_role == TASK_BACKGROUND_APPLICATION) { |
1253 | if (task_coalition_adjust_nonfocal_count(task, 1, &new_count) && (new_count == 1)) |
1254 | sfi_transition = TRUE; |
1255 | } else if (prev_role == TASK_BACKGROUND_APPLICATION && next_role != TASK_BACKGROUND_APPLICATION) { |
1256 | if (task_coalition_adjust_nonfocal_count(task, -1, &new_count) && (new_count == 0)) |
1257 | sfi_transition = TRUE; |
1258 | } |
1259 | |
1260 | if (sfi_transition) |
1261 | pend_token->tpt_update_coal_sfi = 1; |
1262 | return sfi_transition; |
1263 | } |
1264 | |
1265 | #if CONFIG_SCHED_SFI |
1266 | |
1267 | /* coalition object is locked */ |
1268 | static void |
1269 | task_sfi_reevaluate_cb(coalition_t coal, void *ctx, task_t task) |
1270 | { |
1271 | thread_t thread; |
1272 | |
1273 | /* unused for now */ |
1274 | (void)coal; |
1275 | |
1276 | /* skip the task we're re-evaluating on behalf of: it's already updated */ |
1277 | if (task == (task_t)ctx) |
1278 | return; |
1279 | |
1280 | task_lock(task); |
1281 | |
1282 | queue_iterate(&task->threads, thread, thread_t, task_threads) { |
1283 | sfi_reevaluate(thread); |
1284 | } |
1285 | |
1286 | task_unlock(task); |
1287 | } |
1288 | #endif /* CONFIG_SCHED_SFI */ |
1289 | |
1290 | /* |
1291 | * Called with task unlocked to do things that can't be done while holding the task lock |
1292 | */ |
1293 | void |
1294 | task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token) |
1295 | { |
1296 | #ifdef MACH_BSD |
1297 | if (pend_token->tpt_update_sockets) |
1298 | proc_apply_task_networkbg(task->bsd_info, THREAD_NULL); |
1299 | #endif /* MACH_BSD */ |
1300 | |
1301 | /* The timer throttle has been removed or reduced, we need to look for expired timers and fire them */ |
1302 | if (pend_token->tpt_update_timers) |
1303 | ml_timer_evaluate(); |
1304 | |
1305 | #if CONFIG_EMBEDDED |
1306 | if (pend_token->tpt_update_watchers) |
1307 | apply_appstate_watchers(task); |
1308 | #endif /* CONFIG_EMBEDDED */ |
1309 | |
1310 | if (pend_token->tpt_update_live_donor) |
1311 | task_importance_update_live_donor(task); |
1312 | |
1313 | #if CONFIG_SCHED_SFI |
1314 | /* use the resource coalition for SFI re-evaluation */ |
1315 | if (pend_token->tpt_update_coal_sfi) |
1316 | coalition_for_each_task(task->coalition[COALITION_TYPE_RESOURCE], |
1317 | (void *)task, task_sfi_reevaluate_cb); |
1318 | #endif /* CONFIG_SCHED_SFI */ |
1319 | |
1320 | } |
1321 | |
1322 | /* |
1323 | * Initiate a task policy state transition |
1324 | * |
1325 | * Everything that modifies requested except functions that need to hold the task lock |
1326 | * should use this function |
1327 | * |
1328 | * Argument validation should be performed before reaching this point. |
1329 | * |
1330 | * TODO: Do we need to check task->active? |
1331 | */ |
1332 | void |
1333 | proc_set_task_policy(task_t task, |
1334 | int category, |
1335 | int flavor, |
1336 | int value) |
1337 | { |
1338 | struct task_pend_token pend_token = {}; |
1339 | |
1340 | task_lock(task); |
1341 | |
1342 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
1343 | (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START, |
1344 | task_pid(task), trequested_0(task), |
1345 | trequested_1(task), value, 0); |
1346 | |
1347 | proc_set_task_policy_locked(task, category, flavor, value, 0); |
1348 | |
1349 | task_policy_update_locked(task, &pend_token); |
1350 | |
1351 | |
1352 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
1353 | (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END, |
1354 | task_pid(task), trequested_0(task), |
1355 | trequested_1(task), tpending(&pend_token), 0); |
1356 | |
1357 | task_unlock(task); |
1358 | |
1359 | task_policy_update_complete_unlocked(task, &pend_token); |
1360 | } |
1361 | |
1362 | /* |
1363 | * Variant of proc_set_task_policy() that sets two scalars in the requested policy structure. |
1364 | * Same locking rules apply. |
1365 | */ |
1366 | void |
1367 | proc_set_task_policy2(task_t task, |
1368 | int category, |
1369 | int flavor, |
1370 | int value, |
1371 | int value2) |
1372 | { |
1373 | struct task_pend_token pend_token = {}; |
1374 | |
1375 | task_lock(task); |
1376 | |
1377 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
1378 | (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START, |
1379 | task_pid(task), trequested_0(task), |
1380 | trequested_1(task), value, 0); |
1381 | |
1382 | proc_set_task_policy_locked(task, category, flavor, value, value2); |
1383 | |
1384 | task_policy_update_locked(task, &pend_token); |
1385 | |
1386 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
1387 | (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END, |
1388 | task_pid(task), trequested_0(task), |
1389 | trequested_1(task), tpending(&pend_token), 0); |
1390 | |
1391 | task_unlock(task); |
1392 | |
1393 | task_policy_update_complete_unlocked(task, &pend_token); |
1394 | } |
1395 | |
1396 | /* |
1397 | * Set the requested state for a specific flavor to a specific value. |
1398 | * |
1399 | * TODO: |
1400 | * Verify that arguments to non iopol things are 1 or 0 |
1401 | */ |
1402 | static void |
1403 | proc_set_task_policy_locked(task_t task, |
1404 | int category, |
1405 | int flavor, |
1406 | int value, |
1407 | int value2) |
1408 | { |
1409 | int tier, passive; |
1410 | |
1411 | struct task_requested_policy requested = task->requested_policy; |
1412 | |
1413 | switch (flavor) { |
1414 | |
1415 | /* Category: EXTERNAL and INTERNAL */ |
1416 | |
1417 | case TASK_POLICY_DARWIN_BG: |
1418 | if (category == TASK_POLICY_EXTERNAL) |
1419 | requested.trp_ext_darwinbg = value; |
1420 | else |
1421 | requested.trp_int_darwinbg = value; |
1422 | break; |
1423 | |
1424 | case TASK_POLICY_IOPOL: |
1425 | proc_iopol_to_tier(value, &tier, &passive); |
1426 | if (category == TASK_POLICY_EXTERNAL) { |
1427 | requested.trp_ext_iotier = tier; |
1428 | requested.trp_ext_iopassive = passive; |
1429 | } else { |
1430 | requested.trp_int_iotier = tier; |
1431 | requested.trp_int_iopassive = passive; |
1432 | } |
1433 | break; |
1434 | |
1435 | case TASK_POLICY_IO: |
1436 | if (category == TASK_POLICY_EXTERNAL) |
1437 | requested.trp_ext_iotier = value; |
1438 | else |
1439 | requested.trp_int_iotier = value; |
1440 | break; |
1441 | |
1442 | case TASK_POLICY_PASSIVE_IO: |
1443 | if (category == TASK_POLICY_EXTERNAL) |
1444 | requested.trp_ext_iopassive = value; |
1445 | else |
1446 | requested.trp_int_iopassive = value; |
1447 | break; |
1448 | |
1449 | /* Category: INTERNAL */ |
1450 | |
1451 | case TASK_POLICY_DARWIN_BG_IOPOL: |
1452 | assert(category == TASK_POLICY_INTERNAL); |
1453 | proc_iopol_to_tier(value, &tier, &passive); |
1454 | requested.trp_bg_iotier = tier; |
1455 | break; |
1456 | |
1457 | /* Category: ATTRIBUTE */ |
1458 | |
1459 | case TASK_POLICY_TAL: |
1460 | assert(category == TASK_POLICY_ATTRIBUTE); |
1461 | requested.trp_tal_enabled = value; |
1462 | break; |
1463 | |
1464 | case TASK_POLICY_BOOST: |
1465 | assert(category == TASK_POLICY_ATTRIBUTE); |
1466 | requested.trp_boosted = value; |
1467 | break; |
1468 | |
1469 | case TASK_POLICY_ROLE: |
1470 | assert(category == TASK_POLICY_ATTRIBUTE); |
1471 | requested.trp_role = value; |
1472 | break; |
1473 | |
1474 | case TASK_POLICY_TERMINATED: |
1475 | assert(category == TASK_POLICY_ATTRIBUTE); |
1476 | requested.trp_terminated = value; |
1477 | break; |
1478 | |
1479 | case TASK_BASE_LATENCY_QOS_POLICY: |
1480 | assert(category == TASK_POLICY_ATTRIBUTE); |
1481 | requested.trp_base_latency_qos = value; |
1482 | break; |
1483 | |
1484 | case TASK_BASE_THROUGHPUT_QOS_POLICY: |
1485 | assert(category == TASK_POLICY_ATTRIBUTE); |
1486 | requested.trp_base_through_qos = value; |
1487 | break; |
1488 | |
1489 | case TASK_POLICY_SFI_MANAGED: |
1490 | assert(category == TASK_POLICY_ATTRIBUTE); |
1491 | requested.trp_sfi_managed = value; |
1492 | break; |
1493 | |
1494 | case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS: |
1495 | assert(category == TASK_POLICY_ATTRIBUTE); |
1496 | requested.trp_base_latency_qos = value; |
1497 | requested.trp_base_through_qos = value2; |
1498 | break; |
1499 | |
1500 | case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS: |
1501 | assert(category == TASK_POLICY_ATTRIBUTE); |
1502 | requested.trp_over_latency_qos = value; |
1503 | requested.trp_over_through_qos = value2; |
1504 | break; |
1505 | |
1506 | default: |
1507 | panic("unknown task policy: %d %d %d %d" , category, flavor, value, value2); |
1508 | break; |
1509 | } |
1510 | |
1511 | task->requested_policy = requested; |
1512 | } |
1513 | |
1514 | /* |
1515 | * Gets what you set. Effective values may be different. |
1516 | */ |
1517 | int |
1518 | proc_get_task_policy(task_t task, |
1519 | int category, |
1520 | int flavor) |
1521 | { |
1522 | int value = 0; |
1523 | |
1524 | task_lock(task); |
1525 | |
1526 | struct task_requested_policy requested = task->requested_policy; |
1527 | |
1528 | switch (flavor) { |
1529 | case TASK_POLICY_DARWIN_BG: |
1530 | if (category == TASK_POLICY_EXTERNAL) |
1531 | value = requested.trp_ext_darwinbg; |
1532 | else |
1533 | value = requested.trp_int_darwinbg; |
1534 | break; |
1535 | case TASK_POLICY_IOPOL: |
1536 | if (category == TASK_POLICY_EXTERNAL) |
1537 | value = proc_tier_to_iopol(requested.trp_ext_iotier, |
1538 | requested.trp_ext_iopassive); |
1539 | else |
1540 | value = proc_tier_to_iopol(requested.trp_int_iotier, |
1541 | requested.trp_int_iopassive); |
1542 | break; |
1543 | case TASK_POLICY_IO: |
1544 | if (category == TASK_POLICY_EXTERNAL) |
1545 | value = requested.trp_ext_iotier; |
1546 | else |
1547 | value = requested.trp_int_iotier; |
1548 | break; |
1549 | case TASK_POLICY_PASSIVE_IO: |
1550 | if (category == TASK_POLICY_EXTERNAL) |
1551 | value = requested.trp_ext_iopassive; |
1552 | else |
1553 | value = requested.trp_int_iopassive; |
1554 | break; |
1555 | case TASK_POLICY_DARWIN_BG_IOPOL: |
1556 | assert(category == TASK_POLICY_ATTRIBUTE); |
1557 | value = proc_tier_to_iopol(requested.trp_bg_iotier, 0); |
1558 | break; |
1559 | case TASK_POLICY_ROLE: |
1560 | assert(category == TASK_POLICY_ATTRIBUTE); |
1561 | value = requested.trp_role; |
1562 | break; |
1563 | case TASK_POLICY_SFI_MANAGED: |
1564 | assert(category == TASK_POLICY_ATTRIBUTE); |
1565 | value = requested.trp_sfi_managed; |
1566 | break; |
1567 | default: |
1568 | panic("unknown policy_flavor %d" , flavor); |
1569 | break; |
1570 | } |
1571 | |
1572 | task_unlock(task); |
1573 | |
1574 | return value; |
1575 | } |
1576 | |
1577 | /* |
1578 | * Variant of proc_get_task_policy() that returns two scalar outputs. |
1579 | */ |
1580 | void |
1581 | proc_get_task_policy2(task_t task, |
1582 | __assert_only int category, |
1583 | int flavor, |
1584 | int *value1, |
1585 | int *value2) |
1586 | { |
1587 | task_lock(task); |
1588 | |
1589 | struct task_requested_policy requested = task->requested_policy; |
1590 | |
1591 | switch (flavor) { |
1592 | case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS: |
1593 | assert(category == TASK_POLICY_ATTRIBUTE); |
1594 | *value1 = requested.trp_base_latency_qos; |
1595 | *value2 = requested.trp_base_through_qos; |
1596 | break; |
1597 | |
1598 | case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS: |
1599 | assert(category == TASK_POLICY_ATTRIBUTE); |
1600 | *value1 = requested.trp_over_latency_qos; |
1601 | *value2 = requested.trp_over_through_qos; |
1602 | break; |
1603 | |
1604 | default: |
1605 | panic("unknown policy_flavor %d" , flavor); |
1606 | break; |
1607 | } |
1608 | |
1609 | task_unlock(task); |
1610 | } |
1611 | |
1612 | /* |
1613 | * Function for querying effective state for relevant subsystems |
1614 | * Gets what is actually in effect, for subsystems which pull policy instead of receive updates. |
1615 | * |
1616 | * ONLY the relevant subsystem should query this. |
1617 | * NEVER take a value from the 'effective' function and stuff it into a setter. |
1618 | * |
1619 | * NOTE: This accessor does not take the task lock. |
1620 | * Notifications of state updates need to be externally synchronized with state queries. |
1621 | * This routine *MUST* remain interrupt safe, as it is potentially invoked |
1622 | * within the context of a timer interrupt. It is also called in KDP context for stackshot. |
1623 | */ |
1624 | int |
1625 | proc_get_effective_task_policy(task_t task, |
1626 | int flavor) |
1627 | { |
1628 | int value = 0; |
1629 | |
1630 | switch (flavor) { |
1631 | case TASK_POLICY_DARWIN_BG: |
1632 | /* |
1633 | * This backs the KPI call proc_pidbackgrounded to find |
1634 | * out if a pid is backgrounded. |
1635 | * It is used to communicate state to the VM system, as well as |
1636 | * prioritizing requests to the graphics system. |
1637 | * Returns 1 for background mode, 0 for normal mode |
1638 | */ |
1639 | value = task->effective_policy.tep_darwinbg; |
1640 | break; |
1641 | case TASK_POLICY_ALL_SOCKETS_BG: |
1642 | /* |
1643 | * do_background_socket() calls this to determine what it should do to the proc's sockets |
1644 | * Returns 1 for background mode, 0 for normal mode |
1645 | * |
1646 | * This consults both thread and task so un-DBGing a thread while the task is BG |
1647 | * doesn't get you out of the network throttle. |
1648 | */ |
1649 | value = task->effective_policy.tep_all_sockets_bg; |
1650 | break; |
1651 | case TASK_POLICY_LATENCY_QOS: |
1652 | /* |
1653 | * timer arming calls into here to find out the timer coalescing level |
1654 | * Returns a QoS tier (0-6) |
1655 | */ |
1656 | value = task->effective_policy.tep_latency_qos; |
1657 | break; |
1658 | case TASK_POLICY_THROUGH_QOS: |
1659 | /* |
1660 | * This value is passed into the urgency callout from the scheduler |
1661 | * to the performance management subsystem. |
1662 | * Returns a QoS tier (0-6) |
1663 | */ |
1664 | value = task->effective_policy.tep_through_qos; |
1665 | break; |
1666 | case TASK_POLICY_ROLE: |
1667 | /* |
1668 | * This controls various things that ask whether a process is foreground, |
1669 | * like SFI, VM, access to GPU, etc |
1670 | */ |
1671 | value = task->effective_policy.tep_role; |
1672 | break; |
1673 | case TASK_POLICY_WATCHERS_BG: |
1674 | /* |
1675 | * This controls whether or not a thread watching this process should be BG. |
1676 | */ |
1677 | value = task->effective_policy.tep_watchers_bg; |
1678 | break; |
1679 | case TASK_POLICY_SFI_MANAGED: |
1680 | /* |
1681 | * This controls whether or not a process is targeted for specific control by thermald. |
1682 | */ |
1683 | value = task->effective_policy.tep_sfi_managed; |
1684 | break; |
1685 | default: |
1686 | panic("unknown policy_flavor %d" , flavor); |
1687 | break; |
1688 | } |
1689 | |
1690 | return value; |
1691 | } |
1692 | |
1693 | /* |
1694 | * Convert from IOPOL_* values to throttle tiers. |
1695 | * |
1696 | * TODO: Can this be made more compact, like an array lookup |
1697 | * Note that it is possible to support e.g. IOPOL_PASSIVE_STANDARD in the future |
1698 | */ |
1699 | |
1700 | void |
1701 | proc_iopol_to_tier(int iopolicy, int *tier, int *passive) |
1702 | { |
1703 | *passive = 0; |
1704 | *tier = 0; |
1705 | switch (iopolicy) { |
1706 | case IOPOL_IMPORTANT: |
1707 | *tier = THROTTLE_LEVEL_TIER0; |
1708 | break; |
1709 | case IOPOL_PASSIVE: |
1710 | *tier = THROTTLE_LEVEL_TIER0; |
1711 | *passive = 1; |
1712 | break; |
1713 | case IOPOL_STANDARD: |
1714 | *tier = THROTTLE_LEVEL_TIER1; |
1715 | break; |
1716 | case IOPOL_UTILITY: |
1717 | *tier = THROTTLE_LEVEL_TIER2; |
1718 | break; |
1719 | case IOPOL_THROTTLE: |
1720 | *tier = THROTTLE_LEVEL_TIER3; |
1721 | break; |
1722 | default: |
1723 | panic("unknown I/O policy %d" , iopolicy); |
1724 | break; |
1725 | } |
1726 | } |
1727 | |
1728 | int |
1729 | proc_tier_to_iopol(int tier, int passive) |
1730 | { |
1731 | if (passive == 1) { |
1732 | switch (tier) { |
1733 | case THROTTLE_LEVEL_TIER0: |
1734 | return IOPOL_PASSIVE; |
1735 | default: |
1736 | panic("unknown passive tier %d" , tier); |
1737 | return IOPOL_DEFAULT; |
1738 | } |
1739 | } else { |
1740 | switch (tier) { |
1741 | case THROTTLE_LEVEL_NONE: |
1742 | case THROTTLE_LEVEL_TIER0: |
1743 | return IOPOL_DEFAULT; |
1744 | case THROTTLE_LEVEL_TIER1: |
1745 | return IOPOL_STANDARD; |
1746 | case THROTTLE_LEVEL_TIER2: |
1747 | return IOPOL_UTILITY; |
1748 | case THROTTLE_LEVEL_TIER3: |
1749 | return IOPOL_THROTTLE; |
1750 | default: |
1751 | panic("unknown tier %d" , tier); |
1752 | return IOPOL_DEFAULT; |
1753 | } |
1754 | } |
1755 | } |
1756 | |
1757 | int |
1758 | proc_darwin_role_to_task_role(int darwin_role, int* task_role) |
1759 | { |
1760 | integer_t role = TASK_UNSPECIFIED; |
1761 | |
1762 | switch (darwin_role) { |
1763 | case PRIO_DARWIN_ROLE_DEFAULT: |
1764 | role = TASK_UNSPECIFIED; |
1765 | break; |
1766 | case PRIO_DARWIN_ROLE_UI_FOCAL: |
1767 | role = TASK_FOREGROUND_APPLICATION; |
1768 | break; |
1769 | case PRIO_DARWIN_ROLE_UI: |
1770 | role = TASK_DEFAULT_APPLICATION; |
1771 | break; |
1772 | case PRIO_DARWIN_ROLE_NON_UI: |
1773 | role = TASK_NONUI_APPLICATION; |
1774 | break; |
1775 | case PRIO_DARWIN_ROLE_UI_NON_FOCAL: |
1776 | role = TASK_BACKGROUND_APPLICATION; |
1777 | break; |
1778 | case PRIO_DARWIN_ROLE_TAL_LAUNCH: |
1779 | role = TASK_THROTTLE_APPLICATION; |
1780 | break; |
1781 | case PRIO_DARWIN_ROLE_DARWIN_BG: |
1782 | role = TASK_DARWINBG_APPLICATION; |
1783 | break; |
1784 | default: |
1785 | return EINVAL; |
1786 | } |
1787 | |
1788 | *task_role = role; |
1789 | |
1790 | return 0; |
1791 | } |
1792 | |
1793 | int |
1794 | proc_task_role_to_darwin_role(int task_role) |
1795 | { |
1796 | switch (task_role) { |
1797 | case TASK_FOREGROUND_APPLICATION: |
1798 | return PRIO_DARWIN_ROLE_UI_FOCAL; |
1799 | case TASK_BACKGROUND_APPLICATION: |
1800 | return PRIO_DARWIN_ROLE_UI_NON_FOCAL; |
1801 | case TASK_NONUI_APPLICATION: |
1802 | return PRIO_DARWIN_ROLE_NON_UI; |
1803 | case TASK_DEFAULT_APPLICATION: |
1804 | return PRIO_DARWIN_ROLE_UI; |
1805 | case TASK_THROTTLE_APPLICATION: |
1806 | return PRIO_DARWIN_ROLE_TAL_LAUNCH; |
1807 | case TASK_DARWINBG_APPLICATION: |
1808 | return PRIO_DARWIN_ROLE_DARWIN_BG; |
1809 | case TASK_UNSPECIFIED: |
1810 | default: |
1811 | return PRIO_DARWIN_ROLE_DEFAULT; |
1812 | } |
1813 | } |
1814 | |
1815 | |
1816 | /* TODO: remove this variable when interactive daemon audit period is over */ |
1817 | extern boolean_t ipc_importance_interactive_receiver; |
1818 | |
1819 | /* |
1820 | * Called at process exec to initialize the apptype, qos clamp, and qos seed of a process |
1821 | * |
1822 | * TODO: Make this function more table-driven instead of ad-hoc |
1823 | */ |
1824 | void |
1825 | proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role, |
1826 | ipc_port_t * portwatch_ports, int portwatch_count) |
1827 | { |
1828 | struct task_pend_token pend_token = {}; |
1829 | |
1830 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
1831 | (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START, |
1832 | task_pid(task), trequested_0(task), trequested_1(task), |
1833 | apptype, 0); |
1834 | |
1835 | switch (apptype) { |
1836 | case TASK_APPTYPE_APP_TAL: |
1837 | case TASK_APPTYPE_APP_DEFAULT: |
1838 | /* Apps become donors via the 'live-donor' flag instead of the static donor flag */ |
1839 | task_importance_mark_donor(task, FALSE); |
1840 | task_importance_mark_live_donor(task, TRUE); |
1841 | task_importance_mark_receiver(task, FALSE); |
1842 | #if CONFIG_EMBEDDED |
1843 | task_importance_mark_denap_receiver(task, FALSE); |
1844 | #else |
1845 | /* Apps are de-nap recievers on desktop for suppression behaviors */ |
1846 | task_importance_mark_denap_receiver(task, TRUE); |
1847 | #endif /* CONFIG_EMBEDDED */ |
1848 | break; |
1849 | |
1850 | case TASK_APPTYPE_DAEMON_INTERACTIVE: |
1851 | task_importance_mark_donor(task, TRUE); |
1852 | task_importance_mark_live_donor(task, FALSE); |
1853 | |
1854 | /* |
1855 | * A boot arg controls whether interactive daemons are importance receivers. |
1856 | * Normally, they are not. But for testing their behavior as an adaptive |
1857 | * daemon, the boot-arg can be set. |
1858 | * |
1859 | * TODO: remove this when the interactive daemon audit period is over. |
1860 | */ |
1861 | task_importance_mark_receiver(task, /* FALSE */ ipc_importance_interactive_receiver); |
1862 | task_importance_mark_denap_receiver(task, FALSE); |
1863 | break; |
1864 | |
1865 | case TASK_APPTYPE_DAEMON_STANDARD: |
1866 | task_importance_mark_donor(task, TRUE); |
1867 | task_importance_mark_live_donor(task, FALSE); |
1868 | task_importance_mark_receiver(task, FALSE); |
1869 | task_importance_mark_denap_receiver(task, FALSE); |
1870 | break; |
1871 | |
1872 | case TASK_APPTYPE_DAEMON_ADAPTIVE: |
1873 | task_importance_mark_donor(task, FALSE); |
1874 | task_importance_mark_live_donor(task, FALSE); |
1875 | task_importance_mark_receiver(task, TRUE); |
1876 | task_importance_mark_denap_receiver(task, FALSE); |
1877 | break; |
1878 | |
1879 | case TASK_APPTYPE_DAEMON_BACKGROUND: |
1880 | task_importance_mark_donor(task, FALSE); |
1881 | task_importance_mark_live_donor(task, FALSE); |
1882 | task_importance_mark_receiver(task, FALSE); |
1883 | task_importance_mark_denap_receiver(task, FALSE); |
1884 | break; |
1885 | |
1886 | case TASK_APPTYPE_NONE: |
1887 | break; |
1888 | } |
1889 | |
1890 | if (portwatch_ports != NULL && apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) { |
1891 | int portwatch_boosts = 0; |
1892 | |
1893 | for (int i = 0; i < portwatch_count; i++) { |
1894 | ipc_port_t port = NULL; |
1895 | |
1896 | if ((port = portwatch_ports[i]) != NULL) { |
1897 | int boost = 0; |
1898 | task_add_importance_watchport(task, port, &boost); |
1899 | portwatch_boosts += boost; |
1900 | } |
1901 | } |
1902 | |
1903 | if (portwatch_boosts > 0) { |
1904 | task_importance_hold_internal_assertion(task, portwatch_boosts); |
1905 | } |
1906 | } |
1907 | |
1908 | task_lock(task); |
1909 | |
1910 | if (apptype == TASK_APPTYPE_APP_TAL) { |
1911 | /* TAL starts off enabled by default */ |
1912 | task->requested_policy.trp_tal_enabled = 1; |
1913 | } |
1914 | |
1915 | if (apptype != TASK_APPTYPE_NONE) { |
1916 | task->requested_policy.trp_apptype = apptype; |
1917 | } |
1918 | |
1919 | #if CONFIG_EMBEDDED |
1920 | /* Remove this after launchd starts setting it properly */ |
1921 | if (apptype == TASK_APPTYPE_APP_DEFAULT && role == TASK_UNSPECIFIED) { |
1922 | task->requested_policy.trp_role = TASK_FOREGROUND_APPLICATION; |
1923 | } else |
1924 | #endif |
1925 | if (role != TASK_UNSPECIFIED) { |
1926 | task->requested_policy.trp_role = role; |
1927 | } |
1928 | |
1929 | if (qos_clamp != THREAD_QOS_UNSPECIFIED) { |
1930 | task->requested_policy.trp_qos_clamp = qos_clamp; |
1931 | } |
1932 | |
1933 | task_policy_update_locked(task, &pend_token); |
1934 | |
1935 | task_unlock(task); |
1936 | |
1937 | /* Ensure the donor bit is updated to be in sync with the new live donor status */ |
1938 | pend_token.tpt_update_live_donor = 1; |
1939 | |
1940 | task_policy_update_complete_unlocked(task, &pend_token); |
1941 | |
1942 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, |
1943 | (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END, |
1944 | task_pid(task), trequested_0(task), trequested_1(task), |
1945 | task_is_importance_receiver(task), 0); |
1946 | } |
1947 | |
1948 | /* |
1949 | * Inherit task role across exec |
1950 | */ |
1951 | void |
1952 | proc_inherit_task_role(task_t new_task, |
1953 | task_t old_task) |
1954 | { |
1955 | int role; |
1956 | |
1957 | /* inherit the role from old task to new task */ |
1958 | role = proc_get_task_policy(old_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE); |
1959 | proc_set_task_policy(new_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, role); |
1960 | } |
1961 | |
1962 | extern void *initproc; |
1963 | |
1964 | /* |
1965 | * Compute the default main thread qos for a task |
1966 | */ |
1967 | int |
1968 | task_compute_main_thread_qos(task_t task) |
1969 | { |
1970 | int primordial_qos = THREAD_QOS_UNSPECIFIED; |
1971 | |
1972 | int qos_clamp = task->requested_policy.trp_qos_clamp; |
1973 | |
1974 | switch (task->requested_policy.trp_apptype) { |
1975 | case TASK_APPTYPE_APP_TAL: |
1976 | case TASK_APPTYPE_APP_DEFAULT: |
1977 | primordial_qos = THREAD_QOS_USER_INTERACTIVE; |
1978 | break; |
1979 | |
1980 | case TASK_APPTYPE_DAEMON_INTERACTIVE: |
1981 | case TASK_APPTYPE_DAEMON_STANDARD: |
1982 | case TASK_APPTYPE_DAEMON_ADAPTIVE: |
1983 | primordial_qos = THREAD_QOS_LEGACY; |
1984 | break; |
1985 | |
1986 | case TASK_APPTYPE_DAEMON_BACKGROUND: |
1987 | primordial_qos = THREAD_QOS_BACKGROUND; |
1988 | break; |
1989 | } |
1990 | |
1991 | if (task->bsd_info == initproc) { |
1992 | /* PID 1 gets a special case */ |
1993 | primordial_qos = MAX(primordial_qos, THREAD_QOS_USER_INITIATED); |
1994 | } |
1995 | |
1996 | if (qos_clamp != THREAD_QOS_UNSPECIFIED) { |
1997 | if (primordial_qos != THREAD_QOS_UNSPECIFIED) { |
1998 | primordial_qos = MIN(qos_clamp, primordial_qos); |
1999 | } else { |
2000 | primordial_qos = qos_clamp; |
2001 | } |
2002 | } |
2003 | |
2004 | return primordial_qos; |
2005 | } |
2006 | |
2007 | |
2008 | /* for process_policy to check before attempting to set */ |
2009 | boolean_t |
2010 | proc_task_is_tal(task_t task) |
2011 | { |
2012 | return (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) ? TRUE : FALSE; |
2013 | } |
2014 | |
2015 | int |
2016 | task_get_apptype(task_t task) |
2017 | { |
2018 | return task->requested_policy.trp_apptype; |
2019 | } |
2020 | |
2021 | boolean_t |
2022 | task_is_daemon(task_t task) |
2023 | { |
2024 | switch (task->requested_policy.trp_apptype) { |
2025 | case TASK_APPTYPE_DAEMON_INTERACTIVE: |
2026 | case TASK_APPTYPE_DAEMON_STANDARD: |
2027 | case TASK_APPTYPE_DAEMON_ADAPTIVE: |
2028 | case TASK_APPTYPE_DAEMON_BACKGROUND: |
2029 | return TRUE; |
2030 | default: |
2031 | return FALSE; |
2032 | } |
2033 | } |
2034 | |
2035 | boolean_t |
2036 | task_is_app(task_t task) |
2037 | { |
2038 | switch (task->requested_policy.trp_apptype) { |
2039 | case TASK_APPTYPE_APP_DEFAULT: |
2040 | case TASK_APPTYPE_APP_TAL: |
2041 | return TRUE; |
2042 | default: |
2043 | return FALSE; |
2044 | } |
2045 | } |
2046 | |
2047 | /* for telemetry */ |
2048 | integer_t |
2049 | task_grab_latency_qos(task_t task) |
2050 | { |
2051 | return qos_latency_policy_package(proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS)); |
2052 | } |
2053 | |
2054 | /* update the darwin background action state in the flags field for libproc */ |
2055 | int |
2056 | proc_get_darwinbgstate(task_t task, uint32_t * flagsp) |
2057 | { |
2058 | if (task->requested_policy.trp_ext_darwinbg) |
2059 | *flagsp |= PROC_FLAG_EXT_DARWINBG; |
2060 | |
2061 | if (task->requested_policy.trp_int_darwinbg) |
2062 | *flagsp |= PROC_FLAG_DARWINBG; |
2063 | |
2064 | #if CONFIG_EMBEDDED |
2065 | if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) |
2066 | *flagsp |= PROC_FLAG_IOS_APPLEDAEMON; |
2067 | |
2068 | if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) |
2069 | *flagsp |= PROC_FLAG_IOS_IMPPROMOTION; |
2070 | #endif /* CONFIG_EMBEDDED */ |
2071 | |
2072 | if (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_DEFAULT || |
2073 | task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) |
2074 | *flagsp |= PROC_FLAG_APPLICATION; |
2075 | |
2076 | if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) |
2077 | *flagsp |= PROC_FLAG_ADAPTIVE; |
2078 | |
2079 | if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && |
2080 | task->requested_policy.trp_boosted == 1) |
2081 | *flagsp |= PROC_FLAG_ADAPTIVE_IMPORTANT; |
2082 | |
2083 | if (task_is_importance_donor(task)) |
2084 | *flagsp |= PROC_FLAG_IMPORTANCE_DONOR; |
2085 | |
2086 | if (task->effective_policy.tep_sup_active) |
2087 | *flagsp |= PROC_FLAG_SUPPRESSED; |
2088 | |
2089 | return(0); |
2090 | } |
2091 | |
2092 | /* |
2093 | * Tracepoint data... Reading the tracepoint data can be somewhat complicated. |
2094 | * The current scheme packs as much data into a single tracepoint as it can. |
2095 | * |
2096 | * Each task/thread requested/effective structure is 64 bits in size. Any |
2097 | * given tracepoint will emit either requested or effective data, but not both. |
2098 | * |
2099 | * A tracepoint may emit any of task, thread, or task & thread data. |
2100 | * |
2101 | * The type of data emitted varies with pointer size. Where possible, both |
2102 | * task and thread data are emitted. In LP32 systems, the first and second |
2103 | * halves of either the task or thread data is emitted. |
2104 | * |
2105 | * The code uses uintptr_t array indexes instead of high/low to avoid |
2106 | * confusion WRT big vs little endian. |
2107 | * |
2108 | * The truth table for the tracepoint data functions is below, and has the |
2109 | * following invariants: |
2110 | * |
2111 | * 1) task and thread are uintptr_t* |
2112 | * 2) task may never be NULL |
2113 | * |
2114 | * |
2115 | * LP32 LP64 |
2116 | * trequested_0(task, NULL) task[0] task[0] |
2117 | * trequested_1(task, NULL) task[1] NULL |
2118 | * trequested_0(task, thread) thread[0] task[0] |
2119 | * trequested_1(task, thread) thread[1] thread[0] |
2120 | * |
2121 | * Basically, you get a full task or thread on LP32, and both on LP64. |
2122 | * |
2123 | * The uintptr_t munging here is squicky enough to deserve a comment. |
2124 | * |
2125 | * The variables we are accessing are laid out in memory like this: |
2126 | * |
2127 | * [ LP64 uintptr_t 0 ] |
2128 | * [ LP32 uintptr_t 0 ] [ LP32 uintptr_t 1 ] |
2129 | * |
2130 | * 1 2 3 4 5 6 7 8 |
2131 | * |
2132 | */ |
2133 | |
2134 | static uintptr_t |
2135 | trequested_0(task_t task) |
2136 | { |
2137 | static_assert(sizeof(struct task_requested_policy) == sizeof(uint64_t), "size invariant violated" ); |
2138 | |
2139 | uintptr_t* raw = (uintptr_t*)&task->requested_policy; |
2140 | |
2141 | return raw[0]; |
2142 | } |
2143 | |
2144 | static uintptr_t |
2145 | trequested_1(task_t task) |
2146 | { |
2147 | #if defined __LP64__ |
2148 | (void)task; |
2149 | return 0; |
2150 | #else |
2151 | uintptr_t* raw = (uintptr_t*)(&task->requested_policy); |
2152 | return raw[1]; |
2153 | #endif |
2154 | } |
2155 | |
2156 | static uintptr_t |
2157 | teffective_0(task_t task) |
2158 | { |
2159 | uintptr_t* raw = (uintptr_t*)&task->effective_policy; |
2160 | |
2161 | return raw[0]; |
2162 | } |
2163 | |
2164 | static uintptr_t |
2165 | teffective_1(task_t task) |
2166 | { |
2167 | #if defined __LP64__ |
2168 | (void)task; |
2169 | return 0; |
2170 | #else |
2171 | uintptr_t* raw = (uintptr_t*)(&task->effective_policy); |
2172 | return raw[1]; |
2173 | #endif |
2174 | } |
2175 | |
2176 | /* dump pending for tracepoint */ |
2177 | uint32_t tpending(task_pend_token_t pend_token) { return *(uint32_t*)(void*)(pend_token); } |
2178 | |
2179 | uint64_t |
2180 | task_requested_bitfield(task_t task) |
2181 | { |
2182 | uint64_t bits = 0; |
2183 | struct task_requested_policy requested = task->requested_policy; |
2184 | |
2185 | bits |= (requested.trp_int_darwinbg ? POLICY_REQ_INT_DARWIN_BG : 0); |
2186 | bits |= (requested.trp_ext_darwinbg ? POLICY_REQ_EXT_DARWIN_BG : 0); |
2187 | bits |= (requested.trp_int_iotier ? (((uint64_t)requested.trp_int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0); |
2188 | bits |= (requested.trp_ext_iotier ? (((uint64_t)requested.trp_ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0); |
2189 | bits |= (requested.trp_int_iopassive ? POLICY_REQ_INT_PASSIVE_IO : 0); |
2190 | bits |= (requested.trp_ext_iopassive ? POLICY_REQ_EXT_PASSIVE_IO : 0); |
2191 | bits |= (requested.trp_bg_iotier ? (((uint64_t)requested.trp_bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT) : 0); |
2192 | bits |= (requested.trp_terminated ? POLICY_REQ_TERMINATED : 0); |
2193 | |
2194 | bits |= (requested.trp_boosted ? POLICY_REQ_BOOSTED : 0); |
2195 | bits |= (requested.trp_tal_enabled ? POLICY_REQ_TAL_ENABLED : 0); |
2196 | bits |= (requested.trp_apptype ? (((uint64_t)requested.trp_apptype) << POLICY_REQ_APPTYPE_SHIFT) : 0); |
2197 | bits |= (requested.trp_role ? (((uint64_t)requested.trp_role) << POLICY_REQ_ROLE_SHIFT) : 0); |
2198 | |
2199 | bits |= (requested.trp_sup_active ? POLICY_REQ_SUP_ACTIVE : 0); |
2200 | bits |= (requested.trp_sup_lowpri_cpu ? POLICY_REQ_SUP_LOWPRI_CPU : 0); |
2201 | bits |= (requested.trp_sup_cpu ? POLICY_REQ_SUP_CPU : 0); |
2202 | bits |= (requested.trp_sup_timer ? (((uint64_t)requested.trp_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0); |
2203 | bits |= (requested.trp_sup_throughput ? (((uint64_t)requested.trp_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT) : 0); |
2204 | bits |= (requested.trp_sup_disk ? POLICY_REQ_SUP_DISK_THROTTLE : 0); |
2205 | bits |= (requested.trp_sup_bg_sockets ? POLICY_REQ_SUP_BG_SOCKETS : 0); |
2206 | |
2207 | bits |= (requested.trp_base_latency_qos ? (((uint64_t)requested.trp_base_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0); |
2208 | bits |= (requested.trp_over_latency_qos ? (((uint64_t)requested.trp_over_latency_qos) << POLICY_REQ_OVER_LATENCY_QOS_SHIFT) : 0); |
2209 | bits |= (requested.trp_base_through_qos ? (((uint64_t)requested.trp_base_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0); |
2210 | bits |= (requested.trp_over_through_qos ? (((uint64_t)requested.trp_over_through_qos) << POLICY_REQ_OVER_THROUGH_QOS_SHIFT) : 0); |
2211 | bits |= (requested.trp_sfi_managed ? POLICY_REQ_SFI_MANAGED : 0); |
2212 | bits |= (requested.trp_qos_clamp ? (((uint64_t)requested.trp_qos_clamp) << POLICY_REQ_QOS_CLAMP_SHIFT) : 0); |
2213 | |
2214 | return bits; |
2215 | } |
2216 | |
2217 | uint64_t |
2218 | task_effective_bitfield(task_t task) |
2219 | { |
2220 | uint64_t bits = 0; |
2221 | struct task_effective_policy effective = task->effective_policy; |
2222 | |
2223 | bits |= (effective.tep_io_tier ? (((uint64_t)effective.tep_io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0); |
2224 | bits |= (effective.tep_io_passive ? POLICY_EFF_IO_PASSIVE : 0); |
2225 | bits |= (effective.tep_darwinbg ? POLICY_EFF_DARWIN_BG : 0); |
2226 | bits |= (effective.tep_lowpri_cpu ? POLICY_EFF_LOWPRI_CPU : 0); |
2227 | bits |= (effective.tep_terminated ? POLICY_EFF_TERMINATED : 0); |
2228 | bits |= (effective.tep_all_sockets_bg ? POLICY_EFF_ALL_SOCKETS_BG : 0); |
2229 | bits |= (effective.tep_new_sockets_bg ? POLICY_EFF_NEW_SOCKETS_BG : 0); |
2230 | bits |= (effective.tep_bg_iotier ? (((uint64_t)effective.tep_bg_iotier) << POLICY_EFF_BG_IOTIER_SHIFT) : 0); |
2231 | bits |= (effective.tep_qos_ui_is_urgent ? POLICY_EFF_QOS_UI_IS_URGENT : 0); |
2232 | |
2233 | bits |= (effective.tep_tal_engaged ? POLICY_EFF_TAL_ENGAGED : 0); |
2234 | bits |= (effective.tep_watchers_bg ? POLICY_EFF_WATCHERS_BG : 0); |
2235 | bits |= (effective.tep_sup_active ? POLICY_EFF_SUP_ACTIVE : 0); |
2236 | bits |= (effective.tep_suppressed_cpu ? POLICY_EFF_SUP_CPU : 0); |
2237 | bits |= (effective.tep_role ? (((uint64_t)effective.tep_role) << POLICY_EFF_ROLE_SHIFT) : 0); |
2238 | bits |= (effective.tep_latency_qos ? (((uint64_t)effective.tep_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0); |
2239 | bits |= (effective.tep_through_qos ? (((uint64_t)effective.tep_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0); |
2240 | bits |= (effective.tep_sfi_managed ? POLICY_EFF_SFI_MANAGED : 0); |
2241 | bits |= (effective.tep_qos_ceiling ? (((uint64_t)effective.tep_qos_ceiling) << POLICY_EFF_QOS_CEILING_SHIFT) : 0); |
2242 | |
2243 | return bits; |
2244 | } |
2245 | |
2246 | |
2247 | /* |
2248 | * Resource usage and CPU related routines |
2249 | */ |
2250 | |
2251 | int |
2252 | proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep) |
2253 | { |
2254 | |
2255 | int error = 0; |
2256 | int scope; |
2257 | |
2258 | task_lock(task); |
2259 | |
2260 | |
2261 | error = task_get_cpuusage(task, percentagep, intervalp, deadlinep, &scope); |
2262 | task_unlock(task); |
2263 | |
2264 | /* |
2265 | * Reverse-map from CPU resource limit scopes back to policies (see comment below). |
2266 | */ |
2267 | if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) { |
2268 | *policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC; |
2269 | } else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) { |
2270 | *policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE; |
2271 | } else if (scope == TASK_RUSECPU_FLAGS_DEADLINE) { |
2272 | *policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE; |
2273 | } |
2274 | |
2275 | return(error); |
2276 | } |
2277 | |
2278 | /* |
2279 | * Configure the default CPU usage monitor parameters. |
2280 | * |
2281 | * For tasks which have this mechanism activated: if any thread in the |
2282 | * process consumes more CPU than this, an EXC_RESOURCE exception will be generated. |
2283 | */ |
2284 | void |
2285 | proc_init_cpumon_params(void) |
2286 | { |
2287 | /* |
2288 | * The max CPU percentage can be configured via the boot-args and |
2289 | * a key in the device tree. The boot-args are honored first, then the |
2290 | * device tree. |
2291 | */ |
2292 | if (!PE_parse_boot_argn("max_cpumon_percentage" , &proc_max_cpumon_percentage, |
2293 | sizeof (proc_max_cpumon_percentage))) |
2294 | { |
2295 | uint64_t max_percentage = 0ULL; |
2296 | |
2297 | if (!PE_get_default("kern.max_cpumon_percentage" , &max_percentage, |
2298 | sizeof(max_percentage))) |
2299 | { |
2300 | max_percentage = DEFAULT_CPUMON_PERCENTAGE; |
2301 | } |
2302 | |
2303 | assert(max_percentage <= UINT8_MAX); |
2304 | proc_max_cpumon_percentage = (uint8_t) max_percentage; |
2305 | } |
2306 | |
2307 | if (proc_max_cpumon_percentage > 100) { |
2308 | proc_max_cpumon_percentage = 100; |
2309 | } |
2310 | |
2311 | /* |
2312 | * The interval should be specified in seconds. |
2313 | * |
2314 | * Like the max CPU percentage, the max CPU interval can be configured |
2315 | * via boot-args and the device tree. |
2316 | */ |
2317 | if (!PE_parse_boot_argn("max_cpumon_interval" , &proc_max_cpumon_interval, |
2318 | sizeof (proc_max_cpumon_interval))) |
2319 | { |
2320 | if (!PE_get_default("kern.max_cpumon_interval" , &proc_max_cpumon_interval, |
2321 | sizeof(proc_max_cpumon_interval))) |
2322 | { |
2323 | proc_max_cpumon_interval = DEFAULT_CPUMON_INTERVAL; |
2324 | } |
2325 | } |
2326 | |
2327 | proc_max_cpumon_interval *= NSEC_PER_SEC; |
2328 | |
2329 | /* TEMPORARY boot arg to control App suppression */ |
2330 | PE_parse_boot_argn("task_policy_suppression_flags" , |
2331 | &task_policy_suppression_flags, |
2332 | sizeof(task_policy_suppression_flags)); |
2333 | |
2334 | /* adjust suppression disk policy if called for in boot arg */ |
2335 | if (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_IOTIER2) { |
2336 | proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER2; |
2337 | } |
2338 | } |
2339 | |
2340 | /* |
2341 | * Currently supported configurations for CPU limits. |
2342 | * |
2343 | * Policy | Deadline-based CPU limit | Percentage-based CPU limit |
2344 | * -------------------------------------+--------------------------+------------------------------ |
2345 | * PROC_POLICY_RSRCACT_THROTTLE | ENOTSUP | Task-wide scope only |
2346 | * PROC_POLICY_RSRCACT_SUSPEND | Task-wide scope only | ENOTSUP |
2347 | * PROC_POLICY_RSRCACT_TERMINATE | Task-wide scope only | ENOTSUP |
2348 | * PROC_POLICY_RSRCACT_NOTIFY_KQ | Task-wide scope only | ENOTSUP |
2349 | * PROC_POLICY_RSRCACT_NOTIFY_EXC | ENOTSUP | Per-thread scope only |
2350 | * |
2351 | * A deadline-based CPU limit is actually a simple wallclock timer - the requested action is performed |
2352 | * after the specified amount of wallclock time has elapsed. |
2353 | * |
2354 | * A percentage-based CPU limit performs the requested action after the specified amount of actual CPU time |
2355 | * has been consumed -- regardless of how much wallclock time has elapsed -- by either the task as an |
2356 | * aggregate entity (so-called "Task-wide" or "Proc-wide" scope, whereby the CPU time consumed by all threads |
2357 | * in the task are added together), or by any one thread in the task (so-called "per-thread" scope). |
2358 | * |
2359 | * We support either deadline != 0 OR percentage != 0, but not both. The original intention in having them |
2360 | * share an API was to use actual CPU time as the basis of the deadline-based limit (as in: perform an action |
2361 | * after I have used some amount of CPU time; this is different than the recurring percentage/interval model) |
2362 | * but the potential consumer of the API at the time was insisting on wallclock time instead. |
2363 | * |
2364 | * Currently, requesting notification via an exception is the only way to get per-thread scope for a |
2365 | * CPU limit. All other types of notifications force task-wide scope for the limit. |
2366 | */ |
2367 | int |
2368 | proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline, |
2369 | int cpumon_entitled) |
2370 | { |
2371 | int error = 0; |
2372 | int scope; |
2373 | |
2374 | /* |
2375 | * Enforce the matrix of supported configurations for policy, percentage, and deadline. |
2376 | */ |
2377 | switch (policy) { |
2378 | // If no policy is explicitly given, the default is to throttle. |
2379 | case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE: |
2380 | case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE: |
2381 | if (deadline != 0) |
2382 | return (ENOTSUP); |
2383 | scope = TASK_RUSECPU_FLAGS_PROC_LIMIT; |
2384 | break; |
2385 | case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND: |
2386 | case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE: |
2387 | case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ: |
2388 | if (percentage != 0) |
2389 | return (ENOTSUP); |
2390 | scope = TASK_RUSECPU_FLAGS_DEADLINE; |
2391 | break; |
2392 | case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC: |
2393 | if (deadline != 0) |
2394 | return (ENOTSUP); |
2395 | scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT; |
2396 | #ifdef CONFIG_NOMONITORS |
2397 | return (error); |
2398 | #endif /* CONFIG_NOMONITORS */ |
2399 | break; |
2400 | default: |
2401 | return (EINVAL); |
2402 | } |
2403 | |
2404 | task_lock(task); |
2405 | if (task != current_task()) { |
2406 | task->policy_ru_cpu_ext = policy; |
2407 | } else { |
2408 | task->policy_ru_cpu = policy; |
2409 | } |
2410 | error = task_set_cpuusage(task, percentage, interval, deadline, scope, cpumon_entitled); |
2411 | task_unlock(task); |
2412 | return(error); |
2413 | } |
2414 | |
2415 | /* TODO: get rid of these */ |
2416 | #define TASK_POLICY_CPU_RESOURCE_USAGE 0 |
2417 | #define TASK_POLICY_WIREDMEM_RESOURCE_USAGE 1 |
2418 | #define TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE 2 |
2419 | #define TASK_POLICY_DISK_RESOURCE_USAGE 3 |
2420 | #define TASK_POLICY_NETWORK_RESOURCE_USAGE 4 |
2421 | #define TASK_POLICY_POWER_RESOURCE_USAGE 5 |
2422 | |
2423 | #define TASK_POLICY_RESOURCE_USAGE_COUNT 6 |
2424 | |
2425 | int |
2426 | proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled) |
2427 | { |
2428 | int error = 0; |
2429 | int action; |
2430 | void * bsdinfo = NULL; |
2431 | |
2432 | task_lock(task); |
2433 | if (task != current_task()) { |
2434 | task->policy_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT; |
2435 | } else { |
2436 | task->policy_ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT; |
2437 | } |
2438 | |
2439 | error = task_clear_cpuusage_locked(task, cpumon_entitled); |
2440 | if (error != 0) |
2441 | goto out; |
2442 | |
2443 | action = task->applied_ru_cpu; |
2444 | if (task->applied_ru_cpu_ext != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) { |
2445 | /* reset action */ |
2446 | task->applied_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE; |
2447 | } |
2448 | if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) { |
2449 | bsdinfo = task->bsd_info; |
2450 | task_unlock(task); |
2451 | proc_restore_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action); |
2452 | goto out1; |
2453 | } |
2454 | |
2455 | out: |
2456 | task_unlock(task); |
2457 | out1: |
2458 | return(error); |
2459 | |
2460 | } |
2461 | |
2462 | /* used to apply resource limit related actions */ |
2463 | static int |
2464 | task_apply_resource_actions(task_t task, int type) |
2465 | { |
2466 | int action = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE; |
2467 | void * bsdinfo = NULL; |
2468 | |
2469 | switch (type) { |
2470 | case TASK_POLICY_CPU_RESOURCE_USAGE: |
2471 | break; |
2472 | case TASK_POLICY_WIREDMEM_RESOURCE_USAGE: |
2473 | case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE: |
2474 | case TASK_POLICY_DISK_RESOURCE_USAGE: |
2475 | case TASK_POLICY_NETWORK_RESOURCE_USAGE: |
2476 | case TASK_POLICY_POWER_RESOURCE_USAGE: |
2477 | return(0); |
2478 | |
2479 | default: |
2480 | return(1); |
2481 | }; |
2482 | |
2483 | /* only cpu actions for now */ |
2484 | task_lock(task); |
2485 | |
2486 | if (task->applied_ru_cpu_ext == TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) { |
2487 | /* apply action */ |
2488 | task->applied_ru_cpu_ext = task->policy_ru_cpu_ext; |
2489 | action = task->applied_ru_cpu_ext; |
2490 | } else { |
2491 | action = task->applied_ru_cpu_ext; |
2492 | } |
2493 | |
2494 | if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) { |
2495 | bsdinfo = task->bsd_info; |
2496 | task_unlock(task); |
2497 | proc_apply_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action); |
2498 | } else |
2499 | task_unlock(task); |
2500 | |
2501 | return(0); |
2502 | } |
2503 | |
2504 | /* |
2505 | * XXX This API is somewhat broken; we support multiple simultaneous CPU limits, but the get/set API |
2506 | * only allows for one at a time. This means that if there is a per-thread limit active, the other |
2507 | * "scopes" will not be accessible via this API. We could change it to pass in the scope of interest |
2508 | * to the caller, and prefer that, but there's no need for that at the moment. |
2509 | */ |
2510 | static int |
2511 | task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope) |
2512 | { |
2513 | *percentagep = 0; |
2514 | *intervalp = 0; |
2515 | *deadlinep = 0; |
2516 | |
2517 | if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) { |
2518 | *scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT; |
2519 | *percentagep = task->rusage_cpu_perthr_percentage; |
2520 | *intervalp = task->rusage_cpu_perthr_interval; |
2521 | } else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) != 0) { |
2522 | *scope = TASK_RUSECPU_FLAGS_PROC_LIMIT; |
2523 | *percentagep = task->rusage_cpu_percentage; |
2524 | *intervalp = task->rusage_cpu_interval; |
2525 | } else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) != 0) { |
2526 | *scope = TASK_RUSECPU_FLAGS_DEADLINE; |
2527 | *deadlinep = task->rusage_cpu_deadline; |
2528 | } else { |
2529 | *scope = 0; |
2530 | } |
2531 | |
2532 | return(0); |
2533 | } |
2534 | |
2535 | /* |
2536 | * Suspend the CPU usage monitor for the task. Return value indicates |
2537 | * if the mechanism was actually enabled. |
2538 | */ |
2539 | int |
2540 | task_suspend_cpumon(task_t task) |
2541 | { |
2542 | thread_t thread; |
2543 | |
2544 | task_lock_assert_owned(task); |
2545 | |
2546 | if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) { |
2547 | return KERN_INVALID_ARGUMENT; |
2548 | } |
2549 | |
2550 | #if CONFIG_TELEMETRY |
2551 | /* |
2552 | * Disable task-wide telemetry if it was ever enabled by the CPU usage |
2553 | * monitor's warning zone. |
2554 | */ |
2555 | telemetry_task_ctl_locked(task, TF_CPUMON_WARNING, 0); |
2556 | #endif |
2557 | |
2558 | /* |
2559 | * Suspend monitoring for the task, and propagate that change to each thread. |
2560 | */ |
2561 | task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON); |
2562 | queue_iterate(&task->threads, thread, thread_t, task_threads) { |
2563 | act_set_astledger(thread); |
2564 | } |
2565 | |
2566 | return KERN_SUCCESS; |
2567 | } |
2568 | |
2569 | /* |
2570 | * Remove all traces of the CPU monitor. |
2571 | */ |
2572 | int |
2573 | task_disable_cpumon(task_t task) |
2574 | { |
2575 | int kret; |
2576 | |
2577 | task_lock_assert_owned(task); |
2578 | |
2579 | kret = task_suspend_cpumon(task); |
2580 | if (kret) return kret; |
2581 | |
2582 | /* Once we clear these values, the monitor can't be resumed */ |
2583 | task->rusage_cpu_perthr_percentage = 0; |
2584 | task->rusage_cpu_perthr_interval = 0; |
2585 | |
2586 | return (KERN_SUCCESS); |
2587 | } |
2588 | |
2589 | |
2590 | static int |
2591 | task_enable_cpumon_locked(task_t task) |
2592 | { |
2593 | thread_t thread; |
2594 | task_lock_assert_owned(task); |
2595 | |
2596 | if (task->rusage_cpu_perthr_percentage == 0 || |
2597 | task->rusage_cpu_perthr_interval == 0) { |
2598 | return KERN_INVALID_ARGUMENT; |
2599 | } |
2600 | |
2601 | task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PERTHR_LIMIT; |
2602 | queue_iterate(&task->threads, thread, thread_t, task_threads) { |
2603 | act_set_astledger(thread); |
2604 | } |
2605 | |
2606 | return KERN_SUCCESS; |
2607 | } |
2608 | |
2609 | int |
2610 | task_resume_cpumon(task_t task) |
2611 | { |
2612 | kern_return_t kret; |
2613 | |
2614 | if (!task) { |
2615 | return EINVAL; |
2616 | } |
2617 | |
2618 | task_lock(task); |
2619 | kret = task_enable_cpumon_locked(task); |
2620 | task_unlock(task); |
2621 | |
2622 | return kret; |
2623 | } |
2624 | |
2625 | |
2626 | /* duplicate values from bsd/sys/process_policy.h */ |
2627 | #define PROC_POLICY_CPUMON_DISABLE 0xFF |
2628 | #define PROC_POLICY_CPUMON_DEFAULTS 0xFE |
2629 | |
2630 | static int |
2631 | task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int cpumon_entitled) |
2632 | { |
2633 | uint64_t abstime = 0; |
2634 | uint64_t limittime = 0; |
2635 | |
2636 | lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED); |
2637 | |
2638 | /* By default, refill once per second */ |
2639 | if (interval == 0) |
2640 | interval = NSEC_PER_SEC; |
2641 | |
2642 | if (percentage != 0) { |
2643 | if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) { |
2644 | boolean_t warn = FALSE; |
2645 | |
2646 | /* |
2647 | * A per-thread CPU limit on a task generates an exception |
2648 | * (LEDGER_ACTION_EXCEPTION) if any one thread in the task |
2649 | * exceeds the limit. |
2650 | */ |
2651 | |
2652 | if (percentage == PROC_POLICY_CPUMON_DISABLE) { |
2653 | if (cpumon_entitled) { |
2654 | /* 25095698 - task_disable_cpumon() should be reliable */ |
2655 | task_disable_cpumon(task); |
2656 | return 0; |
2657 | } |
2658 | |
2659 | /* |
2660 | * This task wishes to disable the CPU usage monitor, but it's |
2661 | * missing the required entitlement: |
2662 | * com.apple.private.kernel.override-cpumon |
2663 | * |
2664 | * Instead, treat this as a request to reset its params |
2665 | * back to the defaults. |
2666 | */ |
2667 | warn = TRUE; |
2668 | percentage = PROC_POLICY_CPUMON_DEFAULTS; |
2669 | } |
2670 | |
2671 | if (percentage == PROC_POLICY_CPUMON_DEFAULTS) { |
2672 | percentage = proc_max_cpumon_percentage; |
2673 | interval = proc_max_cpumon_interval; |
2674 | } |
2675 | |
2676 | if (percentage > 100) { |
2677 | percentage = 100; |
2678 | } |
2679 | |
2680 | /* |
2681 | * Passing in an interval of -1 means either: |
2682 | * - Leave the interval as-is, if there's already a per-thread |
2683 | * limit configured |
2684 | * - Use the system default. |
2685 | */ |
2686 | if (interval == -1ULL) { |
2687 | if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) { |
2688 | interval = task->rusage_cpu_perthr_interval; |
2689 | } else { |
2690 | interval = proc_max_cpumon_interval; |
2691 | } |
2692 | } |
2693 | |
2694 | /* |
2695 | * Enforce global caps on CPU usage monitor here if the process is not |
2696 | * entitled to escape the global caps. |
2697 | */ |
2698 | if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) { |
2699 | warn = TRUE; |
2700 | percentage = proc_max_cpumon_percentage; |
2701 | } |
2702 | |
2703 | if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) { |
2704 | warn = TRUE; |
2705 | interval = proc_max_cpumon_interval; |
2706 | } |
2707 | |
2708 | if (warn) { |
2709 | int pid = 0; |
2710 | const char *procname = "unknown" ; |
2711 | |
2712 | #ifdef MACH_BSD |
2713 | pid = proc_selfpid(); |
2714 | if (current_task()->bsd_info != NULL) { |
2715 | procname = proc_name_address(current_task()->bsd_info); |
2716 | } |
2717 | #endif |
2718 | |
2719 | printf("process %s[%d] denied attempt to escape CPU monitor" |
2720 | " (missing required entitlement).\n" , procname, pid); |
2721 | } |
2722 | |
2723 | /* configure the limit values */ |
2724 | task->rusage_cpu_perthr_percentage = percentage; |
2725 | task->rusage_cpu_perthr_interval = interval; |
2726 | |
2727 | /* and enable the CPU monitor */ |
2728 | (void)task_enable_cpumon_locked(task); |
2729 | } else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) { |
2730 | /* |
2731 | * Currently, a proc-wide CPU limit always blocks if the limit is |
2732 | * exceeded (LEDGER_ACTION_BLOCK). |
2733 | */ |
2734 | task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PROC_LIMIT; |
2735 | task->rusage_cpu_percentage = percentage; |
2736 | task->rusage_cpu_interval = interval; |
2737 | |
2738 | limittime = (interval * percentage) / 100; |
2739 | nanoseconds_to_absolutetime(limittime, &abstime); |
2740 | |
2741 | ledger_set_limit(task->ledger, task_ledgers.cpu_time, abstime, 0); |
2742 | ledger_set_period(task->ledger, task_ledgers.cpu_time, interval); |
2743 | ledger_set_action(task->ledger, task_ledgers.cpu_time, LEDGER_ACTION_BLOCK); |
2744 | } |
2745 | } |
2746 | |
2747 | if (deadline != 0) { |
2748 | assert(scope == TASK_RUSECPU_FLAGS_DEADLINE); |
2749 | |
2750 | /* if already in use, cancel and wait for it to cleanout */ |
2751 | if (task->rusage_cpu_callt != NULL) { |
2752 | task_unlock(task); |
2753 | thread_call_cancel_wait(task->rusage_cpu_callt); |
2754 | task_lock(task); |
2755 | } |
2756 | if (task->rusage_cpu_callt == NULL) { |
2757 | task->rusage_cpu_callt = thread_call_allocate_with_priority(task_action_cpuusage, (thread_call_param_t)task, THREAD_CALL_PRIORITY_KERNEL); |
2758 | } |
2759 | /* setup callout */ |
2760 | if (task->rusage_cpu_callt != 0) { |
2761 | uint64_t save_abstime = 0; |
2762 | |
2763 | task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_DEADLINE; |
2764 | task->rusage_cpu_deadline = deadline; |
2765 | |
2766 | nanoseconds_to_absolutetime(deadline, &abstime); |
2767 | save_abstime = abstime; |
2768 | clock_absolutetime_interval_to_deadline(save_abstime, &abstime); |
2769 | thread_call_enter_delayed(task->rusage_cpu_callt, abstime); |
2770 | } |
2771 | } |
2772 | |
2773 | return(0); |
2774 | } |
2775 | |
2776 | int |
2777 | task_clear_cpuusage(task_t task, int cpumon_entitled) |
2778 | { |
2779 | int retval = 0; |
2780 | |
2781 | task_lock(task); |
2782 | retval = task_clear_cpuusage_locked(task, cpumon_entitled); |
2783 | task_unlock(task); |
2784 | |
2785 | return(retval); |
2786 | } |
2787 | |
2788 | static int |
2789 | task_clear_cpuusage_locked(task_t task, int cpumon_entitled) |
2790 | { |
2791 | thread_call_t savecallt; |
2792 | |
2793 | /* cancel percentage handling if set */ |
2794 | if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) { |
2795 | task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT; |
2796 | ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0); |
2797 | task->rusage_cpu_percentage = 0; |
2798 | task->rusage_cpu_interval = 0; |
2799 | } |
2800 | |
2801 | /* |
2802 | * Disable the CPU usage monitor. |
2803 | */ |
2804 | if (cpumon_entitled) { |
2805 | task_disable_cpumon(task); |
2806 | } |
2807 | |
2808 | /* cancel deadline handling if set */ |
2809 | if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) { |
2810 | task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_DEADLINE; |
2811 | if (task->rusage_cpu_callt != 0) { |
2812 | savecallt = task->rusage_cpu_callt; |
2813 | task->rusage_cpu_callt = NULL; |
2814 | task->rusage_cpu_deadline = 0; |
2815 | task_unlock(task); |
2816 | thread_call_cancel_wait(savecallt); |
2817 | thread_call_free(savecallt); |
2818 | task_lock(task); |
2819 | } |
2820 | } |
2821 | return(0); |
2822 | } |
2823 | |
2824 | /* called by ledger unit to enforce action due to resource usage criteria being met */ |
2825 | static void |
2826 | task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t param1) |
2827 | { |
2828 | task_t task = (task_t)param0; |
2829 | (void)task_apply_resource_actions(task, TASK_POLICY_CPU_RESOURCE_USAGE); |
2830 | return; |
2831 | } |
2832 | |
2833 | |
2834 | /* |
2835 | * Routines for taskwatch and pidbind |
2836 | */ |
2837 | |
2838 | #if CONFIG_EMBEDDED |
2839 | |
2840 | lck_mtx_t task_watch_mtx; |
2841 | |
2842 | void |
2843 | task_watch_init(void) |
2844 | { |
2845 | lck_mtx_init(&task_watch_mtx, &task_lck_grp, &task_lck_attr); |
2846 | } |
2847 | |
2848 | static void |
2849 | task_watch_lock(void) |
2850 | { |
2851 | lck_mtx_lock(&task_watch_mtx); |
2852 | } |
2853 | |
2854 | static void |
2855 | task_watch_unlock(void) |
2856 | { |
2857 | lck_mtx_unlock(&task_watch_mtx); |
2858 | } |
2859 | |
2860 | static void |
2861 | add_taskwatch_locked(task_t task, task_watch_t * twp) |
2862 | { |
2863 | queue_enter(&task->task_watchers, twp, task_watch_t *, tw_links); |
2864 | task->num_taskwatchers++; |
2865 | |
2866 | } |
2867 | |
2868 | static void |
2869 | remove_taskwatch_locked(task_t task, task_watch_t * twp) |
2870 | { |
2871 | queue_remove(&task->task_watchers, twp, task_watch_t *, tw_links); |
2872 | task->num_taskwatchers--; |
2873 | } |
2874 | |
2875 | |
2876 | int |
2877 | proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind) |
2878 | { |
2879 | thread_t target_thread = NULL; |
2880 | int ret = 0, setbg = 0; |
2881 | task_watch_t *twp = NULL; |
2882 | task_t task = TASK_NULL; |
2883 | |
2884 | target_thread = task_findtid(curtask, tid); |
2885 | if (target_thread == NULL) |
2886 | return ESRCH; |
2887 | /* holds thread reference */ |
2888 | |
2889 | if (bind != 0) { |
2890 | /* task is still active ? */ |
2891 | task_lock(target_task); |
2892 | if (target_task->active == 0) { |
2893 | task_unlock(target_task); |
2894 | ret = ESRCH; |
2895 | goto out; |
2896 | } |
2897 | task_unlock(target_task); |
2898 | |
2899 | twp = (task_watch_t *)kalloc(sizeof(task_watch_t)); |
2900 | if (twp == NULL) { |
2901 | task_watch_unlock(); |
2902 | ret = ENOMEM; |
2903 | goto out; |
2904 | } |
2905 | |
2906 | bzero(twp, sizeof(task_watch_t)); |
2907 | |
2908 | task_watch_lock(); |
2909 | |
2910 | if (target_thread->taskwatch != NULL){ |
2911 | /* already bound to another task */ |
2912 | task_watch_unlock(); |
2913 | |
2914 | kfree(twp, sizeof(task_watch_t)); |
2915 | ret = EBUSY; |
2916 | goto out; |
2917 | } |
2918 | |
2919 | task_reference(target_task); |
2920 | |
2921 | setbg = proc_get_effective_task_policy(target_task, TASK_POLICY_WATCHERS_BG); |
2922 | |
2923 | twp->tw_task = target_task; /* holds the task reference */ |
2924 | twp->tw_thread = target_thread; /* holds the thread reference */ |
2925 | twp->tw_state = setbg; |
2926 | twp->tw_importance = target_thread->importance; |
2927 | |
2928 | add_taskwatch_locked(target_task, twp); |
2929 | |
2930 | target_thread->taskwatch = twp; |
2931 | |
2932 | task_watch_unlock(); |
2933 | |
2934 | if (setbg) |
2935 | set_thread_appbg(target_thread, setbg, INT_MIN); |
2936 | |
2937 | /* retain the thread reference as it is in twp */ |
2938 | target_thread = NULL; |
2939 | } else { |
2940 | /* unbind */ |
2941 | task_watch_lock(); |
2942 | if ((twp = target_thread->taskwatch) != NULL) { |
2943 | task = twp->tw_task; |
2944 | target_thread->taskwatch = NULL; |
2945 | remove_taskwatch_locked(task, twp); |
2946 | |
2947 | task_watch_unlock(); |
2948 | |
2949 | task_deallocate(task); /* drop task ref in twp */ |
2950 | set_thread_appbg(target_thread, 0, twp->tw_importance); |
2951 | thread_deallocate(target_thread); /* drop thread ref in twp */ |
2952 | kfree(twp, sizeof(task_watch_t)); |
2953 | } else { |
2954 | task_watch_unlock(); |
2955 | ret = 0; /* return success if it not alredy bound */ |
2956 | goto out; |
2957 | } |
2958 | } |
2959 | out: |
2960 | thread_deallocate(target_thread); /* drop thread ref acquired in this routine */ |
2961 | return(ret); |
2962 | } |
2963 | |
2964 | static void |
2965 | set_thread_appbg(thread_t thread, int setbg, __unused int importance) |
2966 | { |
2967 | int enable = (setbg ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE); |
2968 | |
2969 | proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_PIDBIND_BG, enable); |
2970 | } |
2971 | |
2972 | static void |
2973 | apply_appstate_watchers(task_t task) |
2974 | { |
2975 | int numwatchers = 0, i, j, setbg; |
2976 | thread_watchlist_t * threadlist; |
2977 | task_watch_t * twp; |
2978 | |
2979 | retry: |
2980 | /* if no watchers on the list return */ |
2981 | if ((numwatchers = task->num_taskwatchers) == 0) |
2982 | return; |
2983 | |
2984 | threadlist = (thread_watchlist_t *)kalloc(numwatchers*sizeof(thread_watchlist_t)); |
2985 | if (threadlist == NULL) |
2986 | return; |
2987 | |
2988 | bzero(threadlist, numwatchers*sizeof(thread_watchlist_t)); |
2989 | |
2990 | task_watch_lock(); |
2991 | /*serialize application of app state changes */ |
2992 | |
2993 | if (task->watchapplying != 0) { |
2994 | lck_mtx_sleep(&task_watch_mtx, LCK_SLEEP_DEFAULT, &task->watchapplying, THREAD_UNINT); |
2995 | task_watch_unlock(); |
2996 | kfree(threadlist, numwatchers*sizeof(thread_watchlist_t)); |
2997 | goto retry; |
2998 | } |
2999 | |
3000 | if (numwatchers != task->num_taskwatchers) { |
3001 | task_watch_unlock(); |
3002 | kfree(threadlist, numwatchers*sizeof(thread_watchlist_t)); |
3003 | goto retry; |
3004 | } |
3005 | |
3006 | setbg = proc_get_effective_task_policy(task, TASK_POLICY_WATCHERS_BG); |
3007 | |
3008 | task->watchapplying = 1; |
3009 | i = 0; |
3010 | queue_iterate(&task->task_watchers, twp, task_watch_t *, tw_links) { |
3011 | |
3012 | threadlist[i].thread = twp->tw_thread; |
3013 | thread_reference(threadlist[i].thread); |
3014 | if (setbg != 0) { |
3015 | twp->tw_importance = twp->tw_thread->importance; |
3016 | threadlist[i].importance = INT_MIN; |
3017 | } else |
3018 | threadlist[i].importance = twp->tw_importance; |
3019 | i++; |
3020 | if (i > numwatchers) |
3021 | break; |
3022 | } |
3023 | |
3024 | task_watch_unlock(); |
3025 | |
3026 | for (j = 0; j< i; j++) { |
3027 | set_thread_appbg(threadlist[j].thread, setbg, threadlist[j].importance); |
3028 | thread_deallocate(threadlist[j].thread); |
3029 | } |
3030 | kfree(threadlist, numwatchers*sizeof(thread_watchlist_t)); |
3031 | |
3032 | |
3033 | task_watch_lock(); |
3034 | task->watchapplying = 0; |
3035 | thread_wakeup_one(&task->watchapplying); |
3036 | task_watch_unlock(); |
3037 | } |
3038 | |
3039 | void |
3040 | thead_remove_taskwatch(thread_t thread) |
3041 | { |
3042 | task_watch_t * twp; |
3043 | int importance = 0; |
3044 | |
3045 | task_watch_lock(); |
3046 | if ((twp = thread->taskwatch) != NULL) { |
3047 | thread->taskwatch = NULL; |
3048 | remove_taskwatch_locked(twp->tw_task, twp); |
3049 | } |
3050 | task_watch_unlock(); |
3051 | if (twp != NULL) { |
3052 | thread_deallocate(twp->tw_thread); |
3053 | task_deallocate(twp->tw_task); |
3054 | importance = twp->tw_importance; |
3055 | kfree(twp, sizeof(task_watch_t)); |
3056 | /* remove the thread and networkbg */ |
3057 | set_thread_appbg(thread, 0, importance); |
3058 | } |
3059 | } |
3060 | |
3061 | void |
3062 | task_removewatchers(task_t task) |
3063 | { |
3064 | int numwatchers = 0, i, j; |
3065 | task_watch_t ** twplist = NULL; |
3066 | task_watch_t * twp = NULL; |
3067 | |
3068 | retry: |
3069 | if ((numwatchers = task->num_taskwatchers) == 0) |
3070 | return; |
3071 | |
3072 | twplist = (task_watch_t **)kalloc(numwatchers*sizeof(task_watch_t *)); |
3073 | if (twplist == NULL) |
3074 | return; |
3075 | |
3076 | bzero(twplist, numwatchers*sizeof(task_watch_t *)); |
3077 | |
3078 | task_watch_lock(); |
3079 | if (task->num_taskwatchers == 0) { |
3080 | task_watch_unlock(); |
3081 | goto out; |
3082 | } |
3083 | |
3084 | if (numwatchers != task->num_taskwatchers) { |
3085 | task_watch_unlock(); |
3086 | kfree(twplist, numwatchers*sizeof(task_watch_t *)); |
3087 | numwatchers = 0; |
3088 | goto retry; |
3089 | } |
3090 | |
3091 | i = 0; |
3092 | while((twp = (task_watch_t *)dequeue_head(&task->task_watchers)) != NULL) |
3093 | { |
3094 | twplist[i] = twp; |
3095 | task->num_taskwatchers--; |
3096 | |
3097 | /* |
3098 | * Since the linkage is removed and thead state cleanup is already set up, |
3099 | * remove the refernce from the thread. |
3100 | */ |
3101 | twp->tw_thread->taskwatch = NULL; /* removed linkage, clear thread holding ref */ |
3102 | i++; |
3103 | if ((task->num_taskwatchers == 0) || (i > numwatchers)) |
3104 | break; |
3105 | } |
3106 | |
3107 | task_watch_unlock(); |
3108 | |
3109 | for (j = 0; j< i; j++) { |
3110 | |
3111 | twp = twplist[j]; |
3112 | /* remove thread and network bg */ |
3113 | set_thread_appbg(twp->tw_thread, 0, twp->tw_importance); |
3114 | thread_deallocate(twp->tw_thread); |
3115 | task_deallocate(twp->tw_task); |
3116 | kfree(twp, sizeof(task_watch_t)); |
3117 | } |
3118 | |
3119 | out: |
3120 | kfree(twplist, numwatchers*sizeof(task_watch_t *)); |
3121 | |
3122 | } |
3123 | #endif /* CONFIG_EMBEDDED */ |
3124 | |
3125 | /* |
3126 | * Routines for importance donation/inheritance/boosting |
3127 | */ |
3128 | |
3129 | static void |
3130 | task_importance_update_live_donor(task_t target_task) |
3131 | { |
3132 | #if IMPORTANCE_INHERITANCE |
3133 | |
3134 | ipc_importance_task_t task_imp; |
3135 | |
3136 | task_imp = ipc_importance_for_task(target_task, FALSE); |
3137 | if (IIT_NULL != task_imp) { |
3138 | ipc_importance_task_update_live_donor(task_imp); |
3139 | ipc_importance_task_release(task_imp); |
3140 | } |
3141 | #endif /* IMPORTANCE_INHERITANCE */ |
3142 | } |
3143 | |
3144 | void |
3145 | task_importance_mark_donor(task_t task, boolean_t donating) |
3146 | { |
3147 | #if IMPORTANCE_INHERITANCE |
3148 | ipc_importance_task_t task_imp; |
3149 | |
3150 | task_imp = ipc_importance_for_task(task, FALSE); |
3151 | if (IIT_NULL != task_imp) { |
3152 | ipc_importance_task_mark_donor(task_imp, donating); |
3153 | ipc_importance_task_release(task_imp); |
3154 | } |
3155 | #endif /* IMPORTANCE_INHERITANCE */ |
3156 | } |
3157 | |
3158 | void |
3159 | task_importance_mark_live_donor(task_t task, boolean_t live_donating) |
3160 | { |
3161 | #if IMPORTANCE_INHERITANCE |
3162 | ipc_importance_task_t task_imp; |
3163 | |
3164 | task_imp = ipc_importance_for_task(task, FALSE); |
3165 | if (IIT_NULL != task_imp) { |
3166 | ipc_importance_task_mark_live_donor(task_imp, live_donating); |
3167 | ipc_importance_task_release(task_imp); |
3168 | } |
3169 | #endif /* IMPORTANCE_INHERITANCE */ |
3170 | } |
3171 | |
3172 | void |
3173 | task_importance_mark_receiver(task_t task, boolean_t receiving) |
3174 | { |
3175 | #if IMPORTANCE_INHERITANCE |
3176 | ipc_importance_task_t task_imp; |
3177 | |
3178 | task_imp = ipc_importance_for_task(task, FALSE); |
3179 | if (IIT_NULL != task_imp) { |
3180 | ipc_importance_task_mark_receiver(task_imp, receiving); |
3181 | ipc_importance_task_release(task_imp); |
3182 | } |
3183 | #endif /* IMPORTANCE_INHERITANCE */ |
3184 | } |
3185 | |
3186 | void |
3187 | task_importance_mark_denap_receiver(task_t task, boolean_t denap) |
3188 | { |
3189 | #if IMPORTANCE_INHERITANCE |
3190 | ipc_importance_task_t task_imp; |
3191 | |
3192 | task_imp = ipc_importance_for_task(task, FALSE); |
3193 | if (IIT_NULL != task_imp) { |
3194 | ipc_importance_task_mark_denap_receiver(task_imp, denap); |
3195 | ipc_importance_task_release(task_imp); |
3196 | } |
3197 | #endif /* IMPORTANCE_INHERITANCE */ |
3198 | } |
3199 | |
3200 | void |
3201 | task_importance_reset(__imp_only task_t task) |
3202 | { |
3203 | #if IMPORTANCE_INHERITANCE |
3204 | ipc_importance_task_t task_imp; |
3205 | |
3206 | /* TODO: Lower importance downstream before disconnect */ |
3207 | task_imp = task->task_imp_base; |
3208 | ipc_importance_reset(task_imp, FALSE); |
3209 | task_importance_update_live_donor(task); |
3210 | #endif /* IMPORTANCE_INHERITANCE */ |
3211 | } |
3212 | |
3213 | void |
3214 | task_importance_init_from_parent(__imp_only task_t new_task, __imp_only task_t parent_task) |
3215 | { |
3216 | #if IMPORTANCE_INHERITANCE |
3217 | ipc_importance_task_t new_task_imp = IIT_NULL; |
3218 | |
3219 | new_task->task_imp_base = NULL; |
3220 | if (!parent_task) return; |
3221 | |
3222 | if (task_is_marked_importance_donor(parent_task)) { |
3223 | new_task_imp = ipc_importance_for_task(new_task, FALSE); |
3224 | assert(IIT_NULL != new_task_imp); |
3225 | ipc_importance_task_mark_donor(new_task_imp, TRUE); |
3226 | } |
3227 | if (task_is_marked_live_importance_donor(parent_task)) { |
3228 | if (IIT_NULL == new_task_imp) |
3229 | new_task_imp = ipc_importance_for_task(new_task, FALSE); |
3230 | assert(IIT_NULL != new_task_imp); |
3231 | ipc_importance_task_mark_live_donor(new_task_imp, TRUE); |
3232 | } |
3233 | /* Do not inherit 'receiver' on fork, vfexec or true spawn */ |
3234 | if (task_is_exec_copy(new_task) && |
3235 | task_is_marked_importance_receiver(parent_task)) { |
3236 | if (IIT_NULL == new_task_imp) |
3237 | new_task_imp = ipc_importance_for_task(new_task, FALSE); |
3238 | assert(IIT_NULL != new_task_imp); |
3239 | ipc_importance_task_mark_receiver(new_task_imp, TRUE); |
3240 | } |
3241 | if (task_is_marked_importance_denap_receiver(parent_task)) { |
3242 | if (IIT_NULL == new_task_imp) |
3243 | new_task_imp = ipc_importance_for_task(new_task, FALSE); |
3244 | assert(IIT_NULL != new_task_imp); |
3245 | ipc_importance_task_mark_denap_receiver(new_task_imp, TRUE); |
3246 | } |
3247 | if (IIT_NULL != new_task_imp) { |
3248 | assert(new_task->task_imp_base == new_task_imp); |
3249 | ipc_importance_task_release(new_task_imp); |
3250 | } |
3251 | #endif /* IMPORTANCE_INHERITANCE */ |
3252 | } |
3253 | |
3254 | #if IMPORTANCE_INHERITANCE |
3255 | /* |
3256 | * Sets the task boost bit to the provided value. Does NOT run the update function. |
3257 | * |
3258 | * Task lock must be held. |
3259 | */ |
3260 | static void |
3261 | task_set_boost_locked(task_t task, boolean_t boost_active) |
3262 | { |
3263 | #if IMPORTANCE_TRACE |
3264 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_START), |
3265 | proc_selfpid(), task_pid(task), trequested_0(task), trequested_1(task), 0); |
3266 | #endif /* IMPORTANCE_TRACE */ |
3267 | |
3268 | task->requested_policy.trp_boosted = boost_active; |
3269 | |
3270 | #if IMPORTANCE_TRACE |
3271 | if (boost_active == TRUE){ |
3272 | DTRACE_BOOST2(boost, task_t, task, int, task_pid(task)); |
3273 | } else { |
3274 | DTRACE_BOOST2(unboost, task_t, task, int, task_pid(task)); |
3275 | } |
3276 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_END), |
3277 | proc_selfpid(), task_pid(task), |
3278 | trequested_0(task), trequested_1(task), 0); |
3279 | #endif /* IMPORTANCE_TRACE */ |
3280 | } |
3281 | |
3282 | /* |
3283 | * Sets the task boost bit to the provided value and applies the update. |
3284 | * |
3285 | * Task lock must be held. Must call update complete after unlocking the task. |
3286 | */ |
3287 | void |
3288 | task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t pend_token) |
3289 | { |
3290 | task_set_boost_locked(task, boost_active); |
3291 | |
3292 | task_policy_update_locked(task, pend_token); |
3293 | } |
3294 | |
3295 | /* |
3296 | * Check if this task should donate importance. |
3297 | * |
3298 | * May be called without taking the task lock. In that case, donor status can change |
3299 | * so you must check only once for each donation event. |
3300 | */ |
3301 | boolean_t |
3302 | task_is_importance_donor(task_t task) |
3303 | { |
3304 | if (task->task_imp_base == IIT_NULL) |
3305 | return FALSE; |
3306 | return ipc_importance_task_is_donor(task->task_imp_base); |
3307 | } |
3308 | |
3309 | /* |
3310 | * Query the status of the task's donor mark. |
3311 | */ |
3312 | boolean_t |
3313 | task_is_marked_importance_donor(task_t task) |
3314 | { |
3315 | if (task->task_imp_base == IIT_NULL) |
3316 | return FALSE; |
3317 | return ipc_importance_task_is_marked_donor(task->task_imp_base); |
3318 | } |
3319 | |
3320 | /* |
3321 | * Query the status of the task's live donor and donor mark. |
3322 | */ |
3323 | boolean_t |
3324 | task_is_marked_live_importance_donor(task_t task) |
3325 | { |
3326 | if (task->task_imp_base == IIT_NULL) |
3327 | return FALSE; |
3328 | return ipc_importance_task_is_marked_live_donor(task->task_imp_base); |
3329 | } |
3330 | |
3331 | |
3332 | /* |
3333 | * This routine may be called without holding task lock |
3334 | * since the value of imp_receiver can never be unset. |
3335 | */ |
3336 | boolean_t |
3337 | task_is_importance_receiver(task_t task) |
3338 | { |
3339 | if (task->task_imp_base == IIT_NULL) |
3340 | return FALSE; |
3341 | return ipc_importance_task_is_marked_receiver(task->task_imp_base); |
3342 | } |
3343 | |
3344 | /* |
3345 | * Query the task's receiver mark. |
3346 | */ |
3347 | boolean_t |
3348 | task_is_marked_importance_receiver(task_t task) |
3349 | { |
3350 | if (task->task_imp_base == IIT_NULL) |
3351 | return FALSE; |
3352 | return ipc_importance_task_is_marked_receiver(task->task_imp_base); |
3353 | } |
3354 | |
3355 | /* |
3356 | * This routine may be called without holding task lock |
3357 | * since the value of de-nap receiver can never be unset. |
3358 | */ |
3359 | boolean_t |
3360 | task_is_importance_denap_receiver(task_t task) |
3361 | { |
3362 | if (task->task_imp_base == IIT_NULL) |
3363 | return FALSE; |
3364 | return ipc_importance_task_is_denap_receiver(task->task_imp_base); |
3365 | } |
3366 | |
3367 | /* |
3368 | * Query the task's de-nap receiver mark. |
3369 | */ |
3370 | boolean_t |
3371 | task_is_marked_importance_denap_receiver(task_t task) |
3372 | { |
3373 | if (task->task_imp_base == IIT_NULL) |
3374 | return FALSE; |
3375 | return ipc_importance_task_is_marked_denap_receiver(task->task_imp_base); |
3376 | } |
3377 | |
3378 | /* |
3379 | * This routine may be called without holding task lock |
3380 | * since the value of imp_receiver can never be unset. |
3381 | */ |
3382 | boolean_t |
3383 | task_is_importance_receiver_type(task_t task) |
3384 | { |
3385 | if (task->task_imp_base == IIT_NULL) |
3386 | return FALSE; |
3387 | return (task_is_importance_receiver(task) || |
3388 | task_is_importance_denap_receiver(task)); |
3389 | } |
3390 | |
3391 | /* |
3392 | * External importance assertions are managed by the process in userspace |
3393 | * Internal importance assertions are the responsibility of the kernel |
3394 | * Assertions are changed from internal to external via task_importance_externalize_assertion |
3395 | */ |
3396 | |
3397 | int |
3398 | task_importance_hold_internal_assertion(task_t target_task, uint32_t count) |
3399 | { |
3400 | ipc_importance_task_t task_imp; |
3401 | kern_return_t ret; |
3402 | |
3403 | /* may be first time, so allow for possible importance setup */ |
3404 | task_imp = ipc_importance_for_task(target_task, FALSE); |
3405 | if (IIT_NULL == task_imp) { |
3406 | return EOVERFLOW; |
3407 | } |
3408 | ret = ipc_importance_task_hold_internal_assertion(task_imp, count); |
3409 | ipc_importance_task_release(task_imp); |
3410 | |
3411 | return (KERN_SUCCESS != ret) ? ENOTSUP : 0; |
3412 | } |
3413 | |
3414 | int |
3415 | task_importance_hold_file_lock_assertion(task_t target_task, uint32_t count) |
3416 | { |
3417 | ipc_importance_task_t task_imp; |
3418 | kern_return_t ret; |
3419 | |
3420 | /* may be first time, so allow for possible importance setup */ |
3421 | task_imp = ipc_importance_for_task(target_task, FALSE); |
3422 | if (IIT_NULL == task_imp) { |
3423 | return EOVERFLOW; |
3424 | } |
3425 | ret = ipc_importance_task_hold_file_lock_assertion(task_imp, count); |
3426 | ipc_importance_task_release(task_imp); |
3427 | |
3428 | return (KERN_SUCCESS != ret) ? ENOTSUP : 0; |
3429 | } |
3430 | |
3431 | int |
3432 | task_importance_hold_legacy_external_assertion(task_t target_task, uint32_t count) |
3433 | { |
3434 | ipc_importance_task_t task_imp; |
3435 | kern_return_t ret; |
3436 | |
3437 | /* must already have set up an importance */ |
3438 | task_imp = target_task->task_imp_base; |
3439 | if (IIT_NULL == task_imp) { |
3440 | return EOVERFLOW; |
3441 | } |
3442 | ret = ipc_importance_task_hold_legacy_external_assertion(task_imp, count); |
3443 | return (KERN_SUCCESS != ret) ? ENOTSUP : 0; |
3444 | } |
3445 | |
3446 | int |
3447 | task_importance_drop_file_lock_assertion(task_t target_task, uint32_t count) |
3448 | { |
3449 | ipc_importance_task_t task_imp; |
3450 | kern_return_t ret; |
3451 | |
3452 | /* must already have set up an importance */ |
3453 | task_imp = target_task->task_imp_base; |
3454 | if (IIT_NULL == task_imp) { |
3455 | return EOVERFLOW; |
3456 | } |
3457 | ret = ipc_importance_task_drop_file_lock_assertion(target_task->task_imp_base, count); |
3458 | return (KERN_SUCCESS != ret) ? EOVERFLOW : 0; |
3459 | } |
3460 | |
3461 | int |
3462 | task_importance_drop_legacy_external_assertion(task_t target_task, uint32_t count) |
3463 | { |
3464 | ipc_importance_task_t task_imp; |
3465 | kern_return_t ret; |
3466 | |
3467 | /* must already have set up an importance */ |
3468 | task_imp = target_task->task_imp_base; |
3469 | if (IIT_NULL == task_imp) { |
3470 | return EOVERFLOW; |
3471 | } |
3472 | ret = ipc_importance_task_drop_legacy_external_assertion(task_imp, count); |
3473 | return (KERN_SUCCESS != ret) ? EOVERFLOW : 0; |
3474 | } |
3475 | |
3476 | static void |
3477 | task_add_importance_watchport(task_t task, mach_port_t port, int *boostp) |
3478 | { |
3479 | int boost = 0; |
3480 | |
3481 | __imptrace_only int released_pid = 0; |
3482 | __imptrace_only int pid = task_pid(task); |
3483 | |
3484 | ipc_importance_task_t release_imp_task = IIT_NULL; |
3485 | |
3486 | if (IP_VALID(port) != 0) { |
3487 | ipc_importance_task_t new_imp_task = ipc_importance_for_task(task, FALSE); |
3488 | |
3489 | ip_lock(port); |
3490 | |
3491 | /* |
3492 | * The port must have been marked tempowner already. |
3493 | * This also filters out ports whose receive rights |
3494 | * are already enqueued in a message, as you can't |
3495 | * change the right's destination once it's already |
3496 | * on its way. |
3497 | */ |
3498 | if (port->ip_tempowner != 0) { |
3499 | assert(port->ip_impdonation != 0); |
3500 | |
3501 | boost = port->ip_impcount; |
3502 | if (IIT_NULL != port->ip_imp_task) { |
3503 | /* |
3504 | * if this port is already bound to a task, |
3505 | * release the task reference and drop any |
3506 | * watchport-forwarded boosts |
3507 | */ |
3508 | release_imp_task = port->ip_imp_task; |
3509 | port->ip_imp_task = IIT_NULL; |
3510 | } |
3511 | |
3512 | /* mark the port is watching another task (reference held in port->ip_imp_task) */ |
3513 | if (ipc_importance_task_is_marked_receiver(new_imp_task)) { |
3514 | port->ip_imp_task = new_imp_task; |
3515 | new_imp_task = IIT_NULL; |
3516 | } |
3517 | } |
3518 | ip_unlock(port); |
3519 | |
3520 | if (IIT_NULL != new_imp_task) { |
3521 | ipc_importance_task_release(new_imp_task); |
3522 | } |
3523 | |
3524 | if (IIT_NULL != release_imp_task) { |
3525 | if (boost > 0) |
3526 | ipc_importance_task_drop_internal_assertion(release_imp_task, boost); |
3527 | |
3528 | // released_pid = task_pid(release_imp_task); /* TODO: Need ref-safe way to get pid */ |
3529 | ipc_importance_task_release(release_imp_task); |
3530 | } |
3531 | #if IMPORTANCE_TRACE |
3532 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_WATCHPORT, 0)) | DBG_FUNC_NONE, |
3533 | proc_selfpid(), pid, boost, released_pid, 0); |
3534 | #endif /* IMPORTANCE_TRACE */ |
3535 | } |
3536 | |
3537 | *boostp = boost; |
3538 | return; |
3539 | } |
3540 | |
3541 | #endif /* IMPORTANCE_INHERITANCE */ |
3542 | |
3543 | /* |
3544 | * Routines for VM to query task importance |
3545 | */ |
3546 | |
3547 | |
3548 | /* |
3549 | * Order to be considered while estimating importance |
3550 | * for low memory notification and purging purgeable memory. |
3551 | */ |
3552 | #define TASK_IMPORTANCE_FOREGROUND 4 |
3553 | #define TASK_IMPORTANCE_NOTDARWINBG 1 |
3554 | |
3555 | |
3556 | /* |
3557 | * (Un)Mark the task as a privileged listener for memory notifications. |
3558 | * if marked, this task will be among the first to be notified amongst |
3559 | * the bulk of all other tasks when the system enters a pressure level |
3560 | * of interest to this task. |
3561 | */ |
3562 | int |
3563 | task_low_mem_privileged_listener(task_t task, boolean_t new_value, boolean_t *old_value) |
3564 | { |
3565 | if (old_value != NULL) { |
3566 | *old_value = (boolean_t)task->low_mem_privileged_listener; |
3567 | } else { |
3568 | task_lock(task); |
3569 | task->low_mem_privileged_listener = (uint32_t)new_value; |
3570 | task_unlock(task); |
3571 | } |
3572 | |
3573 | return 0; |
3574 | } |
3575 | |
3576 | /* |
3577 | * Checks if the task is already notified. |
3578 | * |
3579 | * Condition: task lock should be held while calling this function. |
3580 | */ |
3581 | boolean_t |
3582 | task_has_been_notified(task_t task, int pressurelevel) |
3583 | { |
3584 | if (task == NULL) { |
3585 | return FALSE; |
3586 | } |
3587 | |
3588 | if (pressurelevel == kVMPressureWarning) |
3589 | return (task->low_mem_notified_warn ? TRUE : FALSE); |
3590 | else if (pressurelevel == kVMPressureCritical) |
3591 | return (task->low_mem_notified_critical ? TRUE : FALSE); |
3592 | else |
3593 | return TRUE; |
3594 | } |
3595 | |
3596 | |
3597 | /* |
3598 | * Checks if the task is used for purging. |
3599 | * |
3600 | * Condition: task lock should be held while calling this function. |
3601 | */ |
3602 | boolean_t |
3603 | task_used_for_purging(task_t task, int pressurelevel) |
3604 | { |
3605 | if (task == NULL) { |
3606 | return FALSE; |
3607 | } |
3608 | |
3609 | if (pressurelevel == kVMPressureWarning) |
3610 | return (task->purged_memory_warn ? TRUE : FALSE); |
3611 | else if (pressurelevel == kVMPressureCritical) |
3612 | return (task->purged_memory_critical ? TRUE : FALSE); |
3613 | else |
3614 | return TRUE; |
3615 | } |
3616 | |
3617 | |
3618 | /* |
3619 | * Mark the task as notified with memory notification. |
3620 | * |
3621 | * Condition: task lock should be held while calling this function. |
3622 | */ |
3623 | void |
3624 | task_mark_has_been_notified(task_t task, int pressurelevel) |
3625 | { |
3626 | if (task == NULL) { |
3627 | return; |
3628 | } |
3629 | |
3630 | if (pressurelevel == kVMPressureWarning) |
3631 | task->low_mem_notified_warn = 1; |
3632 | else if (pressurelevel == kVMPressureCritical) |
3633 | task->low_mem_notified_critical = 1; |
3634 | } |
3635 | |
3636 | |
3637 | /* |
3638 | * Mark the task as purged. |
3639 | * |
3640 | * Condition: task lock should be held while calling this function. |
3641 | */ |
3642 | void |
3643 | task_mark_used_for_purging(task_t task, int pressurelevel) |
3644 | { |
3645 | if (task == NULL) { |
3646 | return; |
3647 | } |
3648 | |
3649 | if (pressurelevel == kVMPressureWarning) |
3650 | task->purged_memory_warn = 1; |
3651 | else if (pressurelevel == kVMPressureCritical) |
3652 | task->purged_memory_critical = 1; |
3653 | } |
3654 | |
3655 | |
3656 | /* |
3657 | * Mark the task eligible for low memory notification. |
3658 | * |
3659 | * Condition: task lock should be held while calling this function. |
3660 | */ |
3661 | void |
3662 | task_clear_has_been_notified(task_t task, int pressurelevel) |
3663 | { |
3664 | if (task == NULL) { |
3665 | return; |
3666 | } |
3667 | |
3668 | if (pressurelevel == kVMPressureWarning) |
3669 | task->low_mem_notified_warn = 0; |
3670 | else if (pressurelevel == kVMPressureCritical) |
3671 | task->low_mem_notified_critical = 0; |
3672 | } |
3673 | |
3674 | |
3675 | /* |
3676 | * Mark the task eligible for purging its purgeable memory. |
3677 | * |
3678 | * Condition: task lock should be held while calling this function. |
3679 | */ |
3680 | void |
3681 | task_clear_used_for_purging(task_t task) |
3682 | { |
3683 | if (task == NULL) { |
3684 | return; |
3685 | } |
3686 | |
3687 | task->purged_memory_warn = 0; |
3688 | task->purged_memory_critical = 0; |
3689 | } |
3690 | |
3691 | |
3692 | /* |
3693 | * Estimate task importance for purging its purgeable memory |
3694 | * and low memory notification. |
3695 | * |
3696 | * Importance is calculated in the following order of criteria: |
3697 | * -Task role : Background vs Foreground |
3698 | * -Boost status: Not boosted vs Boosted |
3699 | * -Darwin BG status. |
3700 | * |
3701 | * Returns: Estimated task importance. Less important task will have lower |
3702 | * estimated importance. |
3703 | */ |
3704 | int |
3705 | task_importance_estimate(task_t task) |
3706 | { |
3707 | int task_importance = 0; |
3708 | |
3709 | if (task == NULL) { |
3710 | return 0; |
3711 | } |
3712 | |
3713 | if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) |
3714 | task_importance += TASK_IMPORTANCE_FOREGROUND; |
3715 | |
3716 | if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0) |
3717 | task_importance += TASK_IMPORTANCE_NOTDARWINBG; |
3718 | |
3719 | return task_importance; |
3720 | } |
3721 | |
3722 | boolean_t |
3723 | task_has_assertions(task_t task) |
3724 | { |
3725 | return (task->task_imp_base->iit_assertcnt? TRUE : FALSE); |
3726 | } |
3727 | |
3728 | |
3729 | kern_return_t |
3730 | send_resource_violation(typeof(send_cpu_usage_violation) sendfunc, |
3731 | task_t violator, |
3732 | struct ledger_entry_info *linfo, |
3733 | resource_notify_flags_t flags) |
3734 | { |
3735 | #ifndef MACH_BSD |
3736 | return KERN_NOT_SUPPORTED; |
3737 | #else |
3738 | kern_return_t kr = KERN_SUCCESS; |
3739 | proc_t proc = NULL; |
3740 | posix_path_t proc_path = "" ; |
3741 | proc_name_t procname = "<unknown>" ; |
3742 | int pid = -1; |
3743 | clock_sec_t secs; |
3744 | clock_nsec_t nsecs; |
3745 | mach_timespec_t timestamp; |
3746 | thread_t curthread = current_thread(); |
3747 | ipc_port_t dstport = MACH_PORT_NULL; |
3748 | |
3749 | if (!violator) { |
3750 | kr = KERN_INVALID_ARGUMENT; goto finish; |
3751 | } |
3752 | |
3753 | /* extract violator information */ |
3754 | task_lock(violator); |
3755 | if (!(proc = get_bsdtask_info(violator))) { |
3756 | task_unlock(violator); |
3757 | kr = KERN_INVALID_ARGUMENT; goto finish; |
3758 | } |
3759 | (void)mig_strncpy(procname, proc_best_name(proc), sizeof(procname)); |
3760 | pid = task_pid(violator); |
3761 | if (flags & kRNFatalLimitFlag) { |
3762 | kr = proc_pidpathinfo_internal(proc, 0, proc_path, |
3763 | sizeof(proc_path), NULL); |
3764 | } |
3765 | task_unlock(violator); |
3766 | if (kr) goto finish; |
3767 | |
3768 | /* violation time ~ now */ |
3769 | clock_get_calendar_nanotime(&secs, &nsecs); |
3770 | timestamp.tv_sec = (int32_t)secs; |
3771 | timestamp.tv_nsec = (int32_t)nsecs; |
3772 | /* 25567702 tracks widening mach_timespec_t */ |
3773 | |
3774 | /* send message */ |
3775 | kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE, |
3776 | HOST_RESOURCE_NOTIFY_PORT, &dstport); |
3777 | if (kr) goto finish; |
3778 | |
3779 | thread_set_honor_qlimit(curthread); |
3780 | kr = sendfunc(dstport, |
3781 | procname, pid, proc_path, timestamp, |
3782 | linfo->lei_balance, linfo->lei_last_refill, |
3783 | linfo->lei_limit, linfo->lei_refill_period, |
3784 | flags); |
3785 | thread_clear_honor_qlimit(curthread); |
3786 | |
3787 | ipc_port_release_send(dstport); |
3788 | |
3789 | finish: |
3790 | return kr; |
3791 | #endif /* MACH_BSD */ |
3792 | } |
3793 | |
3794 | |
3795 | /* |
3796 | * Resource violations trace four 64-bit integers. For K32, two additional |
3797 | * codes are allocated, the first with the low nibble doubled. So if the K64 |
3798 | * code is 0x042, the K32 codes would be 0x044 and 0x45. |
3799 | */ |
3800 | #ifdef __LP64__ |
3801 | void |
3802 | trace_resource_violation(uint16_t code, |
3803 | struct ledger_entry_info *linfo) |
3804 | { |
3805 | KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, code), |
3806 | linfo->lei_balance, linfo->lei_last_refill, |
3807 | linfo->lei_limit, linfo->lei_refill_period); |
3808 | } |
3809 | #else /* K32 */ |
3810 | /* TODO: create/find a trace_two_LLs() for K32 systems */ |
3811 | #define MASK32 0xffffffff |
3812 | void |
3813 | trace_resource_violation(uint16_t code, |
3814 | struct ledger_entry_info *linfo) |
3815 | { |
3816 | int8_t lownibble = (code & 0x3) * 2; |
3817 | int16_t codeA = (code & 0xffc) | lownibble; |
3818 | int16_t codeB = codeA + 1; |
3819 | |
3820 | int32_t balance_high = (linfo->lei_balance >> 32) & MASK32; |
3821 | int32_t balance_low = linfo->lei_balance & MASK32; |
3822 | int32_t last_refill_high = (linfo->lei_last_refill >> 32) & MASK32; |
3823 | int32_t last_refill_low = linfo->lei_last_refill & MASK32; |
3824 | |
3825 | int32_t limit_high = (linfo->lei_limit >> 32) & MASK32; |
3826 | int32_t limit_low = linfo->lei_limit & MASK32; |
3827 | int32_t refill_period_high = (linfo->lei_refill_period >> 32) & MASK32; |
3828 | int32_t refill_period_low = linfo->lei_refill_period & MASK32; |
3829 | |
3830 | KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeA), |
3831 | balance_high, balance_low, |
3832 | last_refill_high, last_refill_low); |
3833 | KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeB), |
3834 | limit_high, limit_low, |
3835 | refill_period_high, refill_period_low); |
3836 | } |
3837 | #endif /* K64/K32 */ |
3838 | |