1 | /* |
2 | * Copyright (c) 2006-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | * |
28 | */ |
29 | |
30 | #include <kern/sched_prim.h> |
31 | #include <kern/kalloc.h> |
32 | #include <kern/assert.h> |
33 | #include <kern/debug.h> |
34 | #include <kern/locks.h> |
35 | #include <kern/task.h> |
36 | #include <kern/thread.h> |
37 | #include <kern/host.h> |
38 | #include <kern/policy_internal.h> |
39 | #include <kern/thread_group.h> |
40 | |
41 | #include <corpses/task_corpse.h> |
42 | #include <libkern/libkern.h> |
43 | #include <mach/mach_time.h> |
44 | #include <mach/task.h> |
45 | #include <mach/host_priv.h> |
46 | #include <mach/mach_host.h> |
47 | #include <pexpert/pexpert.h> |
48 | #include <sys/coalition.h> |
49 | #include <sys/code_signing.h> |
50 | #include <sys/kern_event.h> |
51 | #include <sys/proc.h> |
52 | #include <sys/proc_info.h> |
53 | #include <sys/reason.h> |
54 | #include <sys/signal.h> |
55 | #include <sys/signalvar.h> |
56 | #include <sys/sysctl.h> |
57 | #include <sys/sysproto.h> |
58 | #include <sys/spawn_internal.h> |
59 | #include <sys/wait.h> |
60 | #include <sys/tree.h> |
61 | #include <sys/priv.h> |
62 | #include <vm/pmap.h> |
63 | #include <vm/vm_reclaim_internal.h> |
64 | #include <vm/vm_pageout.h> |
65 | #include <vm/vm_protos.h> |
66 | #include <mach/machine/sdt.h> |
67 | #include <libkern/section_keywords.h> |
68 | #include <stdatomic.h> |
69 | #include <os/atomic_private.h> |
70 | |
71 | #include <IOKit/IOBSD.h> |
72 | |
73 | #if CONFIG_MACF |
74 | #include <security/mac_framework.h> |
75 | #endif |
76 | |
77 | #if CONFIG_FREEZE |
78 | #include <vm/vm_map.h> |
79 | #endif /* CONFIG_FREEZE */ |
80 | |
81 | #include <kern/kern_memorystatus_internal.h> |
82 | #include <sys/kern_memorystatus.h> |
83 | #include <sys/kern_memorystatus_freeze.h> |
84 | #include <sys/kern_memorystatus_notify.h> |
85 | #include <sys/kdebug_triage.h> |
86 | |
87 | |
88 | extern uint32_t vm_compressor_pool_size(void); |
89 | extern uint32_t vm_compressor_fragmentation_level(void); |
90 | extern uint32_t vm_compression_ratio(void); |
91 | |
92 | pid_t memorystatus_freeze_last_pid_thawed = 0; |
93 | uint64_t memorystatus_freeze_last_pid_thawed_ts = 0; |
94 | |
95 | int block_corpses = 0; /* counter to block new corpses if jetsam purges them */ |
96 | |
97 | /* For logging clarity */ |
static const char *memorystatus_kill_cause_name[] = {
"", /* kMemorystatusInvalid */
"jettisoned", /* kMemorystatusKilled */
"highwater", /* kMemorystatusKilledHiwat */
"vnode-limit", /* kMemorystatusKilledVnodes */
"vm-pageshortage", /* kMemorystatusKilledVMPageShortage */
"proc-thrashing", /* kMemorystatusKilledProcThrashing */
"fc-thrashing", /* kMemorystatusKilledFCThrashing */
"per-process-limit", /* kMemorystatusKilledPerProcessLimit */
"disk-space-shortage", /* kMemorystatusKilledDiskSpaceShortage */
"idle-exit", /* kMemorystatusKilledIdleExit */
"zone-map-exhaustion", /* kMemorystatusKilledZoneMapExhaustion */
"vm-compressor-thrashing", /* kMemorystatusKilledVMCompressorThrashing */
"vm-compressor-space-shortage", /* kMemorystatusKilledVMCompressorSpaceShortage */
"low-swap", /* kMemorystatusKilledLowSwap */
"sustained-memory-pressure", /* kMemorystatusKilledSustainedPressure */
"vm-pageout-starvation", /* kMemorystatusKilledVMPageoutStarvation */
};
116 | |
117 | static const char * |
118 | memorystatus_priority_band_name(int32_t priority) |
119 | { |
120 | switch (priority) { |
case JETSAM_PRIORITY_FOREGROUND:
return "FOREGROUND";
case JETSAM_PRIORITY_AUDIO_AND_ACCESSORY:
return "AUDIO_AND_ACCESSORY";
case JETSAM_PRIORITY_CONDUCTOR:
return "CONDUCTOR";
case JETSAM_PRIORITY_DRIVER_APPLE:
return "DRIVER_APPLE";
case JETSAM_PRIORITY_HOME:
return "HOME";
case JETSAM_PRIORITY_EXECUTIVE:
return "EXECUTIVE";
case JETSAM_PRIORITY_IMPORTANT:
return "IMPORTANT";
case JETSAM_PRIORITY_CRITICAL:
return "CRITICAL";
137 | } |
138 | |
139 | return "?" ; |
140 | } |
141 | |
142 | bool |
143 | is_reason_thrashing(unsigned cause) |
144 | { |
145 | switch (cause) { |
146 | case kMemorystatusKilledFCThrashing: |
147 | case kMemorystatusKilledVMCompressorThrashing: |
148 | case kMemorystatusKilledVMCompressorSpaceShortage: |
149 | return true; |
150 | default: |
151 | return false; |
152 | } |
153 | } |
154 | |
155 | bool |
156 | is_reason_zone_map_exhaustion(unsigned cause) |
157 | { |
158 | return cause == kMemorystatusKilledZoneMapExhaustion; |
159 | } |
160 | |
161 | /* |
162 | * Returns the current zone map size and capacity to include in the jetsam snapshot. |
163 | * Defined in zalloc.c |
164 | */ |
165 | extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity); |
166 | |
167 | /* |
168 | * Returns the name of the largest zone and its size to include in the jetsam snapshot. |
169 | * Defined in zalloc.c |
170 | */ |
171 | extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size); |
172 | |
173 | /* |
174 | * Active / Inactive limit support |
175 | * proc list must be locked |
176 | * |
177 | * The SET_*** macros are used to initialize a limit |
178 | * for the first time. |
179 | * |
* The CACHE_*** macros are used to cache the limit that will
181 | * soon be in effect down in the ledgers. |
182 | */ |
183 | |
184 | #define SET_ACTIVE_LIMITS_LOCKED(p, limit, is_fatal) \ |
185 | MACRO_BEGIN \ |
186 | (p)->p_memstat_memlimit_active = (limit); \ |
187 | if (is_fatal) { \ |
188 | (p)->p_memstat_state |= P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL; \ |
189 | } else { \ |
190 | (p)->p_memstat_state &= ~P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL; \ |
191 | } \ |
192 | MACRO_END |
193 | |
194 | #define SET_INACTIVE_LIMITS_LOCKED(p, limit, is_fatal) \ |
195 | MACRO_BEGIN \ |
196 | (p)->p_memstat_memlimit_inactive = (limit); \ |
197 | if (is_fatal) { \ |
198 | (p)->p_memstat_state |= P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL; \ |
199 | } else { \ |
200 | (p)->p_memstat_state &= ~P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL; \ |
201 | } \ |
202 | MACRO_END |
203 | |
204 | #define CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal) \ |
205 | MACRO_BEGIN \ |
206 | (p)->p_memstat_memlimit = (p)->p_memstat_memlimit_active; \ |
207 | if ((p)->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL) { \ |
208 | (p)->p_memstat_state |= P_MEMSTAT_FATAL_MEMLIMIT; \ |
209 | is_fatal = TRUE; \ |
210 | } else { \ |
211 | (p)->p_memstat_state &= ~P_MEMSTAT_FATAL_MEMLIMIT; \ |
212 | is_fatal = FALSE; \ |
213 | } \ |
214 | MACRO_END |
215 | |
216 | #define CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal) \ |
217 | MACRO_BEGIN \ |
218 | (p)->p_memstat_memlimit = (p)->p_memstat_memlimit_inactive; \ |
219 | if ((p)->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL) { \ |
220 | (p)->p_memstat_state |= P_MEMSTAT_FATAL_MEMLIMIT; \ |
221 | is_fatal = TRUE; \ |
222 | } else { \ |
223 | (p)->p_memstat_state &= ~P_MEMSTAT_FATAL_MEMLIMIT; \ |
224 | is_fatal = FALSE; \ |
225 | } \ |
226 | MACRO_END |
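
/*
 * Illustrative usage sketch (mirrors memorystatus_raise_memlimit below);
 * assumes proc_list_lock is held:
 *
 *	boolean_t is_fatal;
 *	SET_ACTIVE_LIMITS_LOCKED(p, memlimit_mb_active, active_is_fatal);
 *	SET_INACTIVE_LIMITS_LOCKED(p, memlimit_mb_inactive, inactive_is_fatal);
 *	if (proc_jetsam_state_is_active_locked(p)) {
 *		CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal);
 *	} else {
 *		CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal);
 *	}
 */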
227 | |
228 | |
229 | #pragma mark General Tunables |
230 | |
231 | #define MEMORYSTATUS_SMALL_MEMORY_THRESHOLD (3UL * (1UL << 30)) |
232 | #define MEMORYSTATUS_MEDIUM_MEMORY_THRESHOLD (6UL * (1UL << 30)) |
233 | |
234 | #define MEMORYSTATUS_MORE_FREE_OFFSET_PERCENTAGE 5UL |
235 | #define MEMORYSTATUS_AGGR_SYSPROC_AGING_PERCENTAGE 7UL |
236 | #define MEMORYSTATUS_DELTA_PERCENTAGE_LARGE 4UL |
237 | #define MEMORYSTATUS_DELTA_PERCENTAGE_SMALL 5UL |
238 | |
239 | /* |
* Fall back to these percentages/ratios if an MB value is not provided via EDT
241 | * DRAM (GB) | critical | idle | pressure | freeze |
242 | * (0,3] | 5% | 10% | 15% | 50% |
243 | * (3,6] | 4% | 9% | 15% | 50% |
244 | * (6,∞) | 4% | 8% | 12% | 50% |
245 | */ |
246 | |
247 | #define MEMORYSTATUS_CRITICAL_BASE_PERCENTAGE_SMALL 5UL |
248 | #define MEMORYSTATUS_CRITICAL_BASE_PERCENTAGE_LARGE 4UL |
249 | |
250 | #define MEMORYSTATUS_CRITICAL_IDLE_RATIO_NUM 2UL |
251 | #define MEMORYSTATUS_CRITICAL_IDLE_RATIO_DENOM 1UL |
252 | #define MEMORYSTATUS_PRESSURE_RATIO_NUM 3UL |
253 | #define MEMORYSTATUS_PRESSURE_RATIO_DENOM 1UL |
254 | |
255 | /* |
256 | * For historical reasons, devices with "medium"-sized memory configs have a critical:idle:pressure ratio of |
257 | * 4:9:15. This ratio is preserved for these devices when a fixed-mb base value has not been provided by EDT/boot-arg; |
258 | * all other devices use a 1:2:3 ratio. |
259 | */ |
260 | #define MEMORYSTATUS_CRITICAL_IDLE_RATIO_NUM_MEDIUM 9UL |
261 | #define MEMORYSTATUS_CRITICAL_IDLE_RATIO_DENOM_MEDIUM 4UL |
262 | #define MEMORYSTATUS_PRESSURE_RATIO_NUM_MEDIUM 15UL |
263 | #define MEMORYSTATUS_PRESSURE_RATIO_DENOM_MEDIUM 4UL |
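
/*
 * Worked example (hypothetical 4 GB "medium" device with no EDT/boot-arg
 * value): critical = 4% of 4096 MB ~= 164 MB, idle = 164 * 9 / 4 = 369 MB,
 * pressure = 164 * 15 / 4 = 615 MB, preserving the 4:9:15 ratio above.
 */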
264 | |
265 | #if CONFIG_JETSAM |
266 | static int32_t memorystatus_get_default_task_active_limit(proc_t p); |
267 | #endif /* CONFIG_JETSAM */ |
268 | |
269 | /* |
270 | * default jetsam snapshot support |
271 | */ |
272 | memorystatus_jetsam_snapshot_t *memorystatus_jetsam_snapshot; |
273 | |
274 | #if CONFIG_FREEZE |
275 | memorystatus_jetsam_snapshot_t *memorystatus_jetsam_snapshot_freezer; |
276 | /* |
277 | * The size of the freezer snapshot is given by memorystatus_jetsam_snapshot_max / JETSAM_SNAPSHOT_FREEZER_MAX_FACTOR |
278 | * The freezer snapshot can be much smaller than the default snapshot |
* because it only includes apps that have been killed, and dasd consumes it every 30 minutes.
280 | * Since the snapshots are always wired we don't want to overallocate too much. |
281 | */ |
282 | #define JETSAM_SNAPSHOT_FREEZER_MAX_FACTOR 20 |
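/*
 * Sizing sketch: with a hypothetical memorystatus_jetsam_snapshot_max of
 * 1000 entries, the freezer snapshot is capped at 1000 / 20 = 50 entries.
 */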
283 | unsigned int memorystatus_jetsam_snapshot_freezer_max; |
284 | unsigned int memorystatus_jetsam_snapshot_freezer_size; |
TUNABLE(bool, memorystatus_jetsam_use_freezer_snapshot, "kern.jetsam_user_freezer_snapshot", true);
286 | |
287 | #define MEMORYSTATUS_FREEZE_THRESHOLD_PERCENTAGE 50UL |
TUNABLE_DT(uint32_t, memorystatus_freeze_threshold_mb, "/defaults", "kern.memstat_freeze_mb",
"memorystatus_freeze_threshold_mb", 0, TUNABLE_DT_NONE);
290 | #endif /* CONFIG_FREEZE */ |
291 | |
292 | unsigned int memorystatus_jetsam_snapshot_count = 0; |
293 | unsigned int memorystatus_jetsam_snapshot_max = 0; |
294 | unsigned int memorystatus_jetsam_snapshot_size = 0; |
295 | uint64_t memorystatus_jetsam_snapshot_last_timestamp = 0; |
296 | uint64_t memorystatus_jetsam_snapshot_timeout = 0; |
297 | |
298 | #if DEVELOPMENT || DEBUG |
299 | /* |
300 | * On development and debug kernels, we allow one pid to take ownership |
301 | * of some memorystatus data structures for testing purposes (via memorystatus_control). |
302 | * If there's an owner, then only they may consume the jetsam snapshot & set freezer probabilities. |
* This is used when testing these interfaces to avoid racing with other
304 | * processes on the system that typically use them (namely OSAnalytics & dasd). |
305 | */ |
306 | static pid_t memorystatus_testing_pid = 0; |
SYSCTL_INT(_kern, OID_AUTO, memorystatus_testing_pid, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_testing_pid, 0, "");
308 | #endif /* DEVELOPMENT || DEBUG */ |
309 | static void memorystatus_init_jetsam_snapshot_header(memorystatus_jetsam_snapshot_t *snapshot); |
310 | |
311 | /* General memorystatus stuff */ |
312 | |
313 | uint64_t memorystatus_sysprocs_idle_delay_time = 0; |
314 | uint64_t memorystatus_apps_idle_delay_time = 0; |
315 | /* 2GB devices support an entitlement for a higher app memory limit of "almost 2GB". */ |
static int32_t memorystatus_ios13extended_footprint_limit_mb = 1800;
317 | |
318 | /* Some devices give entitled apps a higher memory limit */ |
TUNABLE_DT_WRITEABLE(int32_t, memorystatus_entitled_max_task_footprint_mb, "/defaults", "kern.entitled_max_task_pmem", "entitled_max_task_pmem", 0, TUNABLE_DT_NONE);
320 | |
321 | #if __arm64__ |
322 | #if DEVELOPMENT || DEBUG |
SYSCTL_INT(_kern, OID_AUTO, ios13extended_footprint_limit_mb, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_ios13extended_footprint_limit_mb, 0, "");
SYSCTL_INT(_kern, OID_AUTO, entitled_max_task_pmem, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, &memorystatus_entitled_max_task_footprint_mb, 0, "");
#else /* !(DEVELOPMENT || DEBUG) */
SYSCTL_INT(_kern, OID_AUTO, entitled_max_task_pmem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_KERN, &memorystatus_entitled_max_task_footprint_mb, 0, "");
327 | #endif /* DEVELOPMENT || DEBUG */ |
328 | #endif /* __arm64__ */ |
329 | |
330 | #pragma mark Logging |
331 | |
332 | os_log_t memorystatus_log_handle; |
333 | |
TUNABLE_WRITEABLE(memorystatus_log_level_t, memorystatus_log_level, "memorystatus_log_level", MEMORYSTATUS_LOG_LEVEL_DEFAULT);
335 | |
336 | #if DEBUG || DEVELOPMENT |
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_log_level, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_log_level, MEMORYSTATUS_LOG_LEVEL_DEFAULT, "");
338 | #endif |
339 | |
340 | static LCK_GRP_DECLARE(memorystatus_jetsam_fg_band_lock_grp, |
341 | "memorystatus_jetsam_fg_band" ); |
342 | LCK_MTX_DECLARE(memorystatus_jetsam_fg_band_lock, |
343 | &memorystatus_jetsam_fg_band_lock_grp); |
344 | |
345 | /* Idle guard handling */ |
346 | |
347 | static int32_t memorystatus_scheduled_idle_demotions_sysprocs = 0; |
348 | static int32_t memorystatus_scheduled_idle_demotions_apps = 0; |
349 | |
350 | static void memorystatus_perform_idle_demotion(__unused void *spare1, __unused void *spare2); |
351 | static void memorystatus_schedule_idle_demotion_locked(proc_t p, boolean_t set_state); |
352 | static void memorystatus_reschedule_idle_demotion_locked(void); |
353 | int memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap); |
354 | vm_pressure_level_t convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t); |
355 | boolean_t is_knote_registered_modify_task_pressure_bits(struct knote*, int, task_t, vm_pressure_level_t, vm_pressure_level_t); |
356 | void memorystatus_klist_reset_all_for_level(vm_pressure_level_t pressure_level_to_clear); |
357 | void memorystatus_send_low_swap_note(void); |
358 | boolean_t memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, unsigned int band, int aggr_count, |
359 | uint32_t *errors, uint64_t *memory_reclaimed); |
360 | uint64_t memorystatus_available_memory_internal(proc_t p); |
361 | void memorystatus_thread_wake(void); |
362 | |
363 | unsigned int memorystatus_level = 0; |
364 | static int memorystatus_list_count = 0; |
365 | memstat_bucket_t memstat_bucket[MEMSTAT_BUCKET_COUNT]; |
366 | static thread_call_t memorystatus_idle_demotion_call; |
367 | uint64_t memstat_idle_demotion_deadline = 0; |
368 | |
369 | #ifdef XNU_TARGET_OS_OSX |
370 | /* |
371 | * Effectively disable the system process and application demotion |
372 | * logic on macOS. This means system processes and apps won't get the |
373 | * 10 second protection before landing in the IDLE band after moving |
* out of their active band. Reasons:
375 | * - daemons + extensions + apps on macOS don't behave the way they |
376 | * do on iOS and so they are confusing the demotion logic. For example, |
377 | * not all apps go from FG to IDLE. Some sit in higher bands instead. This |
378 | * is causing multiple asserts to fire internally. |
379 | * - we use the aging bands to protect processes from jetsam. But on macOS, |
380 | * we have a very limited jetsam that is only invoked under extreme conditions |
381 | * where we have no more swap / compressor space OR are under critical pressure. |
382 | */ |
383 | int system_procs_aging_band = 0; |
384 | int applications_aging_band = 0; |
385 | #else /* XNU_TARGET_OS_OSX */ |
386 | int system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; |
387 | int applications_aging_band = JETSAM_PRIORITY_AGING_BAND2; |
388 | #endif /* XNU_TARGET_OS_OSX */ |
389 | |
390 | _Atomic bool memorystatus_zone_map_is_exhausted = false; |
391 | _Atomic bool memorystatus_compressor_space_shortage = false; |
392 | _Atomic bool memorystatus_pageout_starved = false; |
393 | #if CONFIG_PHANTOM_CACHE |
394 | _Atomic bool memorystatus_phantom_cache_pressure = false; |
395 | #endif /* CONFIG_PHANTOM_CACHE */ |
396 | |
397 | #define isProcessInAgingBands(p) ((isSysProc(p) && system_procs_aging_band && (p->p_memstat_effectivepriority == system_procs_aging_band)) || (isApp(p) && applications_aging_band && (p->p_memstat_effectivepriority == applications_aging_band))) |
398 | |
399 | /* |
400 | * For a while we had support for a couple of different aging policies in the kernel, |
401 | * but the sysproc aging policy is now the default on all platforms. |
* This flag was exported as RO via sysctl & is only kept for backwards compatibility.
403 | */ |
404 | unsigned int jetsam_aging_policy = kJetsamAgingPolicySysProcsReclaimedFirst; |
405 | bool memorystatus_should_issue_fg_band_notify = true; |
406 | |
407 | extern uint64_t vm_purgeable_purge_task_owned(task_t task); |
408 | extern void coalition_mark_swappable(coalition_t coal); |
409 | extern bool coalition_is_swappable(coalition_t coal); |
410 | boolean_t memorystatus_allowed_vm_map_fork(task_t, bool *); |
411 | #if DEVELOPMENT || DEBUG |
412 | void memorystatus_abort_vm_map_fork(task_t); |
413 | #endif |
414 | |
415 | /* |
416 | * Idle delay timeout factors for daemons based on relaunch behavior. Only used in |
417 | * kJetsamAgingPolicySysProcsReclaimedFirst aging policy. |
418 | */ |
419 | #define kJetsamSysProcsIdleDelayTimeLowRatio (5) |
420 | #define kJetsamSysProcsIdleDelayTimeMedRatio (2) |
421 | #define kJetsamSysProcsIdleDelayTimeHighRatio (1) |
static_assert(kJetsamSysProcsIdleDelayTimeLowRatio <= DEFERRED_IDLE_EXIT_TIME_SECS, "sysproc idle delay time for low relaunch daemons would be 0");
423 | |
424 | /* |
425 | * For the kJetsamAgingPolicySysProcsReclaimedFirst aging policy, treat apps as well |
426 | * behaved daemons for aging purposes. |
427 | */ |
428 | #define kJetsamAppsIdleDelayTimeRatio (kJetsamSysProcsIdleDelayTimeLowRatio) |
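
/*
 * Example: assuming the 10-second DEFERRED_IDLE_EXIT_TIME_SECS base delay,
 * low-relaunch daemons (and apps) age out after 10 / 5 = 2s, medium-relaunch
 * daemons after 10 / 2 = 5s, and high-relaunch daemons keep the full 10s.
 */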
429 | |
430 | static uint64_t |
431 | memorystatus_sysprocs_idle_time(proc_t p) |
432 | { |
433 | uint64_t idle_delay_time = 0; |
434 | /* |
435 | * For system processes, base the idle delay time on the |
436 | * jetsam relaunch behavior specified by launchd. The idea |
437 | * is to provide extra protection to the daemons which would |
438 | * relaunch immediately after jetsam. |
439 | */ |
440 | switch (p->p_memstat_relaunch_flags) { |
441 | case P_MEMSTAT_RELAUNCH_UNKNOWN: |
442 | case P_MEMSTAT_RELAUNCH_LOW: |
443 | idle_delay_time = memorystatus_sysprocs_idle_delay_time / kJetsamSysProcsIdleDelayTimeLowRatio; |
444 | break; |
445 | case P_MEMSTAT_RELAUNCH_MED: |
446 | idle_delay_time = memorystatus_sysprocs_idle_delay_time / kJetsamSysProcsIdleDelayTimeMedRatio; |
447 | break; |
448 | case P_MEMSTAT_RELAUNCH_HIGH: |
449 | idle_delay_time = memorystatus_sysprocs_idle_delay_time / kJetsamSysProcsIdleDelayTimeHighRatio; |
450 | break; |
451 | default: |
452 | panic("Unknown relaunch flags on process!" ); |
453 | break; |
454 | } |
455 | return idle_delay_time; |
456 | } |
457 | |
458 | static uint64_t |
459 | memorystatus_apps_idle_time(__unused proc_t p) |
460 | { |
461 | return memorystatus_apps_idle_delay_time / kJetsamAppsIdleDelayTimeRatio; |
462 | } |
463 | |
464 | |
465 | static int |
466 | sysctl_jetsam_set_sysprocs_idle_delay_time SYSCTL_HANDLER_ARGS |
467 | { |
468 | #pragma unused(oidp, arg1, arg2) |
469 | |
470 | int error = 0, val = 0, old_time_in_secs = 0; |
471 | uint64_t old_time_in_ns = 0; |
472 | |
absolutetime_to_nanoseconds(memorystatus_sysprocs_idle_delay_time, &old_time_in_ns);
474 | old_time_in_secs = (int) (old_time_in_ns / NSEC_PER_SEC); |
475 | |
error = sysctl_io_number(req, old_time_in_secs, sizeof(int), &val, NULL);
477 | if (error || !req->newptr) { |
478 | return error; |
479 | } |
480 | |
481 | if ((val < 0) || (val > INT32_MAX)) { |
482 | memorystatus_log_error("jetsam: new idle delay interval has invalid value.\n" ); |
483 | return EINVAL; |
484 | } |
485 | |
nanoseconds_to_absolutetime((uint64_t)val * NSEC_PER_SEC, &memorystatus_sysprocs_idle_delay_time);
487 | |
488 | return 0; |
489 | } |
490 | |
SYSCTL_PROC(_kern, OID_AUTO, memorystatus_sysprocs_idle_delay_time, CTLTYPE_INT | CTLFLAG_RW,
0, 0, sysctl_jetsam_set_sysprocs_idle_delay_time, "I", "Aging window for system processes");
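
/*
 * e.g. (illustrative): sysctl kern.memorystatus_sysprocs_idle_delay_time=10
 * sets a 10 second aging window for system processes.
 */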
493 | |
494 | |
495 | static int |
496 | sysctl_jetsam_set_apps_idle_delay_time SYSCTL_HANDLER_ARGS |
497 | { |
498 | #pragma unused(oidp, arg1, arg2) |
499 | |
500 | int error = 0, val = 0, old_time_in_secs = 0; |
501 | uint64_t old_time_in_ns = 0; |
502 | |
absolutetime_to_nanoseconds(memorystatus_apps_idle_delay_time, &old_time_in_ns);
504 | old_time_in_secs = (int) (old_time_in_ns / NSEC_PER_SEC); |
505 | |
error = sysctl_io_number(req, old_time_in_secs, sizeof(int), &val, NULL);
507 | if (error || !req->newptr) { |
508 | return error; |
509 | } |
510 | |
511 | if ((val < 0) || (val > INT32_MAX)) { |
512 | memorystatus_log_error("jetsam: new idle delay interval has invalid value.\n" ); |
513 | return EINVAL; |
514 | } |
515 | |
nanoseconds_to_absolutetime((uint64_t)val * NSEC_PER_SEC, &memorystatus_apps_idle_delay_time);
517 | |
518 | return 0; |
519 | } |
520 | |
SYSCTL_PROC(_kern, OID_AUTO, memorystatus_apps_idle_delay_time, CTLTYPE_INT | CTLFLAG_RW,
0, 0, sysctl_jetsam_set_apps_idle_delay_time, "I", "Aging window for applications");
523 | |
SYSCTL_INT(_kern, OID_AUTO, jetsam_aging_policy, CTLTYPE_INT | CTLFLAG_RD, &jetsam_aging_policy, 0, "");
525 | |
526 | static unsigned int memorystatus_dirty_count = 0; |
527 | |
SYSCTL_INT(_kern, OID_AUTO, max_task_pmem, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_KERN, &max_task_footprint_mb, 0, "");
529 | |
530 | static int memorystatus_highwater_enabled = 1; /* Update the cached memlimit data. */ |
531 | static boolean_t proc_jetsam_state_is_active_locked(proc_t); |
532 | |
533 | #if __arm64__ |
int legacy_footprint_bonus_mb = 50; /* This value was chosen after looking at the top 30 apps
535 | * that needed the additional room in their footprint when |
536 | * the 'correct' accounting methods were applied to them. |
537 | */ |
538 | |
539 | #if DEVELOPMENT || DEBUG |
SYSCTL_INT(_kern, OID_AUTO, legacy_footprint_bonus_mb, CTLFLAG_RW | CTLFLAG_LOCKED, &legacy_footprint_bonus_mb, 0, "");
541 | #endif /* DEVELOPMENT || DEBUG */ |
542 | /* |
543 | * Raise the inactive and active memory limits to new values. |
* Only raises the limits; does nothing if either of the current
* limits is 0.
546 | * Caller must hold the proc_list_lock |
547 | */ |
548 | static void |
549 | memorystatus_raise_memlimit(proc_t p, int new_memlimit_active, int new_memlimit_inactive) |
550 | { |
551 | int memlimit_mb_active = 0, memlimit_mb_inactive = 0; |
552 | boolean_t memlimit_active_is_fatal = FALSE, memlimit_inactive_is_fatal = FALSE, use_active_limit = FALSE; |
553 | |
554 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
555 | |
556 | if (p->p_memstat_memlimit_active > 0) { |
557 | memlimit_mb_active = p->p_memstat_memlimit_active; |
558 | } else if (p->p_memstat_memlimit_active == -1) { |
559 | memlimit_mb_active = max_task_footprint_mb; |
560 | } else { |
561 | /* |
562 | * Nothing to do for '0' which is |
563 | * a special value only used internally |
564 | * to test 'no limits'. |
565 | */ |
566 | return; |
567 | } |
568 | |
569 | if (p->p_memstat_memlimit_inactive > 0) { |
570 | memlimit_mb_inactive = p->p_memstat_memlimit_inactive; |
571 | } else if (p->p_memstat_memlimit_inactive == -1) { |
572 | memlimit_mb_inactive = max_task_footprint_mb; |
573 | } else { |
574 | /* |
575 | * Nothing to do for '0' which is |
576 | * a special value only used internally |
577 | * to test 'no limits'. |
578 | */ |
579 | return; |
580 | } |
581 | |
582 | memlimit_mb_active = MAX(new_memlimit_active, memlimit_mb_active); |
583 | memlimit_mb_inactive = MAX(new_memlimit_inactive, memlimit_mb_inactive); |
584 | |
585 | memlimit_active_is_fatal = (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL); |
586 | memlimit_inactive_is_fatal = (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL); |
587 | |
588 | SET_ACTIVE_LIMITS_LOCKED(p, memlimit_mb_active, memlimit_active_is_fatal); |
589 | SET_INACTIVE_LIMITS_LOCKED(p, memlimit_mb_inactive, memlimit_inactive_is_fatal); |
590 | |
591 | if (proc_jetsam_state_is_active_locked(p) == TRUE) { |
592 | use_active_limit = TRUE; |
593 | CACHE_ACTIVE_LIMITS_LOCKED(p, memlimit_active_is_fatal); |
594 | } else { |
595 | CACHE_INACTIVE_LIMITS_LOCKED(p, memlimit_inactive_is_fatal); |
596 | } |
597 | |
598 | if (memorystatus_highwater_enabled) { |
599 | task_set_phys_footprint_limit_internal(proc_task(p), |
600 | (p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit : -1, |
601 | NULL, /*return old value */ |
602 | use_active_limit, /*active limit?*/ |
603 | (use_active_limit ? memlimit_active_is_fatal : memlimit_inactive_is_fatal)); |
604 | } |
605 | } |
606 | |
607 | void |
memorystatus_act_on_legacy_footprint_entitlement(proc_t p, boolean_t footprint_increase)
609 | { |
610 | int memlimit_mb_active = 0, memlimit_mb_inactive = 0; |
611 | |
612 | if (p == NULL) { |
613 | return; |
614 | } |
615 | |
616 | proc_list_lock(); |
617 | |
618 | if (p->p_memstat_memlimit_active > 0) { |
619 | memlimit_mb_active = p->p_memstat_memlimit_active; |
620 | } else if (p->p_memstat_memlimit_active == -1) { |
621 | memlimit_mb_active = max_task_footprint_mb; |
622 | } else { |
623 | /* |
624 | * Nothing to do for '0' which is |
625 | * a special value only used internally |
626 | * to test 'no limits'. |
627 | */ |
628 | proc_list_unlock(); |
629 | return; |
630 | } |
631 | |
632 | if (p->p_memstat_memlimit_inactive > 0) { |
633 | memlimit_mb_inactive = p->p_memstat_memlimit_inactive; |
634 | } else if (p->p_memstat_memlimit_inactive == -1) { |
635 | memlimit_mb_inactive = max_task_footprint_mb; |
636 | } else { |
637 | /* |
638 | * Nothing to do for '0' which is |
639 | * a special value only used internally |
640 | * to test 'no limits'. |
641 | */ |
642 | proc_list_unlock(); |
643 | return; |
644 | } |
645 | |
646 | if (footprint_increase) { |
647 | memlimit_mb_active += legacy_footprint_bonus_mb; |
648 | memlimit_mb_inactive += legacy_footprint_bonus_mb; |
649 | } else { |
650 | memlimit_mb_active -= legacy_footprint_bonus_mb; |
651 | if (memlimit_mb_active == max_task_footprint_mb) { |
652 | memlimit_mb_active = -1; /* reverting back to default system limit */ |
653 | } |
654 | |
655 | memlimit_mb_inactive -= legacy_footprint_bonus_mb; |
656 | if (memlimit_mb_inactive == max_task_footprint_mb) { |
657 | memlimit_mb_inactive = -1; /* reverting back to default system limit */ |
658 | } |
659 | } |
memorystatus_raise_memlimit(p, memlimit_mb_active, memlimit_mb_inactive);
661 | |
662 | proc_list_unlock(); |
663 | } |
664 | |
665 | void |
memorystatus_act_on_ios13extended_footprint_entitlement(proc_t p)
667 | { |
668 | proc_list_lock(); |
memorystatus_raise_memlimit(p, memorystatus_ios13extended_footprint_limit_mb,
memorystatus_ios13extended_footprint_limit_mb);
671 | proc_list_unlock(); |
672 | } |
673 | |
674 | void |
675 | memorystatus_act_on_entitled_task_limit(proc_t p) |
676 | { |
677 | if (memorystatus_entitled_max_task_footprint_mb == 0) { |
678 | // Entitlement is not supported on this device. |
679 | return; |
680 | } |
681 | proc_list_lock(); |
memorystatus_raise_memlimit(p, memorystatus_entitled_max_task_footprint_mb, memorystatus_entitled_max_task_footprint_mb);
683 | proc_list_unlock(); |
684 | } |
685 | #endif /* __arm64__ */ |
686 | |
SYSCTL_INT(_kern, OID_AUTO, memorystatus_level, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_level, 0, "");
688 | |
689 | int |
690 | memorystatus_get_level(__unused struct proc *p, struct memorystatus_get_level_args *args, __unused int *ret) |
691 | { |
692 | user_addr_t level = 0; |
693 | |
694 | level = args->level; |
695 | |
696 | if (copyout(&memorystatus_level, level, sizeof(memorystatus_level)) != 0) { |
697 | return EFAULT; |
698 | } |
699 | |
700 | return 0; |
701 | } |
702 | |
703 | static void memorystatus_thread(void *param __unused, wait_result_t wr __unused); |
704 | |
705 | /* Memory Limits */ |
706 | |
707 | static boolean_t memorystatus_kill_specific_process(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason); |
708 | static boolean_t memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason); |
709 | |
710 | |
711 | static int memorystatus_cmd_set_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval); |
712 | |
713 | #if DEBUG || DEVELOPMENT |
714 | static int memorystatus_cmd_set_diag_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval); |
715 | static int memorystatus_cmd_get_diag_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval); |
716 | static int memorystatus_set_diag_memlimit_properties_internal(proc_t p, memorystatus_diag_memlimit_properties_t *p_entry); |
717 | static int memorystatus_get_diag_memlimit_properties_internal(proc_t p, memorystatus_diag_memlimit_properties_t *p_entry); |
718 | #endif // DEBUG || DEVELOPMENT |
719 | static int memorystatus_set_memlimit_properties(pid_t pid, memorystatus_memlimit_properties_t *entry); |
720 | |
721 | static int memorystatus_cmd_get_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval); |
722 | |
723 | static int memorystatus_cmd_get_memlimit_excess_np(pid_t pid, uint32_t flags, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval); |
724 | |
725 | static void memorystatus_get_memlimit_properties_internal(proc_t p, memorystatus_memlimit_properties_t *p_entry); |
726 | static int memorystatus_set_memlimit_properties_internal(proc_t p, memorystatus_memlimit_properties_t *p_entry); |
727 | |
728 | int proc_get_memstat_priority(proc_t, boolean_t); |
729 | |
730 | static boolean_t memorystatus_idle_snapshot = 0; |
731 | |
732 | unsigned int memorystatus_delta = 0; |
733 | |
734 | /* Jetsam Loop Detection */ |
735 | boolean_t memorystatus_jld_enabled = FALSE; /* Enable jetsam loop detection */ |
736 | uint32_t memorystatus_jld_eval_period_msecs = 0; /* Init pass sets this based on device memory size */ |
737 | int memorystatus_jld_eval_aggressive_count = 3; /* Raise the priority max after 'n' aggressive loops */ |
738 | int memorystatus_jld_eval_aggressive_priority_band_max = 15; /* Kill aggressively up through this band */ |
739 | int memorystatus_jld_max_kill_loops = 2; /* How many times should we try and kill up to the target band */ |
740 | |
741 | /* |
742 | * A FG app can request that the aggressive jetsam mechanism display some leniency in the FG band. This 'lenient' mode is described as: |
743 | * --- if aggressive jetsam kills an app in the FG band and gets back >=AGGRESSIVE_JETSAM_LENIENT_MODE_THRESHOLD memory, it will stop the aggressive march further into and up the jetsam bands. |
744 | * |
745 | * RESTRICTIONS: |
746 | * - Such a request is respected/acknowledged only once while that 'requesting' app is in the FG band i.e. if aggressive jetsam was |
747 | * needed and the 'lenient' mode was deployed then that's it for this special mode while the app is in the FG band. |
748 | * |
749 | * - If the app is still in the FG band and aggressive jetsam is needed again, there will be no stop-and-check the next time around. |
750 | * |
751 | * - Also, the transition of the 'requesting' app away from the FG band will void this special behavior. |
752 | */ |
753 | |
754 | #define AGGRESSIVE_JETSAM_LENIENT_MODE_THRESHOLD 25 |
755 | boolean_t memorystatus_aggressive_jetsam_lenient_allowed = FALSE; |
756 | boolean_t memorystatus_aggressive_jetsam_lenient = FALSE; |
757 | |
758 | #if DEVELOPMENT || DEBUG |
759 | /* |
760 | * Jetsam Loop Detection tunables. |
761 | */ |
762 | |
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_eval_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_jld_eval_period_msecs, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_eval_aggressive_count, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_jld_eval_aggressive_count, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_eval_aggressive_priority_band_max, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_jld_eval_aggressive_priority_band_max, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_max_kill_loops, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_jld_max_kill_loops, 0, "");
767 | #endif /* DEVELOPMENT || DEBUG */ |
768 | |
769 | /* |
770 | * snapshot support for memstats collected at boot. |
771 | */ |
772 | static memorystatus_jetsam_snapshot_t memorystatus_at_boot_snapshot; |
773 | |
774 | static void memorystatus_init_jetsam_snapshot_locked(memorystatus_jetsam_snapshot_t *od_snapshot, uint32_t ods_list_count); |
775 | static boolean_t memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_snapshot_entry_t *entry, uint64_t gencount); |
776 | static void memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, uint64_t killtime); |
777 | |
778 | static void memorystatus_clear_errors(void); |
779 | |
780 | static void memorystatus_get_task_phys_footprint_page_counts(task_t task, |
781 | uint64_t *internal_pages, uint64_t *internal_compressed_pages, |
782 | uint64_t *purgeable_nonvolatile_pages, uint64_t *purgeable_nonvolatile_compressed_pages, |
783 | uint64_t *alternate_accounting_pages, uint64_t *alternate_accounting_compressed_pages, |
784 | uint64_t *iokit_mapped_pages, uint64_t *page_table_pages, uint64_t *frozen_to_swap_pages); |
785 | |
786 | static void memorystatus_get_task_memory_region_count(task_t task, uint64_t *count); |
787 | |
788 | static uint32_t memorystatus_build_state(proc_t p); |
789 | //static boolean_t memorystatus_issue_pressure_kevent(boolean_t pressured); |
790 | |
791 | static bool memorystatus_kill_top_process(bool any, bool sort_flag, uint32_t cause, os_reason_t jetsam_reason, |
792 | int32_t max_priority, bool only_swappable, |
793 | int32_t *priority, uint32_t *errors, uint64_t *memory_reclaimed); |
794 | static boolean_t memorystatus_kill_processes_aggressive(uint32_t cause, int aggr_count, int32_t priority_max, int32_t max_kills, uint32_t *errors, uint64_t *memory_reclaimed); |
795 | static boolean_t memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged, uint64_t *memory_reclaimed); |
796 | |
797 | /* Priority Band Sorting Routines */ |
798 | static int memorystatus_sort_bucket(unsigned int bucket_index, int sort_order); |
799 | static int memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coal_sort_order); |
800 | static void memorystatus_sort_by_largest_process_locked(unsigned int bucket_index); |
801 | static int memorystatus_move_list_locked(unsigned int bucket_index, pid_t *pid_list, int list_sz); |
802 | |
803 | /* qsort routines */ |
804 | typedef int (*cmpfunc_t)(const void *a, const void *b); |
805 | extern void qsort(void *a, size_t n, size_t es, cmpfunc_t cmp); |
806 | static int memstat_asc_cmp(const void *a, const void *b); |
807 | |
808 | /* VM pressure */ |
809 | |
810 | #if CONFIG_SECLUDED_MEMORY |
811 | extern unsigned int vm_page_secluded_count; |
812 | extern unsigned int vm_page_secluded_count_over_target; |
813 | #endif /* CONFIG_SECLUDED_MEMORY */ |
814 | |
815 | /* Aggressive jetsam pages threshold for sysproc aging policy */ |
816 | unsigned int memorystatus_sysproc_aging_aggr_pages = 0; |
817 | |
818 | #if CONFIG_JETSAM |
819 | |
820 | /* Jetsam Thresholds in MB */ |
TUNABLE_DT(uint32_t, memorystatus_critical_threshold_mb, "/defaults",
"kern.memstat_critical_mb", "memorystatus_critical_threshold_mb", 0, TUNABLE_DT_NONE);
TUNABLE_DT(uint32_t, memorystatus_idle_threshold_mb, "/defaults",
"kern.memstat_idle_mb", "memorystatus_idle_threshold_mb", 0, TUNABLE_DT_NONE);
TUNABLE_DT(uint32_t, memorystatus_pressure_threshold_mb, "/defaults",
"kern.memstat_pressure_mb", "memorystatus_pressure_threshold_mb", 0, TUNABLE_DT_NONE);
TUNABLE_DT(uint32_t, memorystatus_more_free_offset_mb, "/defaults",
"kern.memstat_more_free_mb", "memorystatus_more_free_offset_mb", 0, TUNABLE_DT_NONE);
829 | |
830 | /* |
831 | * Available Pages Thresholds |
832 | * critical_base: jetsam above the idle band |
833 | * critical_idle: jetsam in the idle band |
834 | * more_free_offset: offset applied to critical/idle upon request from userspace |
835 | * sysproc_aging_aggr: allow aggressive jetsam due to sysproc aging |
836 | * pressure: jetsam hwm violators |
837 | */ |
838 | unsigned int memorystatus_available_pages = (unsigned int)-1; |
839 | unsigned int memorystatus_available_pages_pressure = 0; |
840 | unsigned int memorystatus_available_pages_critical = 0; |
841 | unsigned int memorystatus_available_pages_critical_base = 0; |
842 | unsigned int memorystatus_available_pages_critical_idle = 0; |
TUNABLE_DT_WRITEABLE(unsigned int, memorystatus_swap_all_apps, "/defaults", "kern.swap_all_apps", "kern.swap_all_apps", false, TUNABLE_DT_NONE);
844 | /* Will compact the early swapin queue if there are >= this many csegs on it. */ |
845 | static unsigned int memorystatus_swapin_trigger_segments = 10; |
846 | unsigned int memorystatus_swapin_trigger_pages = 0; |
847 | |
848 | #if DEVELOPMENT || DEBUG |
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_available_pages, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_swapin_trigger_pages, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_swapin_trigger_pages, 0, "");
#else
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &memorystatus_available_pages, 0, "");
#endif /* DEVELOPMENT || DEBUG */
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_swap_all_apps, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_swap_all_apps, 0, "");
855 | |
856 | static unsigned int memorystatus_jetsam_policy = kPolicyDefault; |
857 | unsigned int memorystatus_policy_more_free_offset_pages = 0; |
858 | static void memorystatus_update_levels_locked(void); |
859 | |
860 | static int memorystatus_cmd_set_jetsam_memory_limit(pid_t pid, int32_t high_water_mark, __unused int32_t *retval, boolean_t is_fatal_limit); |
861 | |
862 | int32_t max_kill_priority = JETSAM_PRIORITY_MAX; |
863 | |
864 | proc_name_t memorystatus_jetsam_proc_name_panic; /* Panic when we are about to jetsam this process. */ |
865 | uint32_t memorystatus_jetsam_proc_cause_panic = 0; /* If specified, panic only when we are about to jetsam the process above for this cause. */ |
866 | uint32_t memorystatus_jetsam_proc_size_panic = 0; /* If specified, panic only when we are about to jetsam the process above and its footprint is more than this in MB. */ |
867 | |
868 | /* If set, kill swappable processes when we're low on swap space. Currently off until we can allocate more swap space (rdar://87800902) */ |
869 | uint32_t jetsam_kill_on_low_swap = 0; |
870 | #else /* CONFIG_JETSAM */ |
871 | |
872 | uint64_t memorystatus_available_pages = (uint64_t)-1; |
873 | uint64_t memorystatus_available_pages_pressure = (uint64_t)-1; |
874 | uint64_t memorystatus_available_pages_critical = (uint64_t)-1; |
875 | |
876 | int32_t max_kill_priority = JETSAM_PRIORITY_IDLE; |
877 | #endif /* CONFIG_JETSAM */ |
878 | |
879 | #if DEVELOPMENT || DEBUG |
880 | |
static LCK_GRP_DECLARE(disconnect_page_mappings_lck_grp, "disconnect_page_mappings");
882 | static LCK_MTX_DECLARE(disconnect_page_mappings_mutex, &disconnect_page_mappings_lck_grp); |
883 | |
884 | extern bool kill_on_no_paging_space; |
885 | #endif /* DEVELOPMENT || DEBUG */ |
886 | |
887 | #if DEVELOPMENT || DEBUG |
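/* Rounds a byte count up to whole MB (despite the name, up rather than to nearest); e.g. roundToNearestMB(1) == 1. */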
888 | static inline uint32_t |
889 | roundToNearestMB(uint32_t in) |
890 | { |
891 | return (in + ((1 << 20) - 1)) >> 20; |
892 | } |
893 | |
894 | static int memorystatus_cmd_increase_jetsam_task_limit(pid_t pid, uint32_t byte_increase); |
895 | #endif |
896 | |
897 | #if __arm64__ |
extern int legacy_footprint_entitlement_mode;
899 | #endif /* __arm64__ */ |
900 | |
901 | /* Debug */ |
902 | |
903 | extern struct knote *vm_find_knote_from_pid(pid_t, struct klist *); |
904 | |
905 | #if DEVELOPMENT || DEBUG |
906 | |
907 | static unsigned int memorystatus_debug_dump_this_bucket = 0; |
908 | |
909 | static void |
910 | memorystatus_debug_dump_bucket_locked(unsigned int bucket_index) |
911 | { |
912 | proc_t p = NULL; |
913 | uint64_t bytes = 0; |
914 | int ledger_limit = 0; |
915 | unsigned int b = bucket_index; |
916 | boolean_t traverse_all_buckets = FALSE; |
917 | |
918 | if (bucket_index >= MEMSTAT_BUCKET_COUNT) { |
919 | traverse_all_buckets = TRUE; |
920 | b = 0; |
921 | } else { |
922 | traverse_all_buckets = FALSE; |
923 | b = bucket_index; |
924 | } |
925 | |
926 | /* |
927 | * footprint reported in [pages / MB ] |
928 | * limits reported as: |
929 | * L-limit proc's Ledger limit |
930 | * C-limit proc's Cached limit, should match Ledger |
931 | * A-limit proc's Active limit |
932 | * IA-limit proc's Inactive limit |
933 | * F==Fatal, NF==NonFatal |
934 | */ |
935 | |
936 | memorystatus_log_debug("memorystatus_debug_dump ***START*(PAGE_SIZE_64=%llu)**\n" , PAGE_SIZE_64); |
937 | memorystatus_log_debug("bucket [pid] [pages / MB] [state] [EP / RP / AP] dirty deadline [L-limit / C-limit / A-limit / IA-limit] name\n" ); |
938 | p = memorystatus_get_first_proc_locked(&b, traverse_all_buckets); |
939 | while (p) { |
940 | bytes = get_task_phys_footprint(proc_task(p)); |
941 | task_get_phys_footprint_limit(proc_task(p), &ledger_limit); |
942 | memorystatus_log_debug("%2d [%5d] [%5lld /%3lldMB] 0x%-8x [%2d / %2d / %2d] 0x%-3x %10lld [%3d / %3d%s / %3d%s / %3d%s] %s\n" , |
943 | b, proc_getpid(p), |
944 | (bytes / PAGE_SIZE_64), /* task's footprint converted from bytes to pages */ |
945 | (bytes / (1024ULL * 1024ULL)), /* task's footprint converted from bytes to MB */ |
946 | p->p_memstat_state, p->p_memstat_effectivepriority, p->p_memstat_requestedpriority, p->p_memstat_assertionpriority, |
947 | p->p_memstat_dirty, p->p_memstat_idledeadline, |
948 | ledger_limit, |
949 | p->p_memstat_memlimit, |
950 | (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF" ), |
951 | p->p_memstat_memlimit_active, |
952 | (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL ? "F " : "NF" ), |
953 | p->p_memstat_memlimit_inactive, |
954 | (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL ? "F " : "NF" ), |
955 | (*p->p_name ? p->p_name : "unknown" )); |
956 | p = memorystatus_get_next_proc_locked(&b, p, traverse_all_buckets); |
957 | } |
958 | memorystatus_log_debug("memorystatus_debug_dump ***END***\n" ); |
959 | } |
960 | |
961 | static int |
962 | sysctl_memorystatus_debug_dump_bucket SYSCTL_HANDLER_ARGS |
963 | { |
964 | #pragma unused(oidp, arg2) |
965 | int bucket_index = 0; |
966 | int error; |
967 | error = SYSCTL_OUT(req, arg1, sizeof(int)); |
968 | if (error || !req->newptr) { |
969 | return error; |
970 | } |
971 | error = SYSCTL_IN(req, &bucket_index, sizeof(int)); |
972 | if (error || !req->newptr) { |
973 | return error; |
974 | } |
975 | if (bucket_index >= MEMSTAT_BUCKET_COUNT) { |
976 | /* |
977 | * All jetsam buckets will be dumped. |
978 | */ |
979 | } else { |
980 | /* |
981 | * Only a single bucket will be dumped. |
982 | */ |
983 | } |
984 | |
985 | proc_list_lock(); |
986 | memorystatus_debug_dump_bucket_locked(bucket_index); |
987 | proc_list_unlock(); |
988 | memorystatus_debug_dump_this_bucket = bucket_index; |
989 | return error; |
990 | } |
991 | |
992 | /* |
993 | * Debug aid to look at jetsam buckets and proc jetsam fields. |
994 | * Use this sysctl to act on a particular jetsam bucket. |
995 | * Writing the sysctl triggers the dump. |
996 | * Usage: sysctl kern.memorystatus_debug_dump_this_bucket=<bucket_index> |
997 | */ |
998 | |
SYSCTL_PROC(_kern, OID_AUTO, memorystatus_debug_dump_this_bucket, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_debug_dump_this_bucket, 0, sysctl_memorystatus_debug_dump_bucket, "I", "");
1000 | |
1001 | |
/* Debug aid to help determine the effective memory limit */
1003 | |
1004 | static int |
1005 | sysctl_memorystatus_highwater_enable SYSCTL_HANDLER_ARGS |
1006 | { |
1007 | #pragma unused(oidp, arg2) |
1008 | proc_t p; |
1009 | unsigned int b = 0; |
1010 | int error, enable = 0; |
1011 | boolean_t use_active; /* use the active limit and active limit attributes */ |
1012 | boolean_t is_fatal; |
1013 | |
1014 | error = SYSCTL_OUT(req, arg1, sizeof(int)); |
1015 | if (error || !req->newptr) { |
1016 | return error; |
1017 | } |
1018 | |
1019 | error = SYSCTL_IN(req, &enable, sizeof(int)); |
1020 | if (error || !req->newptr) { |
1021 | return error; |
1022 | } |
1023 | |
1024 | if (!(enable == 0 || enable == 1)) { |
1025 | return EINVAL; |
1026 | } |
1027 | |
1028 | proc_list_lock(); |
1029 | |
1030 | p = memorystatus_get_first_proc_locked(&b, TRUE); |
1031 | while (p) { |
1032 | use_active = proc_jetsam_state_is_active_locked(p); |
1033 | |
1034 | if (enable) { |
1035 | if (use_active == TRUE) { |
1036 | CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal); |
1037 | } else { |
1038 | CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal); |
1039 | } |
1040 | } else { |
1041 | /* |
1042 | * Disabling limits does not touch the stored variants. |
1043 | * Set the cached limit fields to system_wide defaults. |
1044 | */ |
1045 | p->p_memstat_memlimit = -1; |
1046 | p->p_memstat_state |= P_MEMSTAT_FATAL_MEMLIMIT; |
1047 | is_fatal = TRUE; |
1048 | } |
1049 | |
1050 | /* |
1051 | * Enforce the cached limit by writing to the ledger. |
1052 | */ |
1053 | task_set_phys_footprint_limit_internal(proc_task(p), (p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit: -1, NULL, use_active, is_fatal); |
1054 | |
1055 | p = memorystatus_get_next_proc_locked(&b, p, TRUE); |
1056 | } |
1057 | |
1058 | memorystatus_highwater_enabled = enable; |
1059 | |
1060 | proc_list_unlock(); |
1061 | |
1062 | return 0; |
1063 | } |
1064 | |
SYSCTL_PROC(_kern, OID_AUTO, memorystatus_highwater_enabled, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_highwater_enabled, 0, sysctl_memorystatus_highwater_enable, "I", "");
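
/*
 * e.g. (illustrative): sysctl kern.memorystatus_highwater_enabled=0 resets
 * every proc's cached limit to the system default and writes it to the
 * ledger; =1 re-applies each proc's stored active/inactive limit.
 */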
1066 | |
SYSCTL_INT(_kern, OID_AUTO, memorystatus_idle_snapshot, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_idle_snapshot, 0, "");
1068 | |
1069 | #if CONFIG_JETSAM |
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_available_pages_critical, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical_base, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_available_pages_critical_base, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical_idle, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_available_pages_critical_idle, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_policy_more_free_offset_pages, CTLFLAG_RD, &memorystatus_policy_more_free_offset_pages, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_aggr_sysproc_aging, CTLFLAG_RD, &memorystatus_sysproc_aging_aggr_pages, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_kill_on_low_swap, CTLFLAG_RW, &jetsam_kill_on_low_swap, 0, "");
1076 | #if VM_PRESSURE_EVENTS |
1077 | |
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_pressure, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_available_pages_pressure, 0, "");
1079 | |
1080 | #endif /* VM_PRESSURE_EVENTS */ |
1081 | |
1082 | #endif /* CONFIG_JETSAM */ |
1083 | |
1084 | #endif /* DEVELOPMENT || DEBUG */ |
1085 | |
1086 | extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, |
1087 | void *parameter, |
1088 | integer_t priority, |
1089 | thread_t *new_thread); |
1090 | |
1091 | #if DEVELOPMENT || DEBUG |
1092 | |
1093 | static int |
1094 | sysctl_memorystatus_disconnect_page_mappings SYSCTL_HANDLER_ARGS |
1095 | { |
1096 | #pragma unused(arg1, arg2) |
1097 | int error = 0, pid = 0; |
1098 | proc_t p; |
1099 | |
1100 | error = sysctl_handle_int(oidp, &pid, 0, req); |
1101 | if (error || !req->newptr) { |
1102 | return error; |
1103 | } |
1104 | |
1105 | lck_mtx_lock(&disconnect_page_mappings_mutex); |
1106 | |
1107 | if (pid == -1) { |
1108 | vm_pageout_disconnect_all_pages(); |
1109 | } else { |
1110 | p = proc_find(pid); |
1111 | |
1112 | if (p != NULL) { |
1113 | error = task_disconnect_page_mappings(proc_task(p)); |
1114 | |
1115 | proc_rele(p); |
1116 | |
1117 | if (error) { |
1118 | error = EIO; |
1119 | } |
1120 | } else { |
1121 | error = EINVAL; |
1122 | } |
1123 | } |
1124 | lck_mtx_unlock(&disconnect_page_mappings_mutex); |
1125 | |
1126 | return error; |
1127 | } |
1128 | |
SYSCTL_PROC(_kern, OID_AUTO, memorystatus_disconnect_page_mappings, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
0, 0, &sysctl_memorystatus_disconnect_page_mappings, "I", "");
1131 | |
1132 | #endif /* DEVELOPMENT || DEBUG */ |
1133 | |
1134 | /* |
1135 | * Sorts the given bucket. |
1136 | * |
1137 | * Input: |
1138 | * bucket_index - jetsam priority band to be sorted. |
1139 | * sort_order - JETSAM_SORT_xxx from kern_memorystatus.h |
1140 | * Currently sort_order is only meaningful when handling |
1141 | * coalitions. |
1142 | * |
1143 | * proc_list_lock must be held by the caller. |
1144 | */ |
1145 | static void |
1146 | memorystatus_sort_bucket_locked(unsigned int bucket_index, int sort_order) |
1147 | { |
1148 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
1149 | if (memstat_bucket[bucket_index].count == 0) { |
1150 | return; |
1151 | } |
1152 | |
1153 | switch (bucket_index) { |
1154 | case JETSAM_PRIORITY_FOREGROUND: |
if (memorystatus_sort_by_largest_coalition_locked(bucket_index, sort_order) == 0) {
1156 | /* |
1157 | * Fall back to per process sorting when zero coalitions are found. |
1158 | */ |
1159 | memorystatus_sort_by_largest_process_locked(bucket_index); |
1160 | } |
1161 | break; |
1162 | default: |
1163 | memorystatus_sort_by_largest_process_locked(bucket_index); |
1164 | break; |
1165 | } |
1166 | } |
1167 | |
1168 | /* |
1169 | * Picks the sorting routine for a given jetsam priority band. |
1170 | * |
1171 | * Input: |
1172 | * bucket_index - jetsam priority band to be sorted. |
1173 | * sort_order - JETSAM_SORT_xxx from kern_memorystatus.h |
1174 | * Currently sort_order is only meaningful when handling |
1175 | * coalitions. |
1176 | * |
1177 | * Return: |
1178 | * 0 on success |
1179 | * non-0 on failure |
1180 | */ |
1181 | static int |
1182 | memorystatus_sort_bucket(unsigned int bucket_index, int sort_order) |
1183 | { |
1184 | int coal_sort_order; |
1185 | |
1186 | /* |
1187 | * Verify the jetsam priority |
1188 | */ |
1189 | if (bucket_index >= MEMSTAT_BUCKET_COUNT) { |
1190 | return EINVAL; |
1191 | } |
1192 | |
1193 | #if DEVELOPMENT || DEBUG |
1194 | if (sort_order == JETSAM_SORT_DEFAULT) { |
1195 | coal_sort_order = COALITION_SORT_DEFAULT; |
1196 | } else { |
1197 | coal_sort_order = sort_order; /* only used for testing scenarios */ |
1198 | } |
1199 | #else |
1200 | /* Verify default */ |
1201 | if (sort_order == JETSAM_SORT_DEFAULT) { |
1202 | coal_sort_order = COALITION_SORT_DEFAULT; |
1203 | } else { |
1204 | return EINVAL; |
1205 | } |
1206 | #endif |
1207 | |
1208 | proc_list_lock(); |
memorystatus_sort_bucket_locked(bucket_index, coal_sort_order);
1210 | proc_list_unlock(); |
1211 | |
1212 | return 0; |
1213 | } |
1214 | |
1215 | /* |
1216 | * Sort processes by size for a single jetsam bucket. |
1217 | */ |
1218 | |
1219 | static void |
1220 | memorystatus_sort_by_largest_process_locked(unsigned int bucket_index) |
1221 | { |
1222 | proc_t p = NULL, insert_after_proc = NULL, max_proc = NULL; |
1223 | proc_t next_p = NULL, prev_max_proc = NULL; |
1224 | uint32_t pages = 0, max_pages = 0; |
1225 | memstat_bucket_t *current_bucket; |
1226 | |
1227 | if (bucket_index >= MEMSTAT_BUCKET_COUNT) { |
1228 | return; |
1229 | } |
1230 | |
1231 | current_bucket = &memstat_bucket[bucket_index]; |
1232 | |
1233 | p = TAILQ_FIRST(¤t_bucket->list); |
1234 | |
1235 | while (p) { |
1236 | memorystatus_get_task_page_counts(task: proc_task(p), footprint: &pages, NULL, NULL); |
1237 | max_pages = pages; |
1238 | max_proc = p; |
1239 | prev_max_proc = p; |
1240 | |
1241 | while ((next_p = TAILQ_NEXT(p, p_memstat_list)) != NULL) { |
1242 | /* traversing list until we find next largest process */ |
1243 | p = next_p; |
1244 | memorystatus_get_task_page_counts(task: proc_task(p), footprint: &pages, NULL, NULL); |
1245 | if (pages > max_pages) { |
1246 | max_pages = pages; |
1247 | max_proc = p; |
1248 | } |
1249 | } |
1250 | |
1251 | if (prev_max_proc != max_proc) { |
1252 | /* found a larger process, place it in the list */ |
1253 | TAILQ_REMOVE(¤t_bucket->list, max_proc, p_memstat_list); |
1254 | if (insert_after_proc == NULL) { |
1255 | TAILQ_INSERT_HEAD(¤t_bucket->list, max_proc, p_memstat_list); |
1256 | } else { |
1257 | TAILQ_INSERT_AFTER(¤t_bucket->list, insert_after_proc, max_proc, p_memstat_list); |
1258 | } |
1259 | prev_max_proc = max_proc; |
1260 | } |
1261 | |
1262 | insert_after_proc = max_proc; |
1263 | |
1264 | p = TAILQ_NEXT(max_proc, p_memstat_list); |
1265 | } |
1266 | } |
1267 | |
1268 | proc_t |
1269 | memorystatus_get_first_proc_locked(unsigned int *bucket_index, boolean_t search) |
1270 | { |
1271 | memstat_bucket_t *current_bucket; |
1272 | proc_t next_p; |
1273 | |
1274 | if ((*bucket_index) >= MEMSTAT_BUCKET_COUNT) { |
1275 | return NULL; |
1276 | } |
1277 | |
1278 | current_bucket = &memstat_bucket[*bucket_index]; |
1279 | next_p = TAILQ_FIRST(¤t_bucket->list); |
1280 | if (!next_p && search) { |
1281 | while (!next_p && (++(*bucket_index) < MEMSTAT_BUCKET_COUNT)) { |
1282 | current_bucket = &memstat_bucket[*bucket_index]; |
1283 | next_p = TAILQ_FIRST(¤t_bucket->list); |
1284 | } |
1285 | } |
1286 | |
1287 | return next_p; |
1288 | } |
1289 | |
1290 | proc_t |
1291 | memorystatus_get_next_proc_locked(unsigned int *bucket_index, proc_t p, boolean_t search) |
1292 | { |
1293 | memstat_bucket_t *current_bucket; |
1294 | proc_t next_p; |
1295 | |
1296 | if (!p || ((*bucket_index) >= MEMSTAT_BUCKET_COUNT)) { |
1297 | return NULL; |
1298 | } |
1299 | |
1300 | next_p = TAILQ_NEXT(p, p_memstat_list); |
1301 | while (!next_p && search && (++(*bucket_index) < MEMSTAT_BUCKET_COUNT)) { |
1302 | current_bucket = &memstat_bucket[*bucket_index]; |
1303 | next_p = TAILQ_FIRST(¤t_bucket->list); |
1304 | } |
1305 | |
1306 | return next_p; |
1307 | } |
1308 | |
1309 | jetsam_thread_state_t *jetsam_threads; |
1310 | |
1311 | /* Maximum number of jetsam threads allowed */ |
1312 | #define JETSAM_THREADS_LIMIT 3 |
1313 | |
1314 | /* Number of active jetsam threads */ |
1315 | _Atomic int active_jetsam_threads = 1; |
1316 | |
1317 | /* Number of maximum jetsam threads configured */ |
1318 | int max_jetsam_threads = JETSAM_THREADS_LIMIT; |
1319 | |
1320 | /* |
1321 | * Global switch for enabling fast jetsam. Fast jetsam is |
1322 | * hooked up via the system_override() system call. It has the |
1323 | * following effects: |
1324 | * - Raise the jetsam threshold ("clear-the-deck") |
* - Enable parallel jetsam on eligible devices
1326 | */ |
1327 | #if __AMP__ |
1328 | int fast_jetsam_enabled = 1; |
1329 | #else /* __AMP__ */ |
1330 | int fast_jetsam_enabled = 0; |
1331 | #endif /* __AMP__ */ |
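
/*
 * Both knobs can be overridden at boot (see the PE_parse_boot_argn()
 * calls in memorystatus_init() below); an illustrative setting:
 *
 *     nvram boot-args="fast_jetsam_enabled=1 max_jetsam_threads=2"
 *
 * Requests above JETSAM_THREADS_LIMIT are clamped, and systems restricted
 * to a single processor fall back to one thread with fast jetsam off.
 */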
1332 | |
1333 | static jetsam_thread_state_t * |
jetsam_current_thread(void)
1335 | { |
1336 | for (int thr_id = 0; thr_id < max_jetsam_threads; thr_id++) { |
1337 | if (jetsam_threads[thr_id].thread == current_thread()) { |
1338 | return &(jetsam_threads[thr_id]); |
1339 | } |
1340 | } |
1341 | return NULL; |
1342 | } |
1343 | |
1344 | #if CONFIG_JETSAM |
1345 | static void |
initialize_entitled_max_task_limit(void)
1347 | { |
1348 | /** |
1349 | * We've already stored the potential boot-arg "entitled_max_task_pmem" in |
1350 | * memorystatus_entitled_max_task_footprint_mb as a TUNABLE_DT. We provide |
1351 | * argptr=NULL and max_len=0 here to check only for existence of the boot-arg. |
1352 | * |
1353 | * The boot-arg takes precedence over memorystatus_swap_all_apps. |
1354 | */ |
if (!PE_parse_boot_argn("entitled_max_task_pmem", NULL, 0) && memorystatus_swap_all_apps) {
1356 | /* |
1357 | * When we have swap, we let entitled apps go up to the dram config |
1358 | * regardless of what's set in EDT, |
1359 | * This can still be overriden with the entitled_max_task_pmem boot-arg. |
1360 | */ |
1361 | memorystatus_entitled_max_task_footprint_mb = (int32_t) (max_mem_actual / (1ULL << 20)); |
1362 | } |
1363 | |
1364 | if (memorystatus_entitled_max_task_footprint_mb < 0) { |
memorystatus_log_error("Invalid value (%d) for entitled_max_task_pmem. Setting to 0\n",
1366 | memorystatus_entitled_max_task_footprint_mb); |
1367 | memorystatus_entitled_max_task_footprint_mb = 0; |
1368 | } |
1369 | } |
1370 | |
1371 | #endif /* CONFIG_JETSAM */ |
1372 | |
1373 | |
1374 | __private_extern__ void |
1375 | memorystatus_init(void) |
1376 | { |
1377 | kern_return_t result; |
1378 | int i; |
1379 | |
1380 | #if CONFIG_FREEZE |
1381 | memorystatus_freeze_jetsam_band = JETSAM_PRIORITY_FREEZER; |
1382 | memorystatus_frozen_processes_max = FREEZE_PROCESSES_MAX; |
1383 | memorystatus_frozen_shared_mb_max = ((MAX_FROZEN_SHARED_MB_PERCENT * max_task_footprint_mb) / 100); /* 10% of the system wide task limit */ |
1384 | memorystatus_freeze_shared_mb_per_process_max = (memorystatus_frozen_shared_mb_max / 4); |
1385 | memorystatus_freeze_pages_min = FREEZE_PAGES_MIN; |
1386 | memorystatus_freeze_pages_max = FREEZE_PAGES_MAX; |
1387 | memorystatus_max_frozen_demotions_daily = MAX_FROZEN_PROCESS_DEMOTIONS; |
1388 | memorystatus_thaw_count_demotion_threshold = MIN_THAW_DEMOTION_THRESHOLD; |
1389 | memorystatus_min_thaw_refreeze_threshold = MIN_THAW_REFREEZE_THRESHOLD; |
1390 | #endif /* CONFIG_FREEZE */ |
1391 | |
1392 | #if DEVELOPMENT || DEBUG |
1393 | if (kill_on_no_paging_space) { |
1394 | max_kill_priority = JETSAM_PRIORITY_MAX; |
1395 | } |
1396 | #endif |
1397 | // Note: no-op pending rdar://27006343 (Custom kernel log handles) |
memorystatus_log_handle = os_log_create("com.apple.xnu", "memorystatus");
1399 | |
1400 | /* Init buckets */ |
1401 | for (i = 0; i < MEMSTAT_BUCKET_COUNT; i++) { |
1402 | TAILQ_INIT(&memstat_bucket[i].list); |
1403 | memstat_bucket[i].count = 0; |
1404 | memstat_bucket[i].relaunch_high_count = 0; |
1405 | } |
memorystatus_idle_demotion_call = thread_call_allocate((thread_call_func_t)memorystatus_perform_idle_demotion, NULL);
1407 | |
nanoseconds_to_absolutetime((uint64_t)DEFERRED_IDLE_EXIT_TIME_SECS * NSEC_PER_SEC, &memorystatus_sysprocs_idle_delay_time);
nanoseconds_to_absolutetime((uint64_t)DEFERRED_IDLE_EXIT_TIME_SECS * NSEC_PER_SEC, &memorystatus_apps_idle_delay_time);
1410 | |
1411 | #if CONFIG_JETSAM |
1412 | bzero(memorystatus_jetsam_proc_name_panic, sizeof(memorystatus_jetsam_proc_name_panic)); |
if (PE_parse_boot_argn("jetsam_proc_name_panic", &memorystatus_jetsam_proc_name_panic, sizeof(memorystatus_jetsam_proc_name_panic))) {
1414 | /* |
1415 | * No bounds check to see if this is a valid cause. |
1416 | * This is a debugging aid. The callers should know precisely which cause they wish to track. |
1417 | */ |
PE_parse_boot_argn("jetsam_proc_cause_panic", &memorystatus_jetsam_proc_cause_panic, sizeof(memorystatus_jetsam_proc_cause_panic));
PE_parse_boot_argn("jetsam_proc_size_panic", &memorystatus_jetsam_proc_size_panic, sizeof(memorystatus_jetsam_proc_size_panic));
1420 | } |
1421 | |
1422 | if (memorystatus_swap_all_apps && vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) { |
panic("kern.swap_all_apps is not supported on this platform");
1424 | } |
1425 | |
1426 | /* |
1427 | * The aging bands cannot overlap with the JETSAM_PRIORITY_ELEVATED_INACTIVE |
1428 | * band and must be below it in priority. This is so that we don't have to make |
1429 | * our 'aging' code worry about a mix of processes, some of which need to age |
1430 | * and some others that need to stay elevated in the jetsam bands. |
1431 | */ |
1432 | assert(JETSAM_PRIORITY_ELEVATED_INACTIVE > system_procs_aging_band); |
1433 | assert(JETSAM_PRIORITY_ELEVATED_INACTIVE > applications_aging_band); |
1434 | |
1435 | /* Take snapshots for idle-exit kills by default? First check the boot-arg... */ |
if (!PE_parse_boot_argn("jetsam_idle_snapshot", &memorystatus_idle_snapshot, sizeof(memorystatus_idle_snapshot))) {
/* ...no boot-arg, so check the device tree */
PE_get_default("kern.jetsam_idle_snapshot", &memorystatus_idle_snapshot, sizeof(memorystatus_idle_snapshot));
1439 | } |
1440 | |
1441 | memorystatus_sysproc_aging_aggr_pages = (unsigned int)MEMSTAT_PERCENT_TOTAL_PAGES(MEMORYSTATUS_AGGR_SYSPROC_AGING_PERCENTAGE); |
1442 | |
1443 | if (max_mem <= MEMORYSTATUS_SMALL_MEMORY_THRESHOLD) { |
1444 | memorystatus_delta = (unsigned int)MEMSTAT_PERCENT_TOTAL_PAGES(MEMORYSTATUS_DELTA_PERCENTAGE_SMALL); |
1445 | } else { |
1446 | memorystatus_delta = (unsigned int)MEMSTAT_PERCENT_TOTAL_PAGES(MEMORYSTATUS_DELTA_PERCENTAGE_LARGE); |
1447 | } |
1448 | |
1449 | if (memorystatus_critical_threshold_mb != 0) { |
1450 | memorystatus_available_pages_critical_base = (unsigned int)atop_64((uint64_t)memorystatus_critical_threshold_mb << 20); |
1451 | } else if (max_mem <= MEMORYSTATUS_SMALL_MEMORY_THRESHOLD) { |
1452 | memorystatus_available_pages_critical_base = (unsigned int)MEMSTAT_PERCENT_TOTAL_PAGES(MEMORYSTATUS_CRITICAL_BASE_PERCENTAGE_SMALL); |
1453 | } else { |
1454 | memorystatus_available_pages_critical_base = (unsigned int)MEMSTAT_PERCENT_TOTAL_PAGES(MEMORYSTATUS_CRITICAL_BASE_PERCENTAGE_LARGE); |
1455 | } |
1456 | assert(memorystatus_available_pages_critical_base < (unsigned int)atop_64(max_mem)); |
1457 | |
1458 | /* |
1459 | * For historical reasons, devices with "medium"-sized memory configs have a different critical:idle:pressure ratio |
1460 | */ |
1461 | if ((memorystatus_idle_threshold_mb != 0)) { |
1462 | memorystatus_available_pages_critical_idle = (unsigned int)atop_64((uint64_t)memorystatus_idle_threshold_mb << 20); |
1463 | } else { |
1464 | if ((max_mem > MEMORYSTATUS_SMALL_MEMORY_THRESHOLD) && |
1465 | (max_mem <= MEMORYSTATUS_MEDIUM_MEMORY_THRESHOLD)) { |
1466 | memorystatus_available_pages_critical_idle = (MEMORYSTATUS_CRITICAL_IDLE_RATIO_NUM_MEDIUM * memorystatus_available_pages_critical_base) / |
1467 | MEMORYSTATUS_CRITICAL_IDLE_RATIO_DENOM_MEDIUM; |
1468 | } else { |
1469 | memorystatus_available_pages_critical_idle = (MEMORYSTATUS_CRITICAL_IDLE_RATIO_NUM * memorystatus_available_pages_critical_base) / |
1470 | MEMORYSTATUS_CRITICAL_IDLE_RATIO_DENOM; |
1471 | } |
1472 | } |
1473 | assert(memorystatus_available_pages_critical_idle < (unsigned int)atop_64(max_mem)); |
1474 | |
1475 | if (memorystatus_pressure_threshold_mb != 0) { |
1476 | memorystatus_available_pages_pressure = (unsigned int)atop_64((uint64_t)memorystatus_pressure_threshold_mb << 20); |
1477 | } else { |
1478 | if ((max_mem > MEMORYSTATUS_SMALL_MEMORY_THRESHOLD) && |
1479 | (max_mem <= MEMORYSTATUS_MEDIUM_MEMORY_THRESHOLD)) { |
1480 | memorystatus_available_pages_pressure = (MEMORYSTATUS_PRESSURE_RATIO_NUM_MEDIUM * memorystatus_available_pages_critical_base) / |
1481 | MEMORYSTATUS_PRESSURE_RATIO_DENOM_MEDIUM; |
1482 | } else { |
1483 | memorystatus_available_pages_pressure = (MEMORYSTATUS_PRESSURE_RATIO_NUM * memorystatus_available_pages_critical_base) / |
1484 | MEMORYSTATUS_PRESSURE_RATIO_DENOM; |
1485 | } |
1486 | } |
1487 | assert(memorystatus_available_pages_pressure < (unsigned int)atop_64(max_mem)); |
1488 | |
1489 | if (memorystatus_more_free_offset_mb != 0) { |
memorystatus_policy_more_free_offset_pages = (unsigned int)atop_64((uint64_t)memorystatus_more_free_offset_mb << 20);
1491 | } else { |
1492 | memorystatus_policy_more_free_offset_pages = (unsigned int)MEMSTAT_PERCENT_TOTAL_PAGES(MEMORYSTATUS_MORE_FREE_OFFSET_PERCENTAGE); |
1493 | } |
1494 | assert(memorystatus_policy_more_free_offset_pages < (unsigned int)atop_64(max_mem)); |
1495 | |
1496 | /* Set the swapin trigger in pages based on the maximum size allocated for each c_seg */ |
1497 | memorystatus_swapin_trigger_pages = (unsigned int) atop_64(memorystatus_swapin_trigger_segments * c_seg_allocsize); |
1498 | |
1499 | /* Jetsam Loop Detection */ |
1500 | if (max_mem <= (512 * 1024 * 1024)) { |
1501 | /* 512 MB devices */ |
1502 | memorystatus_jld_eval_period_msecs = 8000; /* 8000 msecs == 8 second window */ |
1503 | } else { |
1504 | /* 1GB and larger devices */ |
1505 | memorystatus_jld_eval_period_msecs = 6000; /* 6000 msecs == 6 second window */ |
1506 | } |
1507 | |
1508 | memorystatus_jld_enabled = TRUE; |
1509 | |
1510 | /* No contention at this point */ |
1511 | memorystatus_update_levels_locked(); |
1512 | |
1513 | initialize_entitled_max_task_limit(); |
1514 | #endif /* CONFIG_JETSAM */ |
1515 | |
1516 | memorystatus_jetsam_snapshot_max = maxproc; |
1517 | |
1518 | memorystatus_jetsam_snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + |
1519 | (sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_max); |
1520 | |
1521 | memorystatus_jetsam_snapshot = kalloc_data(memorystatus_jetsam_snapshot_size, Z_WAITOK | Z_ZERO); |
1522 | if (!memorystatus_jetsam_snapshot) { |
panic("Could not allocate memorystatus_jetsam_snapshot");
1524 | } |
1525 | |
1526 | #if CONFIG_FREEZE |
1527 | memorystatus_jetsam_snapshot_freezer_max = memorystatus_jetsam_snapshot_max / JETSAM_SNAPSHOT_FREEZER_MAX_FACTOR; |
1528 | memorystatus_jetsam_snapshot_freezer_size = sizeof(memorystatus_jetsam_snapshot_t) + |
1529 | (sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_freezer_max); |
1530 | |
1531 | memorystatus_jetsam_snapshot_freezer = |
1532 | zalloc_permanent(memorystatus_jetsam_snapshot_freezer_size, ZALIGN_PTR); |
1533 | #endif /* CONFIG_FREEZE */ |
1534 | |
nanoseconds_to_absolutetime((uint64_t)JETSAM_SNAPSHOT_TIMEOUT_SECS * NSEC_PER_SEC, &memorystatus_jetsam_snapshot_timeout);
1536 | |
memset(&memorystatus_at_boot_snapshot, 0, sizeof(memorystatus_jetsam_snapshot_t));
1538 | |
1539 | #if CONFIG_FREEZE |
1540 | if (memorystatus_freeze_threshold_mb != 0) { |
1541 | memorystatus_freeze_threshold = (unsigned int)atop_64((uint64_t)memorystatus_freeze_threshold_mb << 20); |
1542 | } else { |
1543 | memorystatus_freeze_threshold = (unsigned int)MEMSTAT_PERCENT_TOTAL_PAGES(MEMORYSTATUS_FREEZE_THRESHOLD_PERCENTAGE); |
1544 | } |
1545 | assert(memorystatus_freeze_threshold < (unsigned int)atop_64(max_mem)); |
1546 | |
1547 | if (memorystatus_swap_all_apps) { |
1548 | /* |
1549 | * Swap is enabled, so we expect a larger working set & larger apps. |
1550 | * Adjust thresholds accordingly. |
1551 | */ |
1552 | memorystatus_freeze_configure_for_swap(); |
1553 | } |
1554 | #endif |
1555 | |
1556 | /* Check the boot-arg to see if fast jetsam is allowed */ |
if (!PE_parse_boot_argn("fast_jetsam_enabled", &fast_jetsam_enabled, sizeof(fast_jetsam_enabled))) {
1558 | fast_jetsam_enabled = 0; |
1559 | } |
1560 | |
1561 | /* Check the boot-arg to configure the maximum number of jetsam threads */ |
if (!PE_parse_boot_argn("max_jetsam_threads", &max_jetsam_threads, sizeof(max_jetsam_threads))) {
1563 | max_jetsam_threads = JETSAM_THREADS_LIMIT; |
1564 | } |
1565 | |
1566 | /* Restrict the maximum number of jetsam threads to JETSAM_THREADS_LIMIT */ |
1567 | if (max_jetsam_threads > JETSAM_THREADS_LIMIT) { |
1568 | max_jetsam_threads = JETSAM_THREADS_LIMIT; |
1569 | } |
1570 | |
1571 | /* For low CPU systems disable fast jetsam mechanism */ |
1572 | if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { |
1573 | max_jetsam_threads = 1; |
1574 | fast_jetsam_enabled = 0; |
1575 | } |
1576 | |
1577 | #if DEVELOPMENT || DEBUG |
if (PE_parse_boot_argn("-memorystatus-skip-fg-notify", &i, sizeof(i))) {
1579 | memorystatus_should_issue_fg_band_notify = false; |
1580 | } |
1581 | #endif /* DEVELOPMENT || DEBUG */ |
1582 | |
1583 | /* Initialize the jetsam_threads state array */ |
1584 | jetsam_threads = zalloc_permanent(sizeof(jetsam_thread_state_t) * |
1585 | max_jetsam_threads, ZALIGN(jetsam_thread_state_t)); |
1586 | |
1587 | /* Initialize all the jetsam threads */ |
1588 | for (i = 0; i < max_jetsam_threads; i++) { |
1589 | jetsam_threads[i].inited = FALSE; |
1590 | jetsam_threads[i].index = i; |
result = kernel_thread_start_priority(memorystatus_thread, NULL, 95 /* MAXPRI_KERNEL */, &jetsam_threads[i].thread);
if (result != KERN_SUCCESS) {
panic("Could not create memorystatus_thread %d", i);
}
thread_deallocate(jetsam_threads[i].thread);
1596 | } |
1597 | |
1598 | #if VM_PRESSURE_EVENTS |
1599 | memorystatus_notify_init(); |
1600 | #endif /* VM_PRESSURE_EVENTS */ |
1601 | } |
1602 | |
1603 | #if CONFIG_JETSAM |
1604 | bool |
1605 | memorystatus_disable_swap(void) |
1606 | { |
1607 | #if DEVELOPMENT || DEBUG |
1608 | int boot_arg_val = 0; |
if (PE_parse_boot_argn("kern.swap_all_apps", &boot_arg_val, sizeof(boot_arg_val))) {
1610 | if (boot_arg_val) { |
1611 | /* Can't disable app swap if it was set via a boot-arg */ |
1612 | return false; |
1613 | } |
1614 | } |
1615 | #endif /* DEVELOPMENT || DEBUG */ |
1616 | memorystatus_swap_all_apps = false; |
1617 | #if CONFIG_FREEZE |
1618 | /* Go back to the smaller freezer thresholds */ |
1619 | memorystatus_freeze_disable_swap(); |
1620 | #endif /* CONFIG_FREEZE */ |
1621 | initialize_entitled_max_task_limit(); |
1622 | return true; |
1623 | } |
1624 | #endif /* CONFIG_JETSAM */ |
1625 | |
1626 | /* Centralised for the purposes of allowing panic-on-jetsam */ |
1627 | extern void |
1628 | vm_run_compactor(void); |
1629 | extern void |
1630 | vm_wake_compactor_swapper(void); |
1631 | |
1632 | /* |
* The jetsam no-frills kill call
1634 | * Return: 0 on success |
1635 | * error code on failure (EINVAL...) |
1636 | */ |
1637 | static int |
1638 | jetsam_do_kill(proc_t p, int jetsam_flags, os_reason_t jetsam_reason) |
1639 | { |
1640 | int error = 0; |
1641 | error = exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL, FALSE, FALSE, jetsam_flags, jetsam_reason); |
1642 | return error; |
1643 | } |
1644 | |
1645 | /* |
1646 | * Wrapper for processes exiting with memorystatus details |
1647 | */ |
1648 | static boolean_t |
memorystatus_do_kill(proc_t p, uint32_t cause, os_reason_t jetsam_reason, uint64_t *footprint_of_killed_proc)
1650 | { |
1651 | int error = 0; |
1652 | __unused pid_t victim_pid = proc_getpid(p); |
uint64_t footprint = get_task_phys_footprint(proc_task(p));
1654 | #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) |
1655 | int32_t memstat_effectivepriority = p->p_memstat_effectivepriority; |
1656 | #endif /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ |
1657 | |
1658 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_DO_KILL) | DBG_FUNC_START, |
1659 | victim_pid, cause, vm_page_free_count, footprint); |
1660 | DTRACE_MEMORYSTATUS4(memorystatus_do_kill, proc_t, p, os_reason_t, jetsam_reason, uint32_t, cause, uint64_t, footprint); |
1661 | |
1662 | #if CONFIG_JETSAM |
1663 | if (*p->p_name && !strncmp(memorystatus_jetsam_proc_name_panic, p->p_name, sizeof(p->p_name))) { /* name */ |
1664 | if ((!memorystatus_jetsam_proc_cause_panic || cause == memorystatus_jetsam_proc_cause_panic) && /* cause */ |
1665 | (!memorystatus_jetsam_proc_size_panic || (footprint >> 20) >= memorystatus_jetsam_proc_size_panic)) { /* footprint */ |
panic("memorystatus_do_kill(): requested panic on jetsam of %s (cause: %d and footprint: %llu mb)",
1667 | memorystatus_jetsam_proc_name_panic, cause, footprint >> 20); |
1668 | } |
1669 | } |
1670 | #else /* CONFIG_JETSAM */ |
1671 | #pragma unused(cause) |
1672 | #endif /* CONFIG_JETSAM */ |
1673 | |
1674 | if (p->p_memstat_effectivepriority >= JETSAM_PRIORITY_FOREGROUND) { |
memorystatus_log(
"memorystatus: killing process %d [%s] in high band %s (%d) - memorystatus_available_pages: %llu\n",
proc_getpid(p), (*p->p_name ? p->p_name : "unknown"),
memorystatus_priority_band_name(p->p_memstat_effectivepriority), p->p_memstat_effectivepriority,
(uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES);
1680 | } |
1681 | |
1682 | /* |
1683 | * The jetsam_reason (os_reason_t) has enough information about the kill cause. |
1684 | * We don't really need jetsam_flags anymore, so it's okay that not all possible kill causes have been mapped. |
1685 | */ |
1686 | int jetsam_flags = P_LTERM_JETSAM; |
1687 | switch (cause) { |
1688 | case kMemorystatusKilledHiwat: jetsam_flags |= P_JETSAM_HIWAT; break; |
1689 | case kMemorystatusKilledVnodes: jetsam_flags |= P_JETSAM_VNODE; break; |
1690 | case kMemorystatusKilledVMPageShortage: jetsam_flags |= P_JETSAM_VMPAGESHORTAGE; break; |
1691 | case kMemorystatusKilledVMCompressorThrashing: |
1692 | case kMemorystatusKilledVMCompressorSpaceShortage: jetsam_flags |= P_JETSAM_VMTHRASHING; break; |
1693 | case kMemorystatusKilledFCThrashing: jetsam_flags |= P_JETSAM_FCTHRASHING; break; |
1694 | case kMemorystatusKilledPerProcessLimit: jetsam_flags |= P_JETSAM_PID; break; |
1695 | case kMemorystatusKilledIdleExit: jetsam_flags |= P_JETSAM_IDLEEXIT; break; |
1696 | } |
1697 | /* jetsam_do_kill drops a reference. */ |
os_reason_ref(jetsam_reason);
1699 | error = jetsam_do_kill(p, jetsam_flags, jetsam_reason); |
1700 | *footprint_of_killed_proc = ((error == 0) ? footprint : 0); |
1701 | |
1702 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_DO_KILL) | DBG_FUNC_END, |
1703 | victim_pid, memstat_effectivepriority, vm_page_free_count, error); |
1704 | |
1705 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_COMPACTOR_RUN) | DBG_FUNC_START, |
1706 | victim_pid, cause, vm_page_free_count, *footprint_of_killed_proc); |
1707 | |
1708 | if (jetsam_reason->osr_code == JETSAM_REASON_VNODE) { |
1709 | /* |
* vnode jetsams are synchronous and not caused by memory pressure.
* Running the compactor on this thread adds significant latency to the filesystem operation
* that triggered this jetsam.
* Kick off the compactor thread asynchronously instead.
1714 | */ |
1715 | vm_wake_compactor_swapper(); |
1716 | } else { |
1717 | vm_run_compactor(); |
1718 | } |
1719 | |
1720 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_COMPACTOR_RUN) | DBG_FUNC_END, |
1721 | victim_pid, cause, vm_page_free_count); |
1722 | |
os_reason_free(jetsam_reason);
1724 | return error == 0; |
1725 | } |
1726 | |
1727 | /* |
1728 | * Node manipulation |
1729 | */ |
1730 | |
1731 | static void |
1732 | memorystatus_check_levels_locked(void) |
1733 | { |
1734 | #if CONFIG_JETSAM |
1735 | /* Update levels */ |
1736 | memorystatus_update_levels_locked(); |
1737 | #else /* CONFIG_JETSAM */ |
1738 | /* |
1739 | * Nothing to do here currently since we update |
1740 | * memorystatus_available_pages in vm_pressure_response. |
1741 | */ |
1742 | #endif /* CONFIG_JETSAM */ |
1743 | } |
1744 | |
1745 | /* |
1746 | * Pin a process to a particular jetsam band when it is in the background i.e. not doing active work. |
1747 | * For an application: that means no longer in the FG band |
1748 | * For a daemon: that means no longer in its 'requested' jetsam priority band |
1749 | */ |
1750 | |
1751 | int |
1752 | memorystatus_update_inactive_jetsam_priority_band(pid_t pid, uint32_t op_flags, int jetsam_prio, boolean_t effective_now) |
1753 | { |
1754 | int error = 0; |
1755 | boolean_t enable = FALSE; |
1756 | proc_t p = NULL; |
1757 | |
1758 | if (op_flags == MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE) { |
1759 | enable = TRUE; |
1760 | } else if (op_flags == MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_DISABLE) { |
1761 | enable = FALSE; |
1762 | } else { |
1763 | return EINVAL; |
1764 | } |
1765 | |
1766 | p = proc_find(pid); |
1767 | if (p != NULL) { |
1768 | if ((enable && ((p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) == P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND)) || |
1769 | (!enable && ((p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) == 0))) { |
1770 | /* |
1771 | * No change in state. |
1772 | */ |
1773 | } else { |
1774 | proc_list_lock(); |
1775 | |
1776 | if (enable) { |
1777 | p->p_memstat_state |= P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND; |
1778 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
1779 | |
1780 | if (effective_now) { |
1781 | if (p->p_memstat_effectivepriority < jetsam_prio) { |
1782 | if (memorystatus_highwater_enabled) { |
1783 | /* |
1784 | * Process is about to transition from |
1785 | * inactive --> active |
1786 | * assign active state |
1787 | */ |
1788 | boolean_t is_fatal; |
1789 | boolean_t use_active = TRUE; |
1790 | CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal); |
1791 | task_set_phys_footprint_limit_internal(proc_task(p), (p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit : -1, NULL, use_active, is_fatal); |
1792 | } |
memorystatus_update_priority_locked(p, jetsam_prio, FALSE, FALSE);
1794 | } |
1795 | } else { |
1796 | if (isProcessInAgingBands(p)) { |
1797 | memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, FALSE, TRUE); |
1798 | } |
1799 | } |
1800 | } else { |
1801 | p->p_memstat_state &= ~P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND; |
1802 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
1803 | |
1804 | if (effective_now) { |
1805 | if (p->p_memstat_effectivepriority == jetsam_prio) { |
1806 | memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, FALSE, TRUE); |
1807 | } |
1808 | } else { |
1809 | if (isProcessInAgingBands(p)) { |
1810 | memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, FALSE, TRUE); |
1811 | } |
1812 | } |
1813 | } |
1814 | |
1815 | proc_list_unlock(); |
1816 | } |
1817 | proc_rele(p); |
1818 | error = 0; |
1819 | } else { |
1820 | error = ESRCH; |
1821 | } |
1822 | |
1823 | return error; |
1824 | } |
1825 | |
1826 | static void |
1827 | memorystatus_perform_idle_demotion(__unused void *spare1, __unused void *spare2) |
1828 | { |
1829 | proc_t p; |
1830 | uint64_t current_time = 0, idle_delay_time = 0; |
1831 | int demote_prio_band = 0; |
1832 | memstat_bucket_t *demotion_bucket; |
1833 | |
memorystatus_log_debug("memorystatus_perform_idle_demotion()\n");
1835 | |
1836 | if (!system_procs_aging_band && !applications_aging_band) { |
1837 | return; |
1838 | } |
1839 | |
1840 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_IDLE_DEMOTE) | DBG_FUNC_START); |
1841 | |
1842 | current_time = mach_absolute_time(); |
1843 | |
1844 | proc_list_lock(); |
1845 | |
1846 | demote_prio_band = JETSAM_PRIORITY_IDLE + 1; |
1847 | |
1848 | for (; demote_prio_band < JETSAM_PRIORITY_MAX; demote_prio_band++) { |
1849 | if (demote_prio_band != system_procs_aging_band && demote_prio_band != applications_aging_band) { |
1850 | continue; |
1851 | } |
1852 | |
1853 | demotion_bucket = &memstat_bucket[demote_prio_band]; |
1854 | p = TAILQ_FIRST(&demotion_bucket->list); |
1855 | |
1856 | while (p) { |
memorystatus_log_debug("memorystatus_perform_idle_demotion() found %d\n", proc_getpid(p));
1858 | |
1859 | assert(p->p_memstat_idledeadline); |
1860 | |
1861 | assert(p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS); |
1862 | |
1863 | if (current_time >= p->p_memstat_idledeadline) { |
if ((isSysProc(p) &&
((p->p_memstat_dirty & (P_DIRTY_IDLE_EXIT_ENABLED | P_DIRTY_IS_DIRTY)) != P_DIRTY_IDLE_EXIT_ENABLED)) || /* system proc marked dirty */
task_has_assertions((struct task *)(proc_task(p)))) { /* has outstanding assertions which might indicate outstanding work too */
1867 | idle_delay_time = (isSysProc(p)) ? memorystatus_sysprocs_idle_time(p) : memorystatus_apps_idle_time(p); |
1868 | |
1869 | p->p_memstat_idledeadline += idle_delay_time; |
1870 | p = TAILQ_NEXT(p, p_memstat_list); |
1871 | } else { |
1872 | proc_t next_proc = NULL; |
1873 | |
1874 | next_proc = TAILQ_NEXT(p, p_memstat_list); |
1875 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
1876 | |
1877 | memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, false, true); |
1878 | |
1879 | p = next_proc; |
1880 | continue; |
1881 | } |
1882 | } else { |
1883 | // No further candidates |
1884 | break; |
1885 | } |
1886 | } |
1887 | } |
1888 | |
1889 | memorystatus_reschedule_idle_demotion_locked(); |
1890 | |
1891 | proc_list_unlock(); |
1892 | |
1893 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_IDLE_DEMOTE) | DBG_FUNC_END); |
1894 | } |
1895 | |
1896 | static void |
1897 | memorystatus_schedule_idle_demotion_locked(proc_t p, boolean_t set_state) |
1898 | { |
1899 | boolean_t present_in_sysprocs_aging_bucket = FALSE; |
1900 | boolean_t present_in_apps_aging_bucket = FALSE; |
1901 | uint64_t idle_delay_time = 0; |
1902 | |
1903 | if (!system_procs_aging_band && !applications_aging_band) { |
1904 | return; |
1905 | } |
1906 | |
1907 | if ((p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) || |
1908 | (p->p_memstat_state & P_MEMSTAT_PRIORITY_ASSERTION)) { |
1909 | /* |
1910 | * This process isn't going to be making the trip to the lower bands. |
1911 | */ |
1912 | return; |
1913 | } |
1914 | |
1915 | if (isProcessInAgingBands(p)) { |
1916 | assert((p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) != P_DIRTY_AGING_IN_PROGRESS); |
1917 | |
1918 | if (isSysProc(p) && system_procs_aging_band) { |
1919 | present_in_sysprocs_aging_bucket = TRUE; |
1920 | } else if (isApp(p) && applications_aging_band) { |
1921 | present_in_apps_aging_bucket = TRUE; |
1922 | } |
1923 | } |
1924 | |
1925 | assert(!present_in_sysprocs_aging_bucket); |
1926 | assert(!present_in_apps_aging_bucket); |
1927 | |
memorystatus_log_info(
"memorystatus_schedule_idle_demotion_locked: scheduling demotion to idle band for pid %d (dirty:0x%x, set_state %d, demotions %d).\n",
proc_getpid(p), p->p_memstat_dirty, set_state, (memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps));
1931 | |
1932 | if (isSysProc(p)) { |
1933 | assert((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED); |
1934 | } |
1935 | |
1936 | idle_delay_time = (isSysProc(p)) ? memorystatus_sysprocs_idle_time(p) : memorystatus_apps_idle_time(p); |
1937 | if (set_state) { |
1938 | p->p_memstat_dirty |= P_DIRTY_AGING_IN_PROGRESS; |
1939 | p->p_memstat_idledeadline = mach_absolute_time() + idle_delay_time; |
1940 | } |
1941 | |
1942 | assert(p->p_memstat_idledeadline); |
1943 | |
1944 | if (isSysProc(p) && present_in_sysprocs_aging_bucket == FALSE) { |
1945 | memorystatus_scheduled_idle_demotions_sysprocs++; |
1946 | } else if (isApp(p) && present_in_apps_aging_bucket == FALSE) { |
1947 | memorystatus_scheduled_idle_demotions_apps++; |
1948 | } |
1949 | } |
1950 | |
1951 | void |
1952 | memorystatus_invalidate_idle_demotion_locked(proc_t p, boolean_t clear_state) |
1953 | { |
1954 | boolean_t present_in_sysprocs_aging_bucket = FALSE; |
1955 | boolean_t present_in_apps_aging_bucket = FALSE; |
1956 | |
1957 | if (!system_procs_aging_band && !applications_aging_band) { |
1958 | return; |
1959 | } |
1960 | |
1961 | if ((p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) == 0) { |
1962 | return; |
1963 | } |
1964 | |
1965 | if (isProcessInAgingBands(p)) { |
1966 | assert((p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) == P_DIRTY_AGING_IN_PROGRESS); |
1967 | |
1968 | if (isSysProc(p) && system_procs_aging_band) { |
1969 | assert(p->p_memstat_effectivepriority == system_procs_aging_band); |
1970 | assert(p->p_memstat_idledeadline); |
1971 | present_in_sysprocs_aging_bucket = TRUE; |
1972 | } else if (isApp(p) && applications_aging_band) { |
1973 | assert(p->p_memstat_effectivepriority == applications_aging_band); |
1974 | assert(p->p_memstat_idledeadline); |
1975 | present_in_apps_aging_bucket = TRUE; |
1976 | } |
1977 | } |
1978 | |
memorystatus_log_info(
"memorystatus_invalidate_idle_demotion(): invalidating demotion to idle band for pid %d (clear_state %d, demotions %d).\n",
proc_getpid(p), clear_state, (memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps));
1982 | |
1983 | |
1984 | if (clear_state) { |
1985 | p->p_memstat_idledeadline = 0; |
1986 | p->p_memstat_dirty &= ~P_DIRTY_AGING_IN_PROGRESS; |
1987 | } |
1988 | |
1989 | if (isSysProc(p) && present_in_sysprocs_aging_bucket == TRUE) { |
1990 | memorystatus_scheduled_idle_demotions_sysprocs--; |
1991 | assert(memorystatus_scheduled_idle_demotions_sysprocs >= 0); |
1992 | } else if (isApp(p) && present_in_apps_aging_bucket == TRUE) { |
1993 | memorystatus_scheduled_idle_demotions_apps--; |
1994 | assert(memorystatus_scheduled_idle_demotions_apps >= 0); |
1995 | } |
1996 | |
1997 | assert((memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps) >= 0); |
1998 | } |
1999 | |
2000 | static void |
2001 | memorystatus_reschedule_idle_demotion_locked(void) |
2002 | { |
2003 | if (!system_procs_aging_band && !applications_aging_band) { |
2004 | return; |
2005 | } |
2006 | |
2007 | if (0 == (memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps)) { |
2008 | if (memstat_idle_demotion_deadline) { |
2009 | /* Transitioned 1->0, so cancel next call */ |
thread_call_cancel(memorystatus_idle_demotion_call);
2011 | memstat_idle_demotion_deadline = 0; |
2012 | } |
2013 | } else { |
2014 | memstat_bucket_t *demotion_bucket; |
2015 | proc_t p = NULL, p1 = NULL, p2 = NULL; |
2016 | |
2017 | if (system_procs_aging_band) { |
2018 | demotion_bucket = &memstat_bucket[system_procs_aging_band]; |
2019 | p1 = TAILQ_FIRST(&demotion_bucket->list); |
2020 | |
2021 | p = p1; |
2022 | } |
2023 | |
2024 | if (applications_aging_band) { |
2025 | demotion_bucket = &memstat_bucket[applications_aging_band]; |
2026 | p2 = TAILQ_FIRST(&demotion_bucket->list); |
2027 | |
2028 | if (p1 && p2) { |
2029 | p = (p1->p_memstat_idledeadline > p2->p_memstat_idledeadline) ? p2 : p1; |
2030 | } else { |
2031 | p = (p1 == NULL) ? p2 : p1; |
2032 | } |
2033 | } |
2034 | |
2035 | assert(p); |
2036 | |
2037 | if (p != NULL) { |
2038 | assert(p && p->p_memstat_idledeadline); |
2039 | if (memstat_idle_demotion_deadline != p->p_memstat_idledeadline) { |
thread_call_enter_delayed(memorystatus_idle_demotion_call, p->p_memstat_idledeadline);
2041 | memstat_idle_demotion_deadline = p->p_memstat_idledeadline; |
2042 | } |
2043 | } |
2044 | } |
2045 | } |
2046 | |
2047 | /* |
2048 | * List manipulation |
2049 | */ |
2050 | |
2051 | int |
2052 | memorystatus_add(proc_t p, boolean_t locked) |
2053 | { |
2054 | memstat_bucket_t *bucket; |
2055 | |
memorystatus_log_debug("memorystatus_list_add(): adding pid %d with priority %d.\n",
2057 | proc_getpid(p), p->p_memstat_effectivepriority); |
2058 | |
2059 | if (!locked) { |
2060 | proc_list_lock(); |
2061 | } |
2062 | |
2063 | DTRACE_MEMORYSTATUS2(memorystatus_add, proc_t, p, int32_t, p->p_memstat_effectivepriority); |
2064 | |
2065 | /* Processes marked internal do not have priority tracked */ |
2066 | if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { |
2067 | goto exit; |
2068 | } |
2069 | |
2070 | /* |
2071 | * Opt out system processes from being frozen by default. |
2072 | * For coalition-based freezing, we only want to freeze sysprocs that have specifically opted in. |
2073 | */ |
2074 | if (isSysProc(p)) { |
2075 | p->p_memstat_state |= P_MEMSTAT_FREEZE_DISABLED; |
2076 | } |
2077 | #if CONFIG_FREEZE |
2078 | memorystatus_freeze_init_proc(p); |
2079 | #endif |
2080 | |
2081 | bucket = &memstat_bucket[p->p_memstat_effectivepriority]; |
2082 | |
2083 | if (isSysProc(p) && system_procs_aging_band && (p->p_memstat_effectivepriority == system_procs_aging_band)) { |
2084 | assert(bucket->count == memorystatus_scheduled_idle_demotions_sysprocs - 1); |
2085 | } else if (isApp(p) && applications_aging_band && (p->p_memstat_effectivepriority == applications_aging_band)) { |
2086 | assert(bucket->count == memorystatus_scheduled_idle_demotions_apps - 1); |
2087 | } else if (p->p_memstat_effectivepriority == JETSAM_PRIORITY_IDLE) { |
2088 | /* |
2089 | * Entering the idle band. |
2090 | * Record idle start time. |
2091 | */ |
2092 | p->p_memstat_idle_start = mach_absolute_time(); |
2093 | } |
2094 | |
2095 | TAILQ_INSERT_TAIL(&bucket->list, p, p_memstat_list); |
2096 | bucket->count++; |
2097 | if (p->p_memstat_relaunch_flags & (P_MEMSTAT_RELAUNCH_HIGH)) { |
2098 | bucket->relaunch_high_count++; |
2099 | } |
2100 | |
2101 | memorystatus_list_count++; |
2102 | |
2103 | memorystatus_check_levels_locked(); |
2104 | |
2105 | exit: |
2106 | if (!locked) { |
2107 | proc_list_unlock(); |
2108 | } |
2109 | |
2110 | return 0; |
2111 | } |
2112 | |
2113 | /* |
2114 | * Description: |
* Moves a process from one jetsam bucket to another,
* which changes the LRU position of the process.
2117 | * |
2118 | * Monitors transition between buckets and if necessary |
2119 | * will update cached memory limits accordingly. |
2120 | * |
2121 | * skip_demotion_check: |
2122 | * - if the 'jetsam aging policy' is NOT 'legacy': |
2123 | * When this flag is TRUE, it means we are going |
2124 | * to age the ripe processes out of the aging bands and into the |
2125 | * IDLE band and apply their inactive memory limits. |
2126 | * |
2127 | * - if the 'jetsam aging policy' is 'legacy': |
2128 | * When this flag is TRUE, it might mean the above aging mechanism |
2129 | * OR |
2130 | * It might be that we have a process that has used up its 'idle deferral' |
2131 | * stay that is given to it once per lifetime. And in this case, the process |
2132 | * won't be going through any aging codepaths. But we still need to apply |
2133 | * the right inactive limits and so we explicitly set this to TRUE if the |
2134 | * new priority for the process is the IDLE band. |
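*
* Illustrative call, as used by the aging paths in this file:
*
*     memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, FALSE, TRUE);
*
* i.e. tail-insert into the IDLE band with skip_demotion_check == TRUE so
* that the inactive memory limits are applied on the way down.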
2135 | */ |
2136 | void |
2137 | memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_insert, boolean_t skip_demotion_check) |
2138 | { |
2139 | memstat_bucket_t *old_bucket, *new_bucket; |
2140 | |
2141 | assert(priority < MEMSTAT_BUCKET_COUNT); |
2142 | |
2143 | /* Ensure that exit isn't underway, leaving the proc retained but removed from its bucket */ |
2144 | if (proc_list_exited(p)) { |
2145 | return; |
2146 | } |
2147 | |
memorystatus_log_info("memorystatus_update_priority_locked(): setting %s(%d) to priority %d, inserting at %s\n",
(*p->p_name ? p->p_name : "unknown"), proc_getpid(p), priority, head_insert ? "head" : "tail");
2150 | |
2151 | DTRACE_MEMORYSTATUS3(memorystatus_update_priority, proc_t, p, int32_t, p->p_memstat_effectivepriority, int, priority); |
2152 | |
2153 | old_bucket = &memstat_bucket[p->p_memstat_effectivepriority]; |
2154 | |
2155 | if (skip_demotion_check == FALSE) { |
2156 | if (isSysProc(p)) { |
2157 | /* |
2158 | * For system processes, the memorystatus_dirty_* routines take care of adding/removing |
2159 | * the processes from the aging bands and balancing the demotion counts. |
2160 | * We can, however, override that if the process has an 'elevated inactive jetsam band' attribute. |
2161 | */ |
2162 | |
2163 | if (p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) { |
2164 | /* |
2165 | * 2 types of processes can use the non-standard elevated inactive band: |
2166 | * - Frozen processes that always land in memorystatus_freeze_jetsam_band |
2167 | * OR |
2168 | * - processes that specifically opt-in to the elevated inactive support e.g. docked processes. |
2169 | */ |
2170 | #if CONFIG_FREEZE |
2171 | if (p->p_memstat_state & P_MEMSTAT_FROZEN) { |
2172 | if (priority <= memorystatus_freeze_jetsam_band) { |
2173 | priority = memorystatus_freeze_jetsam_band; |
2174 | } |
2175 | } else |
2176 | #endif /* CONFIG_FREEZE */ |
2177 | { |
2178 | if (priority <= JETSAM_PRIORITY_ELEVATED_INACTIVE) { |
2179 | priority = JETSAM_PRIORITY_ELEVATED_INACTIVE; |
2180 | } |
2181 | } |
2182 | assert(!(p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS)); |
2183 | } |
2184 | } else if (isApp(p)) { |
2185 | /* |
2186 | * Check to see if the application is being lowered in jetsam priority. If so, and: |
2187 | * - it has an 'elevated inactive jetsam band' attribute, then put it in the appropriate band. |
2188 | * - it is a normal application, then let it age in the aging band if that policy is in effect. |
2189 | */ |
2190 | |
2191 | if (p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) { |
2192 | #if CONFIG_FREEZE |
2193 | if (p->p_memstat_state & P_MEMSTAT_FROZEN) { |
2194 | if (priority <= memorystatus_freeze_jetsam_band) { |
2195 | priority = memorystatus_freeze_jetsam_band; |
2196 | } |
2197 | } else |
2198 | #endif /* CONFIG_FREEZE */ |
2199 | { |
2200 | if (priority <= JETSAM_PRIORITY_ELEVATED_INACTIVE) { |
2201 | priority = JETSAM_PRIORITY_ELEVATED_INACTIVE; |
2202 | } |
2203 | } |
2204 | } else { |
2205 | if (applications_aging_band) { |
2206 | if (p->p_memstat_effectivepriority == applications_aging_band) { |
2207 | assert(old_bucket->count == (memorystatus_scheduled_idle_demotions_apps + 1)); |
2208 | } |
2209 | |
2210 | if (priority <= applications_aging_band) { |
2211 | assert(!(p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS)); |
2212 | priority = applications_aging_band; |
2213 | memorystatus_schedule_idle_demotion_locked(p, TRUE); |
2214 | } |
2215 | } |
2216 | } |
2217 | } |
2218 | } |
2219 | |
2220 | if ((system_procs_aging_band && (priority == system_procs_aging_band)) || (applications_aging_band && (priority == applications_aging_band))) { |
2221 | assert(p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS); |
2222 | } |
2223 | |
2224 | #if DEVELOPMENT || DEBUG |
2225 | if (priority == JETSAM_PRIORITY_IDLE && /* if the process is on its way into the IDLE band */ |
2226 | (system_procs_aging_band && applications_aging_band) && /* we have support for _both_ aging bands */ |
2227 | (skip_demotion_check == FALSE) && /* and it isn't via the path that will set the INACTIVE memlimits */ |
2228 | (p->p_memstat_dirty & P_DIRTY_TRACK) && /* and it has 'DIRTY' tracking enabled */ |
2229 | ((p->p_memstat_memlimit != p->p_memstat_memlimit_inactive) || /* and we notice that the current limit isn't the right value (inactive) */ |
2230 | ((p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL) ? (!(p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT)) : (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT)))) { /* OR type (fatal vs non-fatal) */ |
memorystatus_log_error("memorystatus_update_priority_locked: on %s with 0x%x, prio: %d and %d\n",
p->p_name, p->p_memstat_state, priority, p->p_memstat_memlimit); /* then we must catch this */
2233 | } |
2234 | #endif /* DEVELOPMENT || DEBUG */ |
2235 | |
2236 | TAILQ_REMOVE(&old_bucket->list, p, p_memstat_list); |
2237 | old_bucket->count--; |
2238 | if (p->p_memstat_relaunch_flags & (P_MEMSTAT_RELAUNCH_HIGH)) { |
2239 | old_bucket->relaunch_high_count--; |
2240 | } |
2241 | |
2242 | new_bucket = &memstat_bucket[priority]; |
2243 | if (head_insert) { |
2244 | TAILQ_INSERT_HEAD(&new_bucket->list, p, p_memstat_list); |
2245 | } else { |
2246 | TAILQ_INSERT_TAIL(&new_bucket->list, p, p_memstat_list); |
2247 | } |
2248 | new_bucket->count++; |
2249 | if (p->p_memstat_relaunch_flags & (P_MEMSTAT_RELAUNCH_HIGH)) { |
2250 | new_bucket->relaunch_high_count++; |
2251 | } |
2252 | |
2253 | if (memorystatus_highwater_enabled) { |
2254 | boolean_t is_fatal; |
2255 | boolean_t use_active; |
2256 | |
2257 | /* |
2258 | * If cached limit data is updated, then the limits |
2259 | * will be enforced by writing to the ledgers. |
2260 | */ |
2261 | boolean_t ledger_update_needed = TRUE; |
2262 | |
2263 | /* |
2264 | * Here, we must update the cached memory limit if the task |
2265 | * is transitioning between: |
2266 | * active <--> inactive |
2267 | * FG <--> BG |
2268 | * but: |
2269 | * dirty <--> clean is ignored |
2270 | * |
2271 | * We bypass non-idle processes that have opted into dirty tracking because |
2272 | * a move between buckets does not imply a transition between the |
2273 | * dirty <--> clean state. |
2274 | */ |
2275 | |
2276 | if (p->p_memstat_dirty & P_DIRTY_TRACK) { |
2277 | if (skip_demotion_check == TRUE && priority == JETSAM_PRIORITY_IDLE) { |
2278 | CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal); |
2279 | use_active = FALSE; |
2280 | } else { |
2281 | ledger_update_needed = FALSE; |
2282 | } |
2283 | } else if ((priority >= JETSAM_PRIORITY_FOREGROUND) && (p->p_memstat_effectivepriority < JETSAM_PRIORITY_FOREGROUND)) { |
2284 | /* |
2285 | * inactive --> active |
2286 | * BG --> FG |
2287 | * assign active state |
2288 | */ |
2289 | CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal); |
2290 | use_active = TRUE; |
2291 | } else if ((priority < JETSAM_PRIORITY_FOREGROUND) && (p->p_memstat_effectivepriority >= JETSAM_PRIORITY_FOREGROUND)) { |
2292 | /* |
2293 | * active --> inactive |
2294 | * FG --> BG |
2295 | * assign inactive state |
2296 | */ |
2297 | CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal); |
2298 | use_active = FALSE; |
2299 | } else { |
2300 | /* |
2301 | * The transition between jetsam priority buckets apparently did |
2302 | * not affect active/inactive state. |
2303 | * This is not unusual... especially during startup when |
2304 | * processes are getting established in their respective bands. |
2305 | */ |
2306 | ledger_update_needed = FALSE; |
2307 | } |
2308 | |
2309 | /* |
2310 | * Enforce the new limits by writing to the ledger |
2311 | */ |
2312 | if (ledger_update_needed) { |
2313 | task_set_phys_footprint_limit_internal(proc_task(p), (p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit : -1, NULL, use_active, is_fatal); |
2314 | |
memorystatus_log_info("memorystatus_update_priority_locked: new limit on pid %d (%dMB %s) priority old --> new (%d --> %d) dirty?=0x%x %s\n",
proc_getpid(p), (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1),
(p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), p->p_memstat_effectivepriority, priority, p->p_memstat_dirty,
(p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : ""));
2319 | } |
2320 | } |
2321 | |
2322 | /* |
2323 | * Record idle start or idle delta. |
2324 | */ |
2325 | if (p->p_memstat_effectivepriority == priority) { |
2326 | /* |
2327 | * This process is not transitioning between |
2328 | * jetsam priority buckets. Do nothing. |
2329 | */ |
2330 | } else if (p->p_memstat_effectivepriority == JETSAM_PRIORITY_IDLE) { |
2331 | uint64_t now; |
2332 | /* |
2333 | * Transitioning out of the idle priority bucket. |
2334 | * Record idle delta. |
2335 | */ |
2336 | assert(p->p_memstat_idle_start != 0); |
2337 | now = mach_absolute_time(); |
2338 | if (now > p->p_memstat_idle_start) { |
2339 | p->p_memstat_idle_delta = now - p->p_memstat_idle_start; |
2340 | } |
2341 | |
2342 | /* |
2343 | * About to become active and so memory footprint could change. |
2344 | * So mark it eligible for freeze-considerations next time around. |
2345 | */ |
2346 | if (p->p_memstat_state & P_MEMSTAT_FREEZE_IGNORE) { |
2347 | p->p_memstat_state &= ~P_MEMSTAT_FREEZE_IGNORE; |
2348 | } |
2349 | } else if (priority == JETSAM_PRIORITY_IDLE) { |
2350 | /* |
2351 | * Transitioning into the idle priority bucket. |
2352 | * Record idle start. |
2353 | */ |
2354 | p->p_memstat_idle_start = mach_absolute_time(); |
2355 | } |
2356 | |
2357 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_CHANGE_PRIORITY), proc_getpid(p), priority, p->p_memstat_effectivepriority); |
2358 | |
2359 | p->p_memstat_effectivepriority = priority; |
2360 | |
2361 | #if CONFIG_SECLUDED_MEMORY |
2362 | if (secluded_for_apps && |
2363 | task_could_use_secluded_mem(proc_task(p))) { |
2364 | task_set_can_use_secluded_mem( |
2365 | proc_task(p), |
2366 | (priority >= JETSAM_PRIORITY_FOREGROUND)); |
2367 | } |
2368 | #endif /* CONFIG_SECLUDED_MEMORY */ |
2369 | |
2370 | memorystatus_check_levels_locked(); |
2371 | } |
2372 | |
2373 | int |
2374 | memorystatus_relaunch_flags_update(proc_t p, int relaunch_flags) |
2375 | { |
2376 | p->p_memstat_relaunch_flags = relaunch_flags; |
2377 | KDBG(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_RELAUNCH_FLAGS), proc_getpid(p), relaunch_flags); |
2378 | return 0; |
2379 | } |
2380 | |
2381 | #if DEVELOPMENT || DEBUG |
2382 | static int sysctl_memorystatus_relaunch_flags SYSCTL_HANDLER_ARGS { |
2383 | #pragma unused(oidp, arg1, arg2) |
2384 | proc_t p; |
2385 | int relaunch_flags = 0; |
2386 | |
2387 | p = current_proc(); |
2388 | relaunch_flags = p->p_memstat_relaunch_flags; |
2389 | switch (relaunch_flags) { |
2390 | case P_MEMSTAT_RELAUNCH_LOW: |
2391 | relaunch_flags = POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_LOW; |
2392 | break; |
2393 | case P_MEMSTAT_RELAUNCH_MED: |
2394 | relaunch_flags = POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MED; |
2395 | break; |
2396 | case P_MEMSTAT_RELAUNCH_HIGH: |
2397 | relaunch_flags = POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_HIGH; |
2398 | break; |
2399 | } |
2400 | |
2401 | return SYSCTL_OUT(req, &relaunch_flags, sizeof(relaunch_flags)); |
2402 | } |
SYSCTL_PROC(_kern, OID_AUTO, memorystatus_relaunch_flags, CTLTYPE_INT | CTLFLAG_RD |
CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, sysctl_memorystatus_relaunch_flags, "I", "get relaunch flags for current process");
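
/*
 * Illustrative query (DEVELOPMENT || DEBUG kernels only): reading
 *
 *     sysctl kern.memorystatus_relaunch_flags
 *
 * from user space reports the POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_*
 * value for the calling process.
 */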
2405 | #endif /* DEVELOPMENT || DEBUG */ |
2406 | |
2407 | /* |
2408 | * Everything between the idle band and the application agining band |
2409 | * are reserved for internal use. We allow some entitled user space programs |
2410 | * to use this range for experimentation. |
2411 | */ |
2412 | static bool |
current_task_can_use_entitled_range(void)
2414 | { |
static const char kInternalJetsamRangeEntitlement[] = "com.apple.private.internal-jetsam-range";
2416 | task_t task = current_task(); |
2417 | if (task == kernel_task) { |
2418 | return true; |
2419 | } |
return IOTaskHasEntitlement(task, kInternalJetsamRangeEntitlement);
2421 | } |
2422 | |
2423 | /* |
2424 | * |
2425 | * Description: Update the jetsam priority and memory limit attributes for a given process. |
2426 | * |
2427 | * Parameters: |
* p The process whose jetsam information is being initialized or updated.
2429 | * priority The jetsam priority band |
2430 | * user_data user specific data, unused by the kernel |
2431 | * is_assertion When true, a priority update is driven by an assertion. |
2432 | * effective guards against race if process's update already occurred |
2433 | * update_memlimit When true we know this is the init step via the posix_spawn path. |
2434 | * |
* memlimit_active Value in megabytes; the monitored footprint level while the
* process is active. Exceeding it may result in termination
* based on its associated fatal flag.
*
* memlimit_active_is_fatal When a process is active and exceeds its memory footprint,
* this describes whether or not it should be immediately fatal.
*
* memlimit_inactive Value in megabytes; the monitored footprint level while the
* process is inactive. Exceeding it may result in termination
* based on its associated fatal flag.
*
* memlimit_inactive_is_fatal When a process is inactive and exceeds its memory footprint,
* this describes whether or not it should be immediately fatal.
2448 | * |
2449 | * Returns: 0 Success |
2450 | * non-0 Failure |
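*
* Illustrative call with hypothetical limits, as on the posix_spawn init
* path (update_memlimit == TRUE):
*
*     memorystatus_update(p, JETSAM_PRIORITY_DEFAULT, 0,
*         FALSE,        // is_assertion
*         TRUE,         // effective
*         TRUE,         // update_memlimit
*         100, TRUE,    // active limit: 100 MB, fatal
*         50, FALSE);   // inactive limit: 50 MB, non-fatal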
2451 | */ |
2452 | |
2453 | int |
2454 | memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t is_assertion, boolean_t effective, boolean_t update_memlimit, |
2455 | int32_t memlimit_active, boolean_t memlimit_active_is_fatal, |
2456 | int32_t memlimit_inactive, boolean_t memlimit_inactive_is_fatal) |
2457 | { |
2458 | int ret; |
2459 | boolean_t head_insert = false; |
2460 | |
memorystatus_log_info("memorystatus_update: changing (%s) pid %d: priority %d, user_data 0x%llx\n",
(*p->p_name ? p->p_name : "unknown"), proc_getpid(p), priority, user_data);
2463 | |
2464 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_UPDATE) | DBG_FUNC_START, proc_getpid(p), priority, user_data, effective); |
2465 | |
2466 | if (priority == -1) { |
2467 | /* Use as shorthand for default priority */ |
2468 | priority = JETSAM_PRIORITY_DEFAULT; |
2469 | } else if (priority > JETSAM_PRIORITY_IDLE && priority <= applications_aging_band) { |
2470 | /* |
2471 | * Everything between idle and the aging bands are reserved for internal use. |
2472 | * if requested, adjust to JETSAM_PRIORITY_IDLE. |
2473 | * Entitled processes (just munch) can use a subset of this range for testing. |
2474 | */ |
2475 | if (priority > JETSAM_PRIORITY_ENTITLED_MAX || |
2476 | !current_task_can_use_entitled_range()) { |
2477 | priority = JETSAM_PRIORITY_IDLE; |
2478 | } |
2479 | } else if (priority == JETSAM_PRIORITY_IDLE_HEAD) { |
2480 | /* JETSAM_PRIORITY_IDLE_HEAD inserts at the head of the idle queue */ |
2481 | priority = JETSAM_PRIORITY_IDLE; |
2482 | head_insert = TRUE; |
2483 | } else if ((priority < 0) || (priority >= MEMSTAT_BUCKET_COUNT)) { |
2484 | /* Sanity check */ |
2485 | ret = EINVAL; |
2486 | goto out; |
2487 | } |
2488 | |
2489 | proc_list_lock(); |
2490 | |
2491 | assert(!(p->p_memstat_state & P_MEMSTAT_INTERNAL)); |
2492 | |
2493 | if (effective && (p->p_memstat_state & P_MEMSTAT_PRIORITYUPDATED)) { |
2494 | ret = EALREADY; |
2495 | proc_list_unlock(); |
memorystatus_log_debug("memorystatus_update: effective change specified for pid %d, but change already occurred.\n",
2497 | proc_getpid(p)); |
2498 | goto out; |
2499 | } |
2500 | |
2501 | if ((p->p_memstat_state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_SKIP)) || proc_list_exited(p)) { |
2502 | /* |
2503 | * This could happen when a process calling posix_spawn() is exiting on the jetsam thread. |
2504 | */ |
2505 | ret = EBUSY; |
2506 | proc_list_unlock(); |
2507 | goto out; |
2508 | } |
2509 | |
2510 | p->p_memstat_state |= P_MEMSTAT_PRIORITYUPDATED; |
2511 | p->p_memstat_userdata = user_data; |
2512 | |
2513 | if (is_assertion) { |
2514 | if (priority == JETSAM_PRIORITY_IDLE) { |
2515 | /* |
2516 | * Assertions relinquish control when the process is heading to IDLE. |
2517 | */ |
2518 | if (p->p_memstat_state & P_MEMSTAT_PRIORITY_ASSERTION) { |
2519 | /* |
2520 | * Mark the process as no longer being managed by assertions. |
2521 | */ |
2522 | p->p_memstat_state &= ~P_MEMSTAT_PRIORITY_ASSERTION; |
2523 | } else { |
2524 | /* |
2525 | * Ignore an idle priority transition if the process is not |
2526 | * already managed by assertions. We won't treat this as |
2527 | * an error, but we will log the unexpected behavior and bail. |
2528 | */ |
memorystatus_log_error(
"memorystatus: Ignore assertion driven idle priority. Process not previously controlled %s:%d\n",
(*p->p_name ? p->p_name : "unknown"), proc_getpid(p));
2532 | |
2533 | ret = 0; |
2534 | proc_list_unlock(); |
2535 | goto out; |
2536 | } |
2537 | } else { |
2538 | /* |
2539 | * Process is now being managed by assertions, |
2540 | */ |
2541 | p->p_memstat_state |= P_MEMSTAT_PRIORITY_ASSERTION; |
2542 | } |
2543 | |
2544 | /* Always update the assertion priority in this path */ |
2545 | |
2546 | p->p_memstat_assertionpriority = priority; |
2547 | |
2548 | int memstat_dirty_flags = memorystatus_dirty_get(p, TRUE); /* proc_list_lock is held */ |
2549 | |
2550 | if (memstat_dirty_flags != 0) { |
2551 | /* |
2552 | * Calculate maximum priority only when dirty tracking processes are involved. |
2553 | */ |
2554 | int maxpriority; |
2555 | if (memstat_dirty_flags & PROC_DIRTY_IS_DIRTY) { |
2556 | maxpriority = MAX(p->p_memstat_assertionpriority, p->p_memstat_requestedpriority); |
2557 | } else { |
2558 | /* clean */ |
2559 | |
2560 | if (memstat_dirty_flags & PROC_DIRTY_ALLOWS_IDLE_EXIT) { |
2561 | /* |
* The aging policy must be evaluated and applied here because runningboardd
2563 | * has relinquished its hold on the jetsam priority by attempting to move a |
2564 | * clean process to the idle band. |
2565 | */ |
2566 | |
2567 | int newpriority = JETSAM_PRIORITY_IDLE; |
2568 | if ((p->p_memstat_dirty & (P_DIRTY_IDLE_EXIT_ENABLED | P_DIRTY_IS_DIRTY)) == P_DIRTY_IDLE_EXIT_ENABLED) { |
2569 | newpriority = (p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) ? system_procs_aging_band : JETSAM_PRIORITY_IDLE; |
2570 | } |
2571 | |
maxpriority = MAX(p->p_memstat_assertionpriority, newpriority);
2573 | |
2574 | if (newpriority == system_procs_aging_band) { |
2575 | memorystatus_schedule_idle_demotion_locked(p, FALSE); |
2576 | } |
2577 | } else { |
2578 | /* |
2579 | * Preserves requestedpriority when the process does not support pressured exit. |
2580 | */ |
2581 | maxpriority = MAX(p->p_memstat_assertionpriority, p->p_memstat_requestedpriority); |
2582 | } |
2583 | } |
2584 | priority = maxpriority; |
2585 | } |
2586 | } else { |
2587 | p->p_memstat_requestedpriority = priority; |
2588 | } |
2589 | |
2590 | if (update_memlimit) { |
2591 | boolean_t is_fatal; |
2592 | boolean_t use_active; |
2593 | |
2594 | /* |
2595 | * Posix_spawn'd processes come through this path to instantiate ledger limits. |
2596 | * Forked processes do not come through this path, so no ledger limits exist. |
2597 | * (That's why forked processes can consume unlimited memory.) |
2598 | */ |
2599 | |
memorystatus_log_info(
"memorystatus_update: update memlimit (%s) pid %d, priority %d, dirty=0x%x, Active(%dMB %s), Inactive(%dMB, %s)\n",
(*p->p_name ? p->p_name : "unknown"), proc_getpid(p), priority, p->p_memstat_dirty,
memlimit_active, (memlimit_active_is_fatal ? "F " : "NF"),
memlimit_inactive, (memlimit_inactive_is_fatal ? "F " : "NF"));
2605 | |
2606 | if (memlimit_active <= 0) { |
2607 | /* |
2608 | * This process will have a system_wide task limit when active. |
2609 | * System_wide task limit is always fatal. |
2610 | * It's quite common to see non-fatal flag passed in here. |
2611 | * It's not an error, we just ignore it. |
2612 | */ |
2613 | |
2614 | /* |
2615 | * For backward compatibility with some unexplained launchd behavior, |
2616 | * we allow a zero sized limit. But we still enforce system_wide limit |
2617 | * when written to the ledgers. |
2618 | */ |
2619 | |
2620 | if (memlimit_active < 0) { |
2621 | memlimit_active = -1; /* enforces system_wide task limit */ |
2622 | } |
2623 | memlimit_active_is_fatal = TRUE; |
2624 | } |
2625 | |
2626 | if (memlimit_inactive <= 0) { |
2627 | /* |
2628 | * This process will have a system_wide task limit when inactive. |
2629 | * System_wide task limit is always fatal. |
2630 | */ |
2631 | |
2632 | memlimit_inactive = -1; |
2633 | memlimit_inactive_is_fatal = TRUE; |
2634 | } |
2635 | |
2636 | /* |
2637 | * Initialize the active limit variants for this process. |
2638 | */ |
2639 | SET_ACTIVE_LIMITS_LOCKED(p, memlimit_active, memlimit_active_is_fatal); |
2640 | |
2641 | /* |
2642 | * Initialize the inactive limit variants for this process. |
2643 | */ |
2644 | SET_INACTIVE_LIMITS_LOCKED(p, memlimit_inactive, memlimit_inactive_is_fatal); |
2645 | |
2646 | /* |
		 * Initialize the cached limits for the target process.
		 * When the target process is dirty tracked, it's typically
		 * in a clean state. Non-dirty-tracked processes are
		 * typically active (Foreground or above).
		 * But just in case, we don't make assumptions...
2652 | */ |
2653 | |
2654 | if (proc_jetsam_state_is_active_locked(p) == TRUE) { |
2655 | CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal); |
2656 | use_active = TRUE; |
2657 | } else { |
2658 | CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal); |
2659 | use_active = FALSE; |
2660 | } |
2661 | |
2662 | /* |
2663 | * Enforce the cached limit by writing to the ledger. |
2664 | */ |
2665 | if (memorystatus_highwater_enabled) { |
2666 | /* apply now */ |
2667 | task_set_phys_footprint_limit_internal(proc_task(p), |
2668 | ((p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit : -1), NULL, use_active, is_fatal); |
2669 | } |
2670 | } |
2671 | |
2672 | /* |
	 * We can't add the process to the aging bands here.
	 * But we could be removing it from those buckets.
	 * Check and take the appropriate steps if so.
2676 | */ |
2677 | |
2678 | if (isProcessInAgingBands(p)) { |
2679 | if (isApp(p) && (priority > applications_aging_band)) { |
2680 | /* |
2681 | * Runningboardd is pulling up an application that is in the aging band. |
2682 | * We reset the app's state here so that it'll get a fresh stay in the |
2683 | * aging band on the way back. |
2684 | * |
			 * App 'aging' has always been handled in the memorystatus_update_priority_locked()
			 * function. Daemons used to be handled via the dirty 'set/clear/track' path.
2687 | * But with extensions (daemon-app hybrid), runningboardd is now going through |
2688 | * this routine for daemons too and things have gotten a bit tangled. This should |
2689 | * be simplified/untangled at some point and might require some assistance from |
2690 | * runningboardd. |
2691 | */ |
2692 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
2693 | } else { |
2694 | memorystatus_invalidate_idle_demotion_locked(p, FALSE); |
2695 | } |
2696 | memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, FALSE, TRUE); |
2697 | } |
2698 | |
2699 | memorystatus_update_priority_locked(p, priority, head_insert, FALSE); |
2700 | |
2701 | proc_list_unlock(); |
2702 | ret = 0; |
2703 | |
2704 | out: |
2705 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_UPDATE) | DBG_FUNC_END, ret); |
2706 | |
2707 | return ret; |
2708 | } |
2709 | |
2710 | int |
2711 | memorystatus_remove(proc_t p) |
2712 | { |
2713 | int ret; |
2714 | memstat_bucket_t *bucket; |
2715 | boolean_t reschedule = FALSE; |
2716 | |
2717 | memorystatus_log_debug("memorystatus_list_remove: removing pid %d\n" , proc_getpid(p)); |
2718 | |
2719 | /* Processes marked internal do not have priority tracked */ |
2720 | if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { |
2721 | return 0; |
2722 | } |
2723 | |
2724 | /* |
2725 | * Check if this proc is locked (because we're performing a freeze). |
2726 | * If so, we fail and instruct the caller to try again later. |
2727 | */ |
2728 | if (p->p_memstat_state & P_MEMSTAT_LOCKED) { |
2729 | return EAGAIN; |
2730 | } |
2731 | |
2732 | assert(!(p->p_memstat_state & P_MEMSTAT_INTERNAL)); |
2733 | |
2734 | bucket = &memstat_bucket[p->p_memstat_effectivepriority]; |
2735 | |
2736 | if (isSysProc(p) && system_procs_aging_band && (p->p_memstat_effectivepriority == system_procs_aging_band)) { |
2737 | assert(bucket->count == memorystatus_scheduled_idle_demotions_sysprocs); |
2738 | reschedule = TRUE; |
2739 | } else if (isApp(p) && applications_aging_band && (p->p_memstat_effectivepriority == applications_aging_band)) { |
2740 | assert(bucket->count == memorystatus_scheduled_idle_demotions_apps); |
2741 | reschedule = TRUE; |
2742 | } |
2743 | |
2744 | /* |
2745 | * Record idle delta |
2746 | */ |
2747 | |
2748 | if (p->p_memstat_effectivepriority == JETSAM_PRIORITY_IDLE) { |
2749 | uint64_t now = mach_absolute_time(); |
2750 | if (now > p->p_memstat_idle_start) { |
2751 | p->p_memstat_idle_delta = now - p->p_memstat_idle_start; |
2752 | } |
2753 | } |
2754 | |
2755 | TAILQ_REMOVE(&bucket->list, p, p_memstat_list); |
2756 | bucket->count--; |
2757 | if (p->p_memstat_relaunch_flags & (P_MEMSTAT_RELAUNCH_HIGH)) { |
2758 | bucket->relaunch_high_count--; |
2759 | } |
2760 | |
2761 | memorystatus_list_count--; |
2762 | |
2763 | /* If awaiting demotion to the idle band, clean up */ |
2764 | if (reschedule) { |
2765 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
2766 | memorystatus_reschedule_idle_demotion_locked(); |
2767 | } |
2768 | |
2769 | memorystatus_check_levels_locked(); |
2770 | |
2771 | #if CONFIG_FREEZE |
2772 | if (p->p_memstat_state & (P_MEMSTAT_FROZEN)) { |
2773 | if (p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) { |
2774 | p->p_memstat_state &= ~P_MEMSTAT_REFREEZE_ELIGIBLE; |
2775 | memorystatus_refreeze_eligible_count--; |
2776 | } |
2777 | |
2778 | memorystatus_frozen_count--; |
2779 | if (p->p_memstat_state & P_MEMSTAT_FROZEN_XPC_SERVICE) { |
2780 | memorystatus_frozen_count_xpc_service--; |
2781 | } |
		if (strcmp(p->p_name, "com.apple.WebKit.WebContent") == 0) {
2783 | memorystatus_frozen_count_webcontent--; |
2784 | } |
2785 | memorystatus_frozen_shared_mb -= p->p_memstat_freeze_sharedanon_pages; |
2786 | p->p_memstat_freeze_sharedanon_pages = 0; |
2787 | } |
2788 | |
2789 | if (p->p_memstat_state & P_MEMSTAT_SUSPENDED) { |
2790 | memorystatus_suspended_count--; |
2791 | } |
2792 | #endif |
2793 | |
2794 | #if DEVELOPMENT || DEBUG |
2795 | if (proc_getpid(p) == memorystatus_testing_pid) { |
2796 | memorystatus_testing_pid = 0; |
2797 | } |
2798 | #endif /* DEVELOPMENT || DEBUG */ |
2799 | |
2800 | if (p) { |
2801 | ret = 0; |
2802 | } else { |
2803 | ret = ESRCH; |
2804 | } |
2805 | |
2806 | return ret; |
2807 | } |
2808 | |
2809 | /* |
2810 | * Validate dirty tracking flags with process state. |
2811 | * |
2812 | * Return: |
2813 | * 0 on success |
2814 | * non-0 on failure |
2815 | * |
2816 | * The proc_list_lock is held by the caller. |
2817 | */ |
2818 | |
2819 | static int |
2820 | memorystatus_validate_track_flags(struct proc *target_p, uint32_t pcontrol) |
2821 | { |
2822 | /* See that the process isn't marked for termination */ |
2823 | if (target_p->p_memstat_dirty & P_DIRTY_TERMINATED) { |
2824 | return EBUSY; |
2825 | } |
2826 | |
2827 | /* Idle exit requires that process be tracked */ |
2828 | if ((pcontrol & PROC_DIRTY_ALLOW_IDLE_EXIT) && |
2829 | !(pcontrol & PROC_DIRTY_TRACK)) { |
2830 | return EINVAL; |
2831 | } |
2832 | |
2833 | /* 'Launch in progress' tracking requires that process have enabled dirty tracking too. */ |
2834 | if ((pcontrol & PROC_DIRTY_LAUNCH_IN_PROGRESS) && |
2835 | !(pcontrol & PROC_DIRTY_TRACK)) { |
2836 | return EINVAL; |
2837 | } |
2838 | |
	/* Only one type of DEFER behavior is allowed. */
2840 | if ((pcontrol & PROC_DIRTY_DEFER) && |
2841 | (pcontrol & PROC_DIRTY_DEFER_ALWAYS)) { |
2842 | return EINVAL; |
2843 | } |
2844 | |
2845 | /* Deferral is only relevant if idle exit is specified */ |
2846 | if (((pcontrol & PROC_DIRTY_DEFER) || |
2847 | (pcontrol & PROC_DIRTY_DEFER_ALWAYS)) && |
2848 | !(pcontrol & PROC_DIRTY_ALLOWS_IDLE_EXIT)) { |
2849 | return EINVAL; |
2850 | } |
2851 | |
2852 | return 0; |
2853 | } |
2854 | |
2855 | static void |
2856 | memorystatus_update_idle_priority_locked(proc_t p) |
2857 | { |
2858 | int32_t priority; |
2859 | |
2860 | memorystatus_log_debug("memorystatus_update_idle_priority_locked(): pid %d dirty 0x%X\n" , |
2861 | proc_getpid(p), p->p_memstat_dirty); |
2862 | |
2863 | assert(isSysProc(p)); |
2864 | |
2865 | if ((p->p_memstat_dirty & (P_DIRTY_IDLE_EXIT_ENABLED | P_DIRTY_IS_DIRTY)) == P_DIRTY_IDLE_EXIT_ENABLED) { |
2866 | priority = (p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) ? system_procs_aging_band : JETSAM_PRIORITY_IDLE; |
2867 | } else { |
2868 | priority = p->p_memstat_requestedpriority; |
2869 | } |
2870 | |
2871 | if (p->p_memstat_state & P_MEMSTAT_PRIORITY_ASSERTION) { |
2872 | /* |
2873 | * This process has a jetsam priority managed by an assertion. |
2874 | * Policy is to choose the max priority. |
2875 | */ |
2876 | if (p->p_memstat_assertionpriority > priority) { |
2877 | memorystatus_log_debug("memorystatus: assertion priority %d overrides priority %d for %s:%d\n" , |
2878 | p->p_memstat_assertionpriority, priority, |
2879 | (*p->p_name ? p->p_name : "unknown" ), proc_getpid(p)); |
2880 | priority = p->p_memstat_assertionpriority; |
2881 | } |
2882 | } |
2883 | |
2884 | if (priority != p->p_memstat_effectivepriority) { |
2885 | memorystatus_update_priority_locked(p, priority, false, false); |
2886 | } |
2887 | } |
2888 | |
2889 | /* |
2890 | * Processes can opt to have their state tracked by the kernel, indicating when they are busy (dirty) or idle |
2891 | * (clean). They may also indicate that they support termination when idle, with the result that they are promoted |
2892 | * to their desired, higher, jetsam priority when dirty (and are therefore killed later), and demoted to the low |
 * priority idle band when clean (and killed earlier, protecting higher priority processes).
2894 | * |
2895 | * If the deferral flag is set, then newly tracked processes will be protected for an initial period (as determined by |
2896 | * memorystatus_sysprocs_idle_delay_time); if they go clean during this time, then they will be moved to a deferred-idle band |
2897 | * with a slightly higher priority, guarding against immediate termination under memory pressure and being unable to |
2898 | * make forward progress. Finally, when the guard expires, they will be moved to the standard, lowest-priority, idle |
2899 | * band. The deferral can be cleared early by clearing the appropriate flag. |
2900 | * |
2901 | * The deferral timer is active only for the duration that the process is marked as guarded and clean; if the process |
2902 | * is marked dirty, the timer will be cancelled. Upon being subsequently marked clean, the deferment will either be |
2903 | * re-enabled or the guard state cleared, depending on whether the guard deadline has passed. |
2904 | */ |
2905 | |
2906 | int |
2907 | memorystatus_dirty_track(proc_t p, uint32_t pcontrol) |
2908 | { |
2909 | unsigned int old_dirty; |
2910 | boolean_t reschedule = FALSE; |
2911 | boolean_t already_deferred = FALSE; |
2912 | boolean_t defer_now = FALSE; |
2913 | int ret = 0; |
2914 | |
2915 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_DIRTY_TRACK), |
2916 | proc_getpid(p), p->p_memstat_dirty, pcontrol); |
2917 | |
2918 | proc_list_lock(); |
2919 | |
2920 | if (proc_list_exited(p)) { |
2921 | /* |
2922 | * Process is on its way out. |
2923 | */ |
2924 | ret = EBUSY; |
2925 | goto exit; |
2926 | } |
2927 | |
2928 | if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { |
2929 | ret = EPERM; |
2930 | goto exit; |
2931 | } |
2932 | |
	if ((ret = memorystatus_validate_track_flags(p, pcontrol)) != 0) {
2934 | /* error */ |
2935 | goto exit; |
2936 | } |
2937 | |
2938 | old_dirty = p->p_memstat_dirty; |
2939 | |
2940 | /* These bits are cumulative, as per <rdar://problem/11159924> */ |
2941 | if (pcontrol & PROC_DIRTY_TRACK) { |
		/* Request to turn ON dirty tracking... */
		if (p->p_memstat_state & P_MEMSTAT_MANAGED) {
			/* on a process managed by RunningBoard or its equivalent... */
2945 | if (!(p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT)) { |
				/* but this might be an app, because there are no fatal limits.
				 * NB: This _big_ assumption is not universal. What we really
				 * need is a way to say this is an _APP_ and that we can't have
				 * dirty tracking turned ON for it. Lacking that functionality, we
				 * clump together some checks and try to do the best detection we
				 * can. The reason we can't allow these flags to be added is that,
				 * per the kernel checks, they change the role of a process from
				 * app to daemon. And the AGING_IN_PROGRESS bits might still be
				 * set, i.e. the process needs to be demoted correctly from the
				 * right aging band (app or sysproc). We can't simply invalidate
				 * the demotion here because, owing to assertion priorities, we
				 * might not be in the aging bands.
2957 | */ |
2958 | #if DEVELOPMENT || DEBUG |
2959 | memorystatus_log_info( |
2960 | "memorystatus: Denying dirty-tracking opt-in for app %s (pid %d)\n" , |
2961 | (*p->p_name ? p->p_name : "unknown" ), proc_getpid(p)); |
2962 | #endif /*DEVELOPMENT || DEBUG*/ |
2963 | /* fail silently to avoid an XPC assertion... */ |
2964 | ret = 0; |
2965 | goto exit; |
2966 | } |
2967 | } |
2968 | |
2969 | p->p_memstat_dirty |= P_DIRTY_TRACK; |
2970 | } |
2971 | |
2972 | if (pcontrol & PROC_DIRTY_ALLOW_IDLE_EXIT) { |
2973 | p->p_memstat_dirty |= P_DIRTY_ALLOW_IDLE_EXIT; |
2974 | } |
2975 | |
2976 | if (pcontrol & PROC_DIRTY_LAUNCH_IN_PROGRESS) { |
2977 | p->p_memstat_dirty |= P_DIRTY_LAUNCH_IN_PROGRESS; |
2978 | } |
2979 | |
2980 | if (old_dirty & P_DIRTY_AGING_IN_PROGRESS) { |
2981 | already_deferred = TRUE; |
2982 | } |
2983 | |
2984 | |
2985 | /* This can be set and cleared exactly once. */ |
2986 | if (pcontrol & (PROC_DIRTY_DEFER | PROC_DIRTY_DEFER_ALWAYS)) { |
2987 | if ((pcontrol & (PROC_DIRTY_DEFER)) && |
2988 | !(old_dirty & P_DIRTY_DEFER)) { |
2989 | p->p_memstat_dirty |= P_DIRTY_DEFER; |
2990 | } |
2991 | |
2992 | if ((pcontrol & (PROC_DIRTY_DEFER_ALWAYS)) && |
2993 | !(old_dirty & P_DIRTY_DEFER_ALWAYS)) { |
2994 | p->p_memstat_dirty |= P_DIRTY_DEFER_ALWAYS; |
2995 | } |
2996 | |
2997 | defer_now = TRUE; |
2998 | } |
2999 | |
3000 | memorystatus_log_info( |
3001 | "memorystatus_on_track_dirty(): set idle-exit %s / defer %s / dirty %s for pid %d\n" , |
3002 | ((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED) ? "Y" : "N" , |
3003 | defer_now ? "Y" : "N" , p->p_memstat_dirty & P_DIRTY ? "Y" : "N" , proc_getpid(p)); |
3004 | |
3005 | /* Kick off or invalidate the idle exit deferment if there's a state transition. */ |
3006 | if (!(p->p_memstat_dirty & P_DIRTY_IS_DIRTY)) { |
3007 | if ((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED) { |
3008 | if (defer_now && !already_deferred) { |
3009 | /* |
3010 | * Request to defer a clean process that's idle-exit enabled |
3011 | * and not already in the jetsam deferred band. Most likely a |
3012 | * new launch. |
3013 | */ |
3014 | memorystatus_schedule_idle_demotion_locked(p, TRUE); |
3015 | reschedule = TRUE; |
3016 | } else if (!defer_now) { |
3017 | /* |
				 * The process isn't asking for the 'aging' facility.
				 * It could be that it is:
3020 | */ |
3021 | |
3022 | if (already_deferred) { |
3023 | /* |
3024 | * already in the aging bands. Traditionally, |
3025 | * some processes have tried to use this to |
3026 | * opt out of the 'aging' facility. |
3027 | */ |
3028 | |
3029 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
3030 | } else { |
3031 | /* |
3032 | * agnostic to the 'aging' facility. In that case, |
3033 | * we'll go ahead and opt it in because this is likely |
3034 | * a new launch (clean process, dirty tracking enabled) |
3035 | */ |
3036 | |
3037 | memorystatus_schedule_idle_demotion_locked(p, TRUE); |
3038 | } |
3039 | |
3040 | reschedule = TRUE; |
3041 | } |
3042 | } |
3043 | } else { |
3044 | /* |
3045 | * We are trying to operate on a dirty process. Dirty processes have to |
3046 | * be removed from the deferred band & their state has to be reset. |
3047 | * |
3048 | * This could be a legal request like: |
3049 | * - this process had opted into the 'aging' band |
3050 | * - but it's now dirty and requests to opt out. |
3051 | * In this case, we remove the process from the band and reset its |
3052 | * state too. It'll opt back in properly when needed. |
3053 | * |
3054 | * OR, this request could be a user-space bug. E.g.: |
3055 | * - this process had opted into the 'aging' band when clean |
3056 | * - and, then issues another request to again put it into the band except |
3057 | * this time the process is dirty. |
		 * The process going dirty, as a transition in memorystatus_dirty_set(), will pull the process out of
		 * the deferred band with its state intact. So our request below is a no-op.
		 * But we do it here anyway for coverage.
3061 | * |
3062 | * memorystatus_update_idle_priority_locked() |
3063 | * single-mindedly treats a dirty process as "cannot be in the aging band". |
3064 | */ |
3065 | |
3066 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
3067 | reschedule = TRUE; |
3068 | } |
3069 | |
3070 | memorystatus_update_idle_priority_locked(p); |
3071 | |
3072 | if (reschedule) { |
3073 | memorystatus_reschedule_idle_demotion_locked(); |
3074 | } |
3075 | |
3076 | ret = 0; |
3077 | |
3078 | exit: |
3079 | proc_list_unlock(); |
3080 | |
3081 | return ret; |
3082 | } |
3083 | |
3084 | int |
3085 | memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) |
3086 | { |
3087 | int ret; |
3088 | boolean_t kill = false; |
3089 | boolean_t reschedule = FALSE; |
3090 | boolean_t was_dirty = FALSE; |
3091 | boolean_t now_dirty = FALSE; |
3092 | |
3093 | memorystatus_log_debug("memorystatus_dirty_set(): %d %d 0x%x 0x%x\n" , self, proc_getpid(p), pcontrol, p->p_memstat_dirty); |
3094 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_DIRTY_SET), proc_getpid(p), self, pcontrol); |
3095 | |
3096 | proc_list_lock(); |
3097 | |
3098 | if (proc_list_exited(p)) { |
3099 | /* |
3100 | * Process is on its way out. |
3101 | */ |
3102 | ret = EBUSY; |
3103 | goto exit; |
3104 | } |
3105 | |
3106 | if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { |
3107 | ret = EPERM; |
3108 | goto exit; |
3109 | } |
3110 | |
3111 | if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) { |
3112 | was_dirty = TRUE; |
3113 | } |
3114 | |
3115 | if (!(p->p_memstat_dirty & P_DIRTY_TRACK)) { |
3116 | /* Dirty tracking not enabled */ |
3117 | ret = EINVAL; |
3118 | } else if (pcontrol && (p->p_memstat_dirty & P_DIRTY_TERMINATED)) { |
3119 | /* |
3120 | * Process is set to be terminated and we're attempting to mark it dirty. |
3121 | * Set for termination and marking as clean is OK - see <rdar://problem/10594349>. |
3122 | */ |
3123 | ret = EBUSY; |
3124 | } else { |
3125 | int flag = (self == TRUE) ? P_DIRTY : P_DIRTY_SHUTDOWN; |
3126 | if (pcontrol && !(p->p_memstat_dirty & flag)) { |
3127 | /* Mark the process as having been dirtied at some point */ |
3128 | p->p_memstat_dirty |= (flag | P_DIRTY_MARKED); |
3129 | memorystatus_dirty_count++; |
3130 | ret = 0; |
3131 | } else if ((pcontrol == 0) && (p->p_memstat_dirty & flag)) { |
3132 | if ((flag == P_DIRTY_SHUTDOWN) && (!(p->p_memstat_dirty & P_DIRTY))) { |
3133 | /* Clearing the dirty shutdown flag, and the process is otherwise clean - kill */ |
3134 | p->p_memstat_dirty |= P_DIRTY_TERMINATED; |
3135 | kill = true; |
3136 | } else if ((flag == P_DIRTY) && (p->p_memstat_dirty & P_DIRTY_TERMINATED)) { |
3137 | /* Kill previously terminated processes if set clean */ |
3138 | kill = true; |
3139 | } |
3140 | p->p_memstat_dirty &= ~flag; |
3141 | memorystatus_dirty_count--; |
3142 | ret = 0; |
3143 | } else { |
3144 | /* Already set */ |
3145 | ret = EALREADY; |
3146 | } |
3147 | } |
3148 | |
3149 | if (ret != 0) { |
3150 | goto exit; |
3151 | } |
3152 | |
3153 | if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) { |
3154 | now_dirty = TRUE; |
3155 | } |
3156 | |
3157 | if ((was_dirty == TRUE && now_dirty == FALSE) || |
3158 | (was_dirty == FALSE && now_dirty == TRUE)) { |
3159 | /* Manage idle exit deferral, if applied */ |
3160 | if ((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED) { |
3161 | /* |
3162 | * Legacy mode: P_DIRTY_AGING_IN_PROGRESS means the process is in the aging band OR it might be heading back |
3163 | * there once it's clean again. For the legacy case, this only applies if it has some protection window left. |
3164 | * P_DIRTY_DEFER: one-time protection window given at launch |
3165 | * P_DIRTY_DEFER_ALWAYS: protection window given for every dirty->clean transition. Like non-legacy mode. |
3166 | * |
			 * Non-Legacy mode: P_DIRTY_AGING_IN_PROGRESS means the process is in the aging band. It will always stop over
			 * in that band on its way to IDLE.
3169 | */ |
3170 | |
3171 | if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) { |
3172 | /* |
3173 | * New dirty process i.e. "was_dirty == FALSE && now_dirty == TRUE" |
3174 | * |
3175 | * The process will move from its aging band to its higher requested |
3176 | * jetsam band. |
3177 | */ |
3178 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
3179 | reschedule = TRUE; |
3180 | } else { |
3181 | /* |
3182 | * Process is back from "dirty" to "clean". |
3183 | */ |
3184 | |
3185 | memorystatus_schedule_idle_demotion_locked(p, TRUE); |
3186 | reschedule = TRUE; |
3187 | } |
3188 | } |
3189 | |
3190 | memorystatus_update_idle_priority_locked(p); |
3191 | |
3192 | if (memorystatus_highwater_enabled) { |
3193 | boolean_t ledger_update_needed = TRUE; |
3194 | boolean_t use_active; |
3195 | boolean_t is_fatal; |
3196 | /* |
3197 | * We are in this path because this process transitioned between |
3198 | * dirty <--> clean state. Update the cached memory limits. |
3199 | */ |
3200 | |
3201 | if (proc_jetsam_state_is_active_locked(p) == TRUE) { |
3202 | /* |
3203 | * process is pinned in elevated band |
3204 | * or |
3205 | * process is dirty |
3206 | */ |
3207 | CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal); |
3208 | use_active = TRUE; |
3209 | ledger_update_needed = TRUE; |
3210 | } else { |
3211 | /* |
3212 | * process is clean...but if it has opted into pressured-exit |
3213 | * we don't apply the INACTIVE limit till the process has aged |
3214 | * out and is entering the IDLE band. |
3215 | * See memorystatus_update_priority_locked() for that. |
3216 | */ |
3217 | |
3218 | if (p->p_memstat_dirty & P_DIRTY_ALLOW_IDLE_EXIT) { |
3219 | ledger_update_needed = FALSE; |
3220 | } else { |
3221 | CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal); |
3222 | use_active = FALSE; |
3223 | ledger_update_needed = TRUE; |
3224 | } |
3225 | } |
3226 | |
3227 | /* |
3228 | * Enforce the new limits by writing to the ledger. |
3229 | * |
			 * This is a hot path, and holding the proc_list_lock while writing to the ledgers
			 * (where the task lock is taken) is bad. So we temporarily drop the proc_list_lock.
3232 | * We aren't traversing the jetsam bucket list here, so we should be safe. |
3233 | * See rdar://21394491. |
3234 | */ |
3235 | |
3236 | if (ledger_update_needed && proc_ref(p, true) == p) { |
3237 | int ledger_limit; |
3238 | if (p->p_memstat_memlimit > 0) { |
3239 | ledger_limit = p->p_memstat_memlimit; |
3240 | } else { |
3241 | ledger_limit = -1; |
3242 | } |
3243 | proc_list_unlock(); |
3244 | task_set_phys_footprint_limit_internal(proc_task(p), ledger_limit, NULL, use_active, is_fatal); |
3245 | proc_list_lock(); |
3246 | proc_rele(p); |
3247 | |
3248 | memorystatus_log_debug( |
3249 | "memorystatus_dirty_set: new limit on pid %d (%dMB %s) priority(%d) dirty?=0x%x %s\n" , |
3250 | proc_getpid(p), (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1), |
3251 | (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF" ), p->p_memstat_effectivepriority, p->p_memstat_dirty, |
3252 | (p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean" ) : "" )); |
3253 | } |
3254 | } |
3255 | |
3256 | /* If the deferral state changed, reschedule the demotion timer */ |
3257 | if (reschedule) { |
3258 | memorystatus_reschedule_idle_demotion_locked(); |
3259 | } |
3260 | |
3261 | /* Settle dirty time in ledger, and update transition timestamp */ |
3262 | task_t t = proc_task(p); |
3263 | if (was_dirty) { |
3264 | task_ledger_settle_dirty_time(t); |
			task_set_dirty_start(t, 0);
		} else {
			task_set_dirty_start(t, mach_absolute_time());
3268 | } |
3269 | } |
3270 | |
3271 | if (kill) { |
3272 | if (proc_ref(p, true) == p) { |
3273 | proc_list_unlock(); |
3274 | psignal(p, SIGKILL); |
3275 | proc_list_lock(); |
3276 | proc_rele(p); |
3277 | } |
3278 | } |
3279 | |
3280 | exit: |
3281 | proc_list_unlock(); |
3282 | |
3283 | return ret; |
3284 | } |
3285 | |
3286 | int |
3287 | memorystatus_dirty_clear(proc_t p, uint32_t pcontrol) |
3288 | { |
3289 | int ret = 0; |
3290 | |
3291 | memorystatus_log_debug("memorystatus_dirty_clear(): %d 0x%x 0x%x\n" , proc_getpid(p), pcontrol, p->p_memstat_dirty); |
3292 | |
3293 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_DIRTY_CLEAR), proc_getpid(p), pcontrol); |
3294 | |
3295 | proc_list_lock(); |
3296 | |
3297 | if (proc_list_exited(p)) { |
3298 | /* |
3299 | * Process is on its way out. |
3300 | */ |
3301 | ret = EBUSY; |
3302 | goto exit; |
3303 | } |
3304 | |
3305 | if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { |
3306 | ret = EPERM; |
3307 | goto exit; |
3308 | } |
3309 | |
3310 | if (!(p->p_memstat_dirty & P_DIRTY_TRACK)) { |
3311 | /* Dirty tracking not enabled */ |
3312 | ret = EINVAL; |
3313 | goto exit; |
3314 | } |
3315 | |
3316 | if (!pcontrol || (pcontrol & (PROC_DIRTY_LAUNCH_IN_PROGRESS | PROC_DIRTY_DEFER | PROC_DIRTY_DEFER_ALWAYS)) == 0) { |
3317 | ret = EINVAL; |
3318 | goto exit; |
3319 | } |
3320 | |
3321 | if (pcontrol & PROC_DIRTY_LAUNCH_IN_PROGRESS) { |
3322 | p->p_memstat_dirty &= ~P_DIRTY_LAUNCH_IN_PROGRESS; |
3323 | } |
3324 | |
3325 | /* This can be set and cleared exactly once. */ |
3326 | if (pcontrol & (PROC_DIRTY_DEFER | PROC_DIRTY_DEFER_ALWAYS)) { |
3327 | if (p->p_memstat_dirty & P_DIRTY_DEFER) { |
3328 | p->p_memstat_dirty &= ~(P_DIRTY_DEFER); |
3329 | } |
3330 | |
3331 | if (p->p_memstat_dirty & P_DIRTY_DEFER_ALWAYS) { |
3332 | p->p_memstat_dirty &= ~(P_DIRTY_DEFER_ALWAYS); |
3333 | } |
3334 | |
3335 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
3336 | memorystatus_update_idle_priority_locked(p); |
3337 | memorystatus_reschedule_idle_demotion_locked(); |
3338 | } |
3339 | |
3340 | ret = 0; |
3341 | exit: |
3342 | proc_list_unlock(); |
3343 | |
3344 | return ret; |
3345 | } |
3346 | |
3347 | int |
3348 | memorystatus_dirty_get(proc_t p, boolean_t locked) |
3349 | { |
3350 | int ret = 0; |
3351 | |
3352 | if (!locked) { |
3353 | proc_list_lock(); |
3354 | } |
3355 | |
3356 | if (p->p_memstat_dirty & P_DIRTY_TRACK) { |
3357 | ret |= PROC_DIRTY_TRACKED; |
3358 | if (p->p_memstat_dirty & P_DIRTY_ALLOW_IDLE_EXIT) { |
3359 | ret |= PROC_DIRTY_ALLOWS_IDLE_EXIT; |
3360 | } |
3361 | if (p->p_memstat_dirty & P_DIRTY) { |
3362 | ret |= PROC_DIRTY_IS_DIRTY; |
3363 | } |
3364 | if (p->p_memstat_dirty & P_DIRTY_LAUNCH_IN_PROGRESS) { |
3365 | ret |= PROC_DIRTY_LAUNCH_IS_IN_PROGRESS; |
3366 | } |
3367 | } |
3368 | |
3369 | if (!locked) { |
3370 | proc_list_unlock(); |
3371 | } |
3372 | |
3373 | return ret; |
3374 | } |
3375 | |
3376 | int |
3377 | memorystatus_on_terminate(proc_t p) |
3378 | { |
3379 | int sig; |
3380 | |
3381 | proc_list_lock(); |
3382 | |
3383 | p->p_memstat_dirty |= P_DIRTY_TERMINATED; |
3384 | |
3385 | if (((p->p_memstat_dirty & (P_DIRTY_TRACK | P_DIRTY_IS_DIRTY)) == P_DIRTY_TRACK) || |
3386 | (p->p_memstat_state & P_MEMSTAT_SUSPENDED)) { |
3387 | /* |
		 * Mark as terminated and issue SIGKILL if:
3389 | * - process is clean, or, |
3390 | * - if process is dirty but suspended. This case is likely |
3391 | * an extension because apps don't opt into dirty-tracking |
3392 | * and daemons aren't suspended. |
3393 | */ |
3394 | #if DEVELOPMENT || DEBUG |
3395 | if (p->p_memstat_state & P_MEMSTAT_SUSPENDED) { |
3396 | memorystatus_log_info( |
3397 | "memorystatus: sending suspended process %s (pid %d) SIGKILL\n" , |
3398 | (*p->p_name ? p->p_name : "unknown" ), proc_getpid(p)); |
3399 | } |
3400 | #endif /* DEVELOPMENT || DEBUG */ |
3401 | sig = SIGKILL; |
3402 | } else { |
3403 | /* Dirty, terminated, or state tracking is unsupported; issue SIGTERM to allow cleanup */ |
3404 | sig = SIGTERM; |
3405 | } |
3406 | |
3407 | proc_list_unlock(); |
3408 | |
3409 | return sig; |
3410 | } |
3411 | |
3412 | void |
3413 | memorystatus_on_suspend(proc_t p) |
3414 | { |
3415 | #if CONFIG_FREEZE |
3416 | uint32_t pages; |
3417 | memorystatus_get_task_page_counts(proc_task(p), &pages, NULL, NULL); |
3418 | #endif |
3419 | proc_list_lock(); |
3420 | #if CONFIG_FREEZE |
3421 | memorystatus_suspended_count++; |
3422 | #endif |
3423 | p->p_memstat_state |= P_MEMSTAT_SUSPENDED; |
3424 | |
3425 | /* Check if proc is marked for termination */ |
3426 | bool kill_process = !!(p->p_memstat_dirty & P_DIRTY_TERMINATED); |
3427 | proc_list_unlock(); |
3428 | |
3429 | if (kill_process) { |
3430 | psignal(p, SIGKILL); |
3431 | } |
3432 | |
3433 | #if CONFIG_DEFERRED_RECLAIM |
	vm_deferred_reclamation_reclaim_from_task_async(proc_task(p));
3435 | #endif /* CONFIG_DEFERRED_RECLAIM */ |
3436 | } |
3437 | |
3438 | extern uint64_t memorystatus_thaw_count_since_boot; |
3439 | |
3440 | void |
3441 | memorystatus_on_resume(proc_t p) |
3442 | { |
3443 | #if CONFIG_FREEZE |
3444 | boolean_t frozen; |
3445 | pid_t pid; |
3446 | #endif |
3447 | |
3448 | proc_list_lock(); |
3449 | |
3450 | #if CONFIG_FREEZE |
3451 | frozen = (p->p_memstat_state & P_MEMSTAT_FROZEN); |
3452 | if (frozen) { |
3453 | /* |
3454 | * Now that we don't _thaw_ a process completely, |
3455 | * resuming it (and having some on-demand swapins) |
3456 | * shouldn't preclude it from being counted as frozen. |
3457 | * |
3458 | * memorystatus_frozen_count--; |
3459 | * |
3460 | * We preserve the P_MEMSTAT_FROZEN state since the process |
3461 | * could have state on disk AND so will deserve some protection |
3462 | * in the jetsam bands. |
3463 | */ |
3464 | if ((p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) == 0) { |
3465 | p->p_memstat_state |= P_MEMSTAT_REFREEZE_ELIGIBLE; |
3466 | memorystatus_refreeze_eligible_count++; |
3467 | } |
3468 | if (p->p_memstat_thaw_count == 0 || p->p_memstat_last_thaw_interval < memorystatus_freeze_current_interval) { |
3469 | os_atomic_inc(&(memorystatus_freezer_stats.mfs_processes_thawed), relaxed); |
			if (strcmp(p->p_name, "com.apple.WebKit.WebContent") == 0) {
3471 | os_atomic_inc(&(memorystatus_freezer_stats.mfs_processes_thawed_webcontent), relaxed); |
3472 | } |
3473 | } |
3474 | p->p_memstat_last_thaw_interval = memorystatus_freeze_current_interval; |
3475 | p->p_memstat_thaw_count++; |
3476 | |
3477 | memorystatus_freeze_last_pid_thawed = p->p_pid; |
3478 | memorystatus_freeze_last_pid_thawed_ts = mach_absolute_time(); |
3479 | |
3480 | memorystatus_thaw_count++; |
3481 | memorystatus_thaw_count_since_boot++; |
3482 | } |
3483 | |
3484 | if (p->p_memstat_state & P_MEMSTAT_SUSPENDED) { |
3485 | memorystatus_suspended_count--; |
3486 | } |
3487 | |
3488 | pid = proc_getpid(p); |
3489 | #endif |
3490 | |
3491 | /* |
3492 | * P_MEMSTAT_FROZEN will remain unchanged. This used to be: |
3493 | * p->p_memstat_state &= ~(P_MEMSTAT_SUSPENDED | P_MEMSTAT_FROZEN); |
3494 | */ |
3495 | p->p_memstat_state &= ~P_MEMSTAT_SUSPENDED; |
3496 | |
3497 | proc_list_unlock(); |
3498 | |
3499 | #if CONFIG_FREEZE |
3500 | if (frozen) { |
3501 | memorystatus_freeze_entry_t data = { pid, FALSE, 0 }; |
3502 | memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data)); |
3503 | } |
3504 | #endif |
3505 | } |
3506 | |
3507 | void |
3508 | memorystatus_on_inactivity(proc_t p) |
3509 | { |
3510 | #pragma unused(p) |
3511 | #if CONFIG_FREEZE |
3512 | /* Wake the freeze thread */ |
3513 | thread_wakeup((event_t)&memorystatus_freeze_wakeup); |
3514 | #endif |
3515 | } |
3516 | |
3517 | /* |
3518 | * The proc_list_lock is held by the caller. |
3519 | */ |
3520 | static uint32_t |
3521 | memorystatus_build_state(proc_t p) |
3522 | { |
3523 | uint32_t snapshot_state = 0; |
3524 | |
3525 | /* General */ |
3526 | if (p->p_memstat_state & P_MEMSTAT_SUSPENDED) { |
3527 | snapshot_state |= kMemorystatusSuspended; |
3528 | } |
3529 | if (p->p_memstat_state & P_MEMSTAT_FROZEN) { |
3530 | snapshot_state |= kMemorystatusFrozen; |
3531 | } |
3532 | if (p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) { |
3533 | snapshot_state |= kMemorystatusWasThawed; |
3534 | } |
3535 | if (p->p_memstat_state & P_MEMSTAT_PRIORITY_ASSERTION) { |
3536 | snapshot_state |= kMemorystatusAssertion; |
3537 | } |
3538 | |
3539 | /* Tracking */ |
3540 | if (p->p_memstat_dirty & P_DIRTY_TRACK) { |
3541 | snapshot_state |= kMemorystatusTracked; |
3542 | } |
3543 | if ((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED) { |
3544 | snapshot_state |= kMemorystatusSupportsIdleExit; |
3545 | } |
3546 | if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) { |
3547 | snapshot_state |= kMemorystatusDirty; |
3548 | } |
3549 | |
3550 | return snapshot_state; |
3551 | } |
3552 | |
3553 | static boolean_t |
3554 | kill_idle_exit_proc(void) |
3555 | { |
3556 | proc_t p, victim_p = PROC_NULL; |
	uint64_t current_time, footprint_of_killed_proc;
3558 | boolean_t killed = FALSE; |
3559 | unsigned int i = 0; |
3560 | os_reason_t jetsam_reason = OS_REASON_NULL; |
3561 | |
3562 | /* Pick next idle exit victim. */ |
3563 | current_time = mach_absolute_time(); |
3564 | |
3565 | jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_IDLE_EXIT); |
3566 | if (jetsam_reason == OS_REASON_NULL) { |
3567 | memorystatus_log_error("kill_idle_exit_proc: failed to allocate jetsam reason\n" ); |
3568 | } |
3569 | |
3570 | proc_list_lock(); |
3571 | |
	p = memorystatus_get_first_proc_locked(&i, FALSE);
3573 | while (p) { |
3574 | /* No need to look beyond the idle band */ |
3575 | if (p->p_memstat_effectivepriority != JETSAM_PRIORITY_IDLE) { |
3576 | break; |
3577 | } |
3578 | |
3579 | if ((p->p_memstat_dirty & (P_DIRTY_ALLOW_IDLE_EXIT | P_DIRTY_IS_DIRTY | P_DIRTY_TERMINATED)) == (P_DIRTY_ALLOW_IDLE_EXIT)) { |
3580 | if (current_time >= p->p_memstat_idledeadline) { |
3581 | p->p_memstat_dirty |= P_DIRTY_TERMINATED; |
3582 | victim_p = proc_ref(p, true); |
3583 | break; |
3584 | } |
3585 | } |
3586 | |
		p = memorystatus_get_next_proc_locked(&i, p, FALSE);
3588 | } |
3589 | |
3590 | proc_list_unlock(); |
3591 | |
3592 | if (victim_p) { |
3593 | memorystatus_log( |
3594 | "memorystatus: killing_idle_process pid %d [%s] jetsam_reason->osr_code: %llu\n" , |
3595 | proc_getpid(victim_p), (*victim_p->p_name ? victim_p->p_name : "unknown" ), jetsam_reason->osr_code); |
3596 | killed = memorystatus_do_kill(p: victim_p, cause: kMemorystatusKilledIdleExit, jetsam_reason, footprint_of_killed_proc: &footprint_of_killed_proc); |
3597 | proc_rele(p: victim_p); |
3598 | } else { |
3599 | os_reason_free(cur_reason: jetsam_reason); |
3600 | } |
3601 | |
3602 | return killed; |
3603 | } |
3604 | |
3605 | void |
memorystatus_thread_wake(void)
3607 | { |
3608 | int thr_id = 0; |
3609 | int active_thr = atomic_load(&active_jetsam_threads); |
3610 | |
3611 | /* Wakeup all the jetsam threads */ |
3612 | for (thr_id = 0; thr_id < active_thr; thr_id++) { |
3613 | jetsam_thread_state_t *jetsam_thread = &jetsam_threads[thr_id]; |
		sched_cond_signal(&(jetsam_thread->jt_wakeup_cond), jetsam_thread->thread);
3615 | } |
3616 | } |
3617 | |
3618 | #if CONFIG_JETSAM |
3619 | |
3620 | static void |
memorystatus_thread_pool_max(void)
3622 | { |
3623 | /* Increase the jetsam thread pool to max_jetsam_threads */ |
3624 | int max_threads = max_jetsam_threads; |
3625 | memorystatus_log_info("Expanding memorystatus pool to %d!\n" , max_threads); |
3626 | atomic_store(&active_jetsam_threads, max_threads); |
3627 | } |
3628 | |
3629 | static void |
memorystatus_thread_pool_default(void)
3631 | { |
3632 | /* Restore the jetsam thread pool to a single thread */ |
3633 | memorystatus_log_info("Reverting memorystatus pool back to 1\n" ); |
3634 | atomic_store(&active_jetsam_threads, 1); |
3635 | } |
3636 | |
3637 | #endif /* CONFIG_JETSAM */ |
3638 | |
3639 | extern void vm_pressure_response(void); |
3640 | |
3641 | bool |
3642 | memorystatus_avail_pages_below_pressure(void) |
3643 | { |
3644 | #if CONFIG_JETSAM |
3645 | return memorystatus_available_pages <= memorystatus_available_pages_pressure; |
3646 | #else /* CONFIG_JETSAM */ |
3647 | return false; |
3648 | #endif /* CONFIG_JETSAM */ |
3649 | } |
3650 | |
3651 | bool |
3652 | memorystatus_avail_pages_below_critical(void) |
3653 | { |
3654 | #if CONFIG_JETSAM |
3655 | return memorystatus_available_pages <= memorystatus_available_pages_critical; |
3656 | #else /* CONFIG_JETSAM */ |
3657 | return false; |
3658 | #endif /* CONFIG_JETSAM */ |
3659 | } |
3660 | |
3661 | #if CONFIG_JETSAM |
3662 | static uint64_t |
3663 | memorystatus_swap_trigger_pages(void) |
3664 | { |
3665 | /* |
3666 | * The swapout trigger varies based on the current memorystatus_level. |
3667 | * When available memory is somewhat high (at memorystatus_available_pages_pressure) |
3668 | * we keep more swappable compressor segments in memory. |
3669 | * However, as available memory drops to our idle and eventually critical kill |
3670 | * thresholds we start swapping more aggressively. |
3671 | */ |
3672 | static uint32_t available_pages_factor[] = {0, 1, 1, 1, 2, 2, 3, 5, 7, 8, 10, 13, 15, 17, 20}; |
3673 | size_t index = MIN(memorystatus_level, sizeof(available_pages_factor) / sizeof(uint32_t) - 1); |
3674 | return available_pages_factor[index] * memorystatus_available_pages / 10; |
3675 | } |
3676 | |
3677 | static int |
3678 | sysctl_memorystatus_swap_trigger_pages SYSCTL_HANDLER_ARGS |
3679 | { |
3680 | #pragma unused(arg1, arg2) |
3681 | uint64_t trigger_pages = memorystatus_swap_trigger_pages(); |
3682 | return SYSCTL_OUT(req, &trigger_pages, sizeof(trigger_pages)); |
3683 | } |
3684 | |
3685 | SYSCTL_PROC(_kern, OID_AUTO, memorystatus_swap_trigger_pages, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, |
    0, 0, &sysctl_memorystatus_swap_trigger_pages, "I", "");
3687 | |
3688 | /* |
3689 | * Check if the number of full swappable csegments is over the trigger |
3690 | * threshold to start swapping. |
3691 | * The adjustment_factor is applied to the trigger to raise or lower |
 * it. For example, an adjustment factor of 110 raises the threshold by 10%.
3693 | */ |
3694 | bool |
3695 | memorystatus_swap_over_trigger(uint64_t adjustment_factor) |
3696 | { |
3697 | if (!memorystatus_swap_all_apps) { |
3698 | return false; |
3699 | } |
3700 | uint64_t trigger_pages = memorystatus_swap_trigger_pages(); |
3701 | trigger_pages = trigger_pages * adjustment_factor / 100; |
3702 | return atop_64(c_late_swapout_count * c_seg_allocsize) > trigger_pages; |
3703 | } |
3704 | |
3705 | /* |
3706 | * Check if the number of segments on the early swapin queue |
3707 | * is over the trigger to start compacting it. |
3708 | */ |
3709 | bool |
3710 | memorystatus_swapin_over_trigger(void) |
3711 | { |
3712 | return atop_64(c_late_swappedin_count * c_seg_allocsize) > memorystatus_swapin_trigger_pages; |
3713 | } |
3714 | #endif /* CONFIG_JETSAM */ |
3715 | |
3716 | #if DEVELOPMENT || DEBUG |
SYSCTL_UINT(_vm, OID_AUTO, c_late_swapout_count, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, &c_late_swapout_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, c_seg_allocsize, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, &c_seg_allocsize, 0, "");
3719 | #if CONFIG_FREEZE |
3720 | extern int32_t c_segment_pages_compressed_incore_late_swapout; |
SYSCTL_INT(_vm, OID_AUTO, c_segment_pages_compressed_incore_late_swapout, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_pages_compressed_incore_late_swapout, 0, "");
3722 | #endif /* CONFIG_FREEZE */ |
3723 | #endif /* DEVELOPMENT || DEBUG */ |
3724 | |
3725 | static boolean_t |
3726 | memorystatus_should_post_snapshot(int32_t priority, uint32_t cause) |
3727 | { |
3728 | boolean_t is_idle_priority; |
3729 | |
3730 | is_idle_priority = (priority == JETSAM_PRIORITY_IDLE || priority == JETSAM_PRIORITY_IDLE_DEFERRED); |
3731 | #if CONFIG_JETSAM |
3732 | #pragma unused(cause) |
3733 | /* |
3734 | * Don't generate logs for steady-state idle-exit kills, |
3735 | * unless it is overridden for debug or by the device |
3736 | * tree. |
3737 | */ |
3738 | |
3739 | return !is_idle_priority || memorystatus_idle_snapshot; |
3740 | |
3741 | #else /* CONFIG_JETSAM */ |
3742 | /* |
3743 | * Don't generate logs for steady-state idle-exit kills, |
3744 | * unless |
3745 | * - it is overridden for debug or by the device |
3746 | * tree. |
3747 | * OR |
3748 | * - the kill causes are important i.e. not kMemorystatusKilledIdleExit |
3749 | */ |
3750 | |
3751 | boolean_t snapshot_eligible_kill_cause = (is_reason_thrashing(cause) || is_reason_zone_map_exhaustion(cause)); |
3752 | return !is_idle_priority || memorystatus_idle_snapshot || snapshot_eligible_kill_cause; |
3753 | #endif /* CONFIG_JETSAM */ |
3754 | } |
3755 | |
3756 | |
3757 | static boolean_t |
3758 | memorystatus_act_on_hiwat_processes(uint32_t *errors, uint32_t *hwm_kill, bool *post_snapshot, uint64_t *memory_reclaimed) |
3759 | { |
3760 | boolean_t purged = FALSE, killed = FALSE; |
3761 | |
3762 | *memory_reclaimed = 0; |
	killed = memorystatus_kill_hiwat_proc(errors, &purged, memory_reclaimed);
3764 | |
3765 | if (killed) { |
3766 | *hwm_kill = *hwm_kill + 1; |
3767 | *post_snapshot = TRUE; |
3768 | return TRUE; |
3769 | } else { |
3770 | if (purged == FALSE) { |
3771 | /* couldn't purge and couldn't kill */ |
3772 | memorystatus_hwm_candidates = FALSE; |
3773 | } |
3774 | } |
3775 | |
3776 | return killed; |
3777 | } |
3778 | |
3779 | static bool |
3780 | memorystatus_dump_caches(bool purge_corpses) |
3781 | { |
3782 | pmap_release_pages_fast(); |
3783 | if (purge_corpses && total_corpses_count() > 0) { |
3784 | os_atomic_inc(&block_corpses, relaxed); |
3785 | assert(block_corpses > 0); |
3786 | task_purge_all_corpses(); |
3787 | return true; |
3788 | } |
3789 | return false; |
3790 | } |
3791 | |
3792 | /* |
3793 | * Called before jetsamming in the foreground band in the hope that we'll |
3794 | * avoid a jetsam. |
3795 | */ |
3796 | static void |
3797 | memorystatus_approaching_fg_band(bool *corpse_list_purged) |
3798 | { |
3799 | bool corpses_purged = false; |
3800 | assert(corpse_list_purged != NULL); |
3801 | if (memorystatus_should_issue_fg_band_notify) { |
3802 | memorystatus_issue_fg_band_notify(); |
3803 | } |
	corpses_purged = memorystatus_dump_caches(!(*corpse_list_purged));
3805 | *corpse_list_purged |= corpses_purged; |
3806 | #if CONFIG_DEFERRED_RECLAIM |
3807 | vm_deferred_reclamation_reclaim_all_memory(); |
3808 | #endif /* CONFIG_DEFERRED_RECLAIM */ |
3809 | } |
3810 | |
3811 | int jld_eval_aggressive_count = 0; |
3812 | int32_t jld_priority_band_max = JETSAM_PRIORITY_UI_SUPPORT; |
3813 | uint64_t jld_timestamp_msecs = 0; |
3814 | int jld_idle_kill_candidates = 0; |
3815 | |
3816 | static boolean_t |
3817 | memorystatus_act_aggressive(uint32_t cause, os_reason_t jetsam_reason, int *jld_idle_kills, bool *corpse_list_purged, bool *post_snapshot, uint64_t *memory_reclaimed) |
3818 | { |
3819 | boolean_t killed; |
3820 | uint32_t errors = 0; |
	uint64_t footprint_of_killed_proc = 0;
3822 | int elevated_bucket_count = 0, maximum_kills = 0, band = 0; |
3823 | *memory_reclaimed = 0; |
3824 | |
3825 | jld_eval_aggressive_count++; |
3826 | |
3827 | if (jld_eval_aggressive_count == memorystatus_jld_eval_aggressive_count) { |
3828 | memorystatus_approaching_fg_band(corpse_list_purged); |
3829 | } else if (jld_eval_aggressive_count > memorystatus_jld_eval_aggressive_count) { |
3830 | /* |
		 * Bump up the jetsam priority limit (e.g. the bucket index).
3832 | * Enforce bucket index sanity. |
3833 | */ |
3834 | if ((memorystatus_jld_eval_aggressive_priority_band_max < 0) || |
3835 | (memorystatus_jld_eval_aggressive_priority_band_max >= MEMSTAT_BUCKET_COUNT)) { |
3836 | /* |
3837 | * Do nothing. Stick with the default level. |
3838 | */ |
3839 | } else { |
3840 | jld_priority_band_max = memorystatus_jld_eval_aggressive_priority_band_max; |
3841 | } |
3842 | } |
3843 | |
3844 | proc_list_lock(); |
3845 | elevated_bucket_count = memstat_bucket[JETSAM_PRIORITY_ELEVATED_INACTIVE].count; |
3846 | proc_list_unlock(); |
3847 | |
3848 | /* Visit elevated processes first */ |
3849 | while (elevated_bucket_count) { |
3850 | elevated_bucket_count--; |
3851 | |
3852 | /* |
3853 | * memorystatus_kill_elevated_process() drops a reference, |
3854 | * so take another one so we can continue to use this exit reason |
3855 | * even after it returns. |
3856 | */ |
3857 | |
		os_reason_ref(jetsam_reason);
		killed = memorystatus_kill_elevated_process(
			cause,
			jetsam_reason,
			JETSAM_PRIORITY_ELEVATED_INACTIVE,
			jld_eval_aggressive_count,
			&errors, &footprint_of_killed_proc);
3865 | if (killed) { |
3866 | *post_snapshot = true; |
3867 | *memory_reclaimed += footprint_of_killed_proc; |
3868 | if (memorystatus_avail_pages_below_pressure()) { |
3869 | /* |
3870 | * Still under pressure. |
				 * Find another pinned process.
3872 | */ |
3873 | continue; |
3874 | } else { |
3875 | return TRUE; |
3876 | } |
3877 | } else { |
3878 | /* |
3879 | * No pinned processes left to kill. |
3880 | * Abandon elevated band. |
3881 | */ |
3882 | break; |
3883 | } |
3884 | } |
3885 | |
3886 | proc_list_lock(); |
3887 | for (band = 0; band < jld_priority_band_max; band++) { |
3888 | maximum_kills += memstat_bucket[band].count; |
3889 | } |
3890 | proc_list_unlock(); |
3891 | maximum_kills *= memorystatus_jld_max_kill_loops; |
3892 | /* |
3893 | * memorystatus_kill_processes_aggressive() allocates its own |
3894 | * jetsam_reason so the kMemorystatusKilledProcThrashing cause |
3895 | * is consistent throughout the aggressive march. |
3896 | */ |
	killed = memorystatus_kill_processes_aggressive(
		kMemorystatusKilledProcThrashing,
		jld_eval_aggressive_count,
		jld_priority_band_max,
		maximum_kills,
		&errors, &footprint_of_killed_proc);
3903 | |
3904 | if (killed) { |
3905 | /* Always generate logs after aggressive kill */ |
3906 | *post_snapshot = true; |
3907 | *memory_reclaimed += footprint_of_killed_proc; |
3908 | *jld_idle_kills = 0; |
3909 | return TRUE; |
3910 | } |
3911 | |
3912 | return FALSE; |
3913 | } |
3914 | |
3915 | /* |
3916 | * Sets up a new jetsam thread. |
3917 | */ |
3918 | static void |
3919 | memorystatus_thread_init(jetsam_thread_state_t *jetsam_thread) |
3920 | { |
3921 | char name[32]; |
	thread_wire_internal(host_priv_self(), current_thread(), TRUE, NULL);
	snprintf(name, 32, "VM_memorystatus_%d", jetsam_thread->index + 1);
3924 | |
3925 | /* Limit all but one thread to the lower jetsam bands, as that's where most of the victims are. */ |
3926 | if (jetsam_thread->index == 0) { |
3927 | if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { |
3928 | thread_vm_bind_group_add(); |
3929 | } |
3930 | jetsam_thread->limit_to_low_bands = FALSE; |
3931 | } else { |
3932 | jetsam_thread->limit_to_low_bands = TRUE; |
3933 | } |
3934 | #if CONFIG_THREAD_GROUPS |
3935 | thread_group_vm_add(); |
3936 | #endif |
	thread_set_thread_name(current_thread(), name);
	sched_cond_init(&(jetsam_thread->jt_wakeup_cond));
3939 | jetsam_thread->inited = TRUE; |
3940 | } |
3941 | |
3942 | /* |
3943 | * Create a new jetsam reason from the given kill cause. |
3944 | */ |
3945 | static os_reason_t |
3946 | create_jetsam_reason(memorystatus_kill_cause_t cause) |
3947 | { |
3948 | os_reason_t jetsam_reason = OS_REASON_NULL; |
3949 | |
3950 | jetsam_reason_t reason_code = (jetsam_reason_t)cause; |
3951 | assert3u(reason_code, <=, JETSAM_REASON_MEMORYSTATUS_MAX); |
3952 | |
	jetsam_reason = os_reason_create(OS_REASON_JETSAM, reason_code);
	if (jetsam_reason == OS_REASON_NULL) {
		memorystatus_log_error("memorystatus: failed to allocate jetsam reason for cause %u\n", cause);
3956 | } |
3957 | return jetsam_reason; |
3958 | } |
3959 | |
3960 | /* |
3961 | * Do one kill as we're marching up the priority bands. |
3962 | * This is a wrapper around memorystatus_kill_top_process that also |
 * sets post_snapshot, tracks jld_idle_kills, and notifies if we're approaching the fg band.
3964 | */ |
3965 | static bool |
3966 | memorystatus_do_priority_kill(jetsam_thread_state_t *thread, |
3967 | uint32_t kill_cause, int32_t max_priority, bool only_swappable) |
3968 | { |
3969 | os_reason_t jetsam_reason = OS_REASON_NULL; |
3970 | bool killed = false; |
3971 | int priority; |
3972 | |
	jetsam_reason = create_jetsam_reason(kill_cause);
3974 | /* |
3975 | * memorystatus_kill_top_process() drops a reference, |
3976 | * so take another one so we can continue to use this exit reason |
3977 | * even after it returns |
3978 | */ |
	os_reason_ref(jetsam_reason);
3980 | |
3981 | /* LRU */ |
	killed = memorystatus_kill_top_process(true, thread->sort_flag, kill_cause, jetsam_reason, max_priority,
	    only_swappable, &priority, &thread->errors, &thread->memory_reclaimed);
3984 | thread->sort_flag = false; |
3985 | |
3986 | if (killed) { |
		if (memorystatus_should_post_snapshot(priority, kill_cause) == TRUE) {
3988 | thread->post_snapshot = true; |
3989 | } |
3990 | |
3991 | /* Jetsam Loop Detection */ |
3992 | if (memorystatus_jld_enabled == TRUE) { |
3993 | if (priority <= applications_aging_band) { |
3994 | thread->jld_idle_kills++; |
3995 | } else { |
3996 | /* |
3997 | * We've reached into bands beyond idle deferred. |
				 * We make no attempt to monitor them.
3999 | */ |
4000 | } |
4001 | } |
4002 | |
4003 | /* |
4004 | * If we have jetsammed a process in or above JETSAM_PRIORITY_FREEZER |
4005 | * then we attempt to relieve pressure by purging corpse memory and notifying |
4006 | * anybody wanting to know this. |
4007 | */ |
4008 | if (priority >= JETSAM_PRIORITY_FREEZER) { |
			memorystatus_approaching_fg_band(&thread->corpse_list_purged);
4010 | } |
4011 | } |
	os_reason_free(jetsam_reason);
4013 | |
4014 | return killed; |
4015 | } |
4016 | |
4017 | static bool |
4018 | memorystatus_do_action(jetsam_thread_state_t *thread, memorystatus_action_t action, uint32_t kill_cause) |
4019 | { |
4020 | bool killed = false; |
4021 | os_reason_t jetsam_reason = OS_REASON_NULL; |
4022 | |
4023 | switch (action) { |
4024 | case MEMORYSTATUS_KILL_HIWATER: |
		killed = memorystatus_act_on_hiwat_processes(&thread->errors, &thread->hwm_kills,
		    &thread->post_snapshot, &thread->memory_reclaimed);
4027 | break; |
4028 | case MEMORYSTATUS_KILL_AGGRESSIVE: |
		jetsam_reason = create_jetsam_reason(kill_cause);
		killed = memorystatus_act_aggressive(kill_cause, jetsam_reason,
		    &thread->jld_idle_kills, &thread->corpse_list_purged, &thread->post_snapshot,
		    &thread->memory_reclaimed);
		os_reason_free(jetsam_reason);
4034 | break; |
4035 | case MEMORYSTATUS_KILL_TOP_PROCESS: |
		killed = memorystatus_do_priority_kill(thread, kill_cause, max_kill_priority, false);
4037 | break; |
4038 | case MEMORYSTATUS_WAKE_SWAPPER: |
4039 | memorystatus_log_info( |
4040 | "memorystatus_do_action: Waking up swap thread. memorystatus_available_pages: %llu\n" , |
4041 | (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES); |
4042 | os_atomic_store(&vm_swapout_wake_pending, true, relaxed); |
4043 | thread_wakeup((event_t)&vm_swapout_thread); |
4044 | break; |
4045 | case MEMORYSTATUS_PROCESS_SWAPIN_QUEUE: |
4046 | memorystatus_log_info( |
4047 | "memorystatus_do_action: Processing swapin queue of length: %u memorystatus_available_pages: %llu\n" , |
4048 | c_late_swappedin_count, (uint64_t) MEMORYSTATUS_LOG_AVAILABLE_PAGES); |
4049 | vm_compressor_process_special_swapped_in_segments(); |
4050 | break; |
4051 | case MEMORYSTATUS_KILL_SUSPENDED_SWAPPABLE: |
4052 | killed = memorystatus_do_priority_kill(thread, kill_cause, JETSAM_PRIORITY_BACKGROUND - 1, true); |
4053 | break; |
4054 | case MEMORYSTATUS_KILL_SWAPPABLE: |
		killed = memorystatus_do_priority_kill(thread, kill_cause, max_kill_priority, true);
4056 | break; |
4057 | case MEMORYSTATUS_KILL_NONE: |
4058 | panic("memorystatus_do_action: Impossible! memorystatus_do_action called with action = NONE\n" ); |
4059 | } |
4060 | return killed; |
4061 | } |
4062 | |
4063 | static void |
memorystatus_post_snapshot(void)
4065 | { |
4066 | proc_list_lock(); |
4067 | size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + |
4068 | sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_count); |
4069 | uint64_t timestamp_now = mach_absolute_time(); |
4070 | memorystatus_jetsam_snapshot->notification_time = timestamp_now; |
4071 | memorystatus_jetsam_snapshot->js_gencount++; |
4072 | if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 || |
4073 | timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { |
4074 | proc_list_unlock(); |
		int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size));
4076 | if (!ret) { |
4077 | proc_list_lock(); |
			memorystatus_jetsam_snapshot_last_timestamp = timestamp_now;
			proc_list_unlock();
4079 | } |
4080 | } else { |
4081 | proc_list_unlock(); |
4082 | } |
4083 | } |
4084 | |
4085 | |
4086 | /* Callback into vm_compressor.c to signal that thrashing has been mitigated. */ |
4087 | extern void vm_thrashing_jetsam_done(void); |
4088 | |
4089 | /* |
4090 | * Main entrypoint for the memorystatus thread. |
4091 | * This thread is woken up when we're low on one of the following resources: |
4092 | * - available pages (free + filebacked) |
4093 | * - zone memory |
4094 | * - compressor space |
4095 | * |
4096 | * Or when thrashing is detected in the compressor or file cache. |
4097 | */ |
4098 | static void |
4099 | memorystatus_thread_internal(jetsam_thread_state_t *jetsam_thread) |
4100 | { |
4101 | uint64_t total_memory_reclaimed = 0; |
4102 | bool highwater_remaining = true; |
4103 | bool swappable_apps_remaining = false; |
4104 | bool suspended_swappable_apps_remaining = false; |
4105 | |
4106 | #if CONFIG_JETSAM |
4107 | swappable_apps_remaining = memorystatus_swap_all_apps; |
4108 | suspended_swappable_apps_remaining = memorystatus_swap_all_apps; |
4109 | #endif /* CONFIG_JETSAM */ |
4110 | |
4111 | assert(jetsam_thread != NULL); |
4112 | jetsam_thread->jld_idle_kills = 0; |
4113 | jetsam_thread->errors = 0; |
4114 | jetsam_thread->hwm_kills = 0; |
4115 | jetsam_thread->sort_flag = true; |
4116 | jetsam_thread->corpse_list_purged = false; |
	jetsam_thread->post_snapshot = false;
4118 | jetsam_thread->memory_reclaimed = 0; |
4119 | |
4120 | if (jetsam_thread->inited == FALSE) { |
4121 | /* |
4122 | * It's the first time the thread has run, so just mark the thread as privileged and block. |
4123 | */ |
4124 | memorystatus_thread_init(jetsam_thread); |
		sched_cond_wait(&(jetsam_thread->jt_wakeup_cond), THREAD_UNINT, memorystatus_thread);
4126 | } |
4127 | |
4128 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_SCAN) | DBG_FUNC_START, |
4129 | MEMORYSTATUS_LOG_AVAILABLE_PAGES, memorystatus_jld_enabled, memorystatus_jld_eval_period_msecs, memorystatus_jld_eval_aggressive_count); |
4130 | |
4131 | extern uint32_t c_segment_count; |
4132 | extern mach_timespec_t major_compact_ts; |
4133 | clock_sec_t now; |
4134 | clock_nsec_t nsec; |
	clock_get_system_nanotime(&now, &nsec);
4136 | mach_timespec_t major_compact_diff = {.tv_sec = (int)now, .tv_nsec = nsec}; |
4137 | SUB_MACH_TIMESPEC(&major_compact_diff, &major_compact_ts); |
4138 | memorystatus_log_info( |
4139 | "memorystatus: c_segment_count=%u major compaction occurred %u seconds ago\n" , |
4140 | c_segment_count, major_compact_diff.tv_sec); |
4141 | |
4142 | /* |
4143 | * Jetsam aware version. |
4144 | * |
4145 | * The VM pressure notification thread is working its way through clients in parallel. |
4146 | * |
4147 | * So, while the pressure notification thread is targeting processes in order of |
4148 | * increasing jetsam priority, we can hopefully reduce / stop its work by killing |
4149 | * any processes that have exceeded their highwater mark. |
4150 | * |
4151 | * If we run out of HWM processes and our available pages drops below the critical threshold, then, |
4152 | * we target the least recently used process in order of increasing jetsam priority (exception: the FG band). |
4153 | */ |
4154 | while (true) { |
4155 | bool killed; |
4156 | jetsam_thread->memory_reclaimed = 0; |
4157 | uint32_t cause = 0; |
4158 | |
		memorystatus_action_t action = memorystatus_pick_action(jetsam_thread, &cause,
		    highwater_remaining, suspended_swappable_apps_remaining, swappable_apps_remaining,
		    &jetsam_thread->jld_idle_kills);
4162 | if (action == MEMORYSTATUS_KILL_NONE) { |
4163 | break; |
4164 | } |
4165 | |
4166 | if (cause == kMemorystatusKilledVMCompressorThrashing || cause == kMemorystatusKilledVMCompressorSpaceShortage) { |
memorystatus_log("memorystatus: killing due to \"%s\" - compression_ratio=%u\n", memorystatus_kill_cause_name[cause], vm_compression_ratio());
4168 | } |
4169 | |
killed = memorystatus_do_action(jetsam_thread, action, cause);
4171 | total_memory_reclaimed += jetsam_thread->memory_reclaimed; |
4172 | |
4173 | if (!killed) { |
4174 | if (action == MEMORYSTATUS_KILL_HIWATER) { |
4175 | highwater_remaining = false; |
4176 | } else if (action == MEMORYSTATUS_KILL_SWAPPABLE) { |
4177 | swappable_apps_remaining = false; |
4178 | suspended_swappable_apps_remaining = false; |
4179 | } else if (action == MEMORYSTATUS_KILL_SUSPENDED_SWAPPABLE) { |
4180 | suspended_swappable_apps_remaining = false; |
4181 | } |
4182 | } else { |
4183 | if (cause == kMemorystatusKilledVMCompressorThrashing || cause == kMemorystatusKilledVMCompressorSpaceShortage) { |
memorystatus_log("memorystatus: post-jetsam compressor fragmentation_level=%u\n", vm_compressor_fragmentation_level());
4185 | } |
4186 | /* Always re-check for highwater and swappable kills after doing a kill. */ |
4187 | highwater_remaining = true; |
4188 | swappable_apps_remaining = true; |
4189 | suspended_swappable_apps_remaining = true; |
4190 | } |
4191 | |
4192 | if ((action == MEMORYSTATUS_KILL_TOP_PROCESS || action == MEMORYSTATUS_KILL_AGGRESSIVE) && !killed && total_memory_reclaimed == 0 && memorystatus_avail_pages_below_critical()) { |
4193 | /* |
4194 | * Still under pressure and unable to kill a process - purge corpse memory |
4195 | * and get everything back from the pmap. |
4196 | */ |
4197 | memorystatus_dump_caches(true); |
4198 | |
4199 | if (!jetsam_thread->limit_to_low_bands && memorystatus_avail_pages_below_critical()) { |
4200 | /* |
4201 | * Still under pressure and unable to kill a process - panic |
4202 | */ |
panic("memorystatus_jetsam_thread: no victim! available pages:%llu", (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES);
4204 | } |
4205 | } |
4206 | |
4207 | /* |
4208 | * If we did a kill on behalf of another subsystem (compressor or zalloc) |
4209 | * notify them. |
4210 | */ |
4211 | if (killed && is_reason_thrashing(cause)) { |
4212 | os_atomic_store(&memorystatus_compressor_space_shortage, false, release); |
4213 | #if CONFIG_PHANTOM_CACHE |
4214 | os_atomic_store(&memorystatus_phantom_cache_pressure, false, release); |
4215 | #endif /* CONFIG_PHANTOM_CACHE */ |
4216 | #if CONFIG_JETSAM |
4217 | vm_thrashing_jetsam_done(); |
4218 | #endif /* CONFIG_JETSAM */ |
4219 | } else if (killed && is_reason_zone_map_exhaustion(cause)) { |
4220 | os_atomic_store(&memorystatus_zone_map_is_exhausted, false, release); |
4221 | } else if (killed && cause == kMemorystatusKilledVMPageoutStarvation) { |
4222 | os_atomic_store(&memorystatus_pageout_starved, false, release); |
4223 | } |
4224 | } |
4225 | |
4226 | if (jetsam_thread->errors) { |
4227 | memorystatus_clear_errors(); |
4228 | } |
4229 | |
4230 | if (jetsam_thread->post_snapshot) { |
4231 | memorystatus_post_snapshot(); |
4232 | } |
4233 | |
4234 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_SCAN) | DBG_FUNC_END, |
4235 | MEMORYSTATUS_LOG_AVAILABLE_PAGES, total_memory_reclaimed); |
4236 | |
4237 | if (jetsam_thread->corpse_list_purged) { |
4238 | os_atomic_dec(&block_corpses, relaxed); |
4239 | assert(block_corpses >= 0); |
4240 | } |
4241 | } |
4242 | |
4243 | OS_NORETURN |
4244 | static void |
4245 | memorystatus_thread(void *param __unused, wait_result_t wr __unused) |
4246 | { |
4247 | jetsam_thread_state_t *jetsam_thread = jetsam_current_thread(); |
sched_cond_ack(&(jetsam_thread->jt_wakeup_cond));
4249 | while (1) { |
4250 | memorystatus_thread_internal(jetsam_thread); |
sched_cond_wait(&(jetsam_thread->jt_wakeup_cond), THREAD_UNINT, memorystatus_thread);
4252 | } |
4253 | } |
4254 | |
4255 | /* |
4256 | * This section defines when we deploy aggressive jetsam. |
4257 | * Aggressive jetsam kills everything up to the jld_priority_band_max band. |
4258 | */ |
4259 | |
4260 | /* |
4261 | * Returns TRUE: |
4262 | * when an idle-exitable proc was killed |
4263 | * Returns FALSE: |
4264 | * when there are no more idle-exitable procs found |
4265 | * when the attempt to kill an idle-exitable proc failed |
4266 | */ |
4267 | boolean_t |
4268 | memorystatus_idle_exit_from_VM(void) |
4269 | { |
4270 | /* |
4271 | * This routine should no longer be needed since we are |
4272 | * now using jetsam bands on all platforms and so will deal |
4273 | * with IDLE processes within the memorystatus thread itself. |
4274 | * |
* But we still use it because we observed that macOS systems
* started heavy compression/swapping with a bunch of
* idle-exitable processes alive and doing nothing. We decided
* we would rather kill those processes than start swapping earlier.
4279 | */ |
4280 | |
4281 | return kill_idle_exit_proc(); |
4282 | } |
4283 | |
4284 | /* |
4285 | * Callback invoked when allowable physical memory footprint exceeded |
4286 | * (dirty pages + IOKit mappings) |
4287 | * |
4288 | * This is invoked for both advisory, non-fatal per-task high watermarks, |
4289 | * as well as the fatal task memory limits. |
4290 | */ |
4291 | void |
memorystatus_on_ledger_footprint_exceeded(boolean_t warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal)
4293 | { |
4294 | os_reason_t jetsam_reason = OS_REASON_NULL; |
4295 | |
4296 | proc_t p = current_proc(); |
4297 | |
4298 | #if VM_PRESSURE_EVENTS |
4299 | if (warning == TRUE) { |
4300 | /* |
4301 | * This is a warning path which implies that the current process is close, but has |
4302 | * not yet exceeded its per-process memory limit. |
4303 | */ |
if (memorystatus_warn_process(p, memlimit_is_active, memlimit_is_fatal, FALSE /* not exceeded */) != TRUE) {
4305 | /* Print warning, since it's possible that task has not registered for pressure notifications */ |
memorystatus_log_error(
"memorystatus_on_ledger_footprint_exceeded: failed to warn the current task (%d exiting, or no handler registered?).\n",
proc_getpid(p));
4309 | } |
4310 | return; |
4311 | } |
4312 | #endif /* VM_PRESSURE_EVENTS */ |
4313 | |
4314 | if (memlimit_is_fatal) { |
4315 | /* |
4316 | * If this process has no high watermark or has a fatal task limit, then we have been invoked because the task |
4317 | * has violated either the system-wide per-task memory limit OR its own task limit. |
4318 | */ |
4319 | jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_PERPROCESSLIMIT); |
4320 | if (jetsam_reason == NULL) { |
memorystatus_log_error("task_exceeded_footprint: failed to allocate jetsam reason\n");
4322 | } else if (corpse_for_fatal_memkill && proc_send_synchronous_EXC_RESOURCE(p) == FALSE) { |
4323 | /* Set OS_REASON_FLAG_GENERATE_CRASH_REPORT to generate corpse */ |
4324 | jetsam_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT; |
4325 | } |
4326 | |
if (memorystatus_kill_process_sync(proc_getpid(p), kMemorystatusKilledPerProcessLimit, jetsam_reason) != TRUE) {
memorystatus_log_error("task_exceeded_footprint: failed to kill the current task (exiting?).\n");
4329 | } |
4330 | } else { |
4331 | /* |
4332 | * HWM offender exists. Done without locks or synchronization. |
4333 | * See comment near its declaration for more details. |
4334 | */ |
4335 | memorystatus_hwm_candidates = TRUE; |
4336 | |
4337 | #if VM_PRESSURE_EVENTS |
4338 | /* |
4339 | * The current process is not in the warning path. |
4340 | * This path implies the current process has exceeded a non-fatal (soft) memory limit. |
4341 | * Failure to send note is ignored here. |
4342 | */ |
(void)memorystatus_warn_process(p, memlimit_is_active, memlimit_is_fatal, TRUE /* exceeded */);
4344 | |
4345 | #endif /* VM_PRESSURE_EVENTS */ |
4346 | } |
4347 | } |
4348 | |
4349 | inline void |
memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal)
4351 | { |
4352 | proc_t p = current_proc(); |
4353 | |
4354 | /* |
4355 | * The limit violation is logged here, but only once per process per limit. |
4356 | * Soft memory limit is a non-fatal high-water-mark |
4357 | * Hard memory limit is a fatal custom-task-limit or system-wide per-task memory limit. |
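*
* Example output with illustrative values: "EXC_RESOURCE -> heavyapp[123] exceeded mem limit: ActiveHard 2048 MB (fatal)"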
4358 | */ |
4359 | |
memorystatus_log("EXC_RESOURCE -> %s[%d] exceeded mem limit: %s%s %d MB (%s)\n",
((p && *p->p_name) ? p->p_name : "unknown"), (p ? proc_getpid(p) : -1), (memlimit_is_active ? "Active" : "Inactive"),
(memlimit_is_fatal ? "Hard" : "Soft"), max_footprint_mb,
(memlimit_is_fatal ? "fatal" : "non-fatal"));
4364 | } |
4365 | |
4366 | inline void |
4367 | memorystatus_log_diag_threshold_exception(const int diag_threshold_value) |
4368 | { |
4369 | proc_t p = current_proc(); |
4370 | |
4371 | /* |
4372 | * The limit violation is logged here, but only once per process per limit. |
4373 | * Soft memory limit is a non-fatal high-water-mark |
4374 | * Hard memory limit is a fatal custom-task-limit or system-wide per-task memory limit. |
4375 | */ |
4376 | |
memorystatus_log("EXC_RESOURCE -> %s[%d] exceeded diag threshold limit: %d MB\n",
((p && *p->p_name) ? p->p_name : "unknown"), (p ? proc_getpid(p) : -1), diag_threshold_value);
4379 | } |
4380 | |
4381 | /* |
4382 | * Description: |
4383 | * Evaluates process state to determine which limit |
4384 | * should be applied (active vs. inactive limit). |
4385 | * |
4386 | * Processes that have the 'elevated inactive jetsam band' attribute |
4387 | * are first evaluated based on their current priority band. |
4388 | * presently elevated ==> active |
4389 | * |
4390 | * Processes that opt into dirty tracking are evaluated |
4391 | * based on clean vs dirty state. |
4392 | * dirty ==> active |
4393 | * clean ==> inactive |
4394 | * |
* Processes that do not opt into dirty tracking are
* evaluated based on priority level.
4397 | * Foreground or above ==> active |
4398 | * Below Foreground ==> inactive |
4399 | * |
* Return: TRUE if active
*         FALSE if inactive
4402 | */ |
4403 | |
4404 | static boolean_t |
4405 | proc_jetsam_state_is_active_locked(proc_t p) |
4406 | { |
4407 | if ((p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) && |
4408 | (p->p_memstat_effectivepriority == JETSAM_PRIORITY_ELEVATED_INACTIVE)) { |
4409 | /* |
4410 | * process has the 'elevated inactive jetsam band' attribute |
4411 | * and process is present in the elevated band |
4412 | * implies active state |
4413 | */ |
4414 | return TRUE; |
4415 | } else if (p->p_memstat_dirty & P_DIRTY_TRACK) { |
4416 | /* |
4417 | * process has opted into dirty tracking |
4418 | * active state is based on dirty vs. clean |
4419 | */ |
4420 | if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) { |
4421 | /* |
4422 | * process is dirty |
4423 | * implies active state |
4424 | */ |
4425 | return TRUE; |
4426 | } else { |
4427 | /* |
4428 | * process is clean |
4429 | * implies inactive state |
4430 | */ |
4431 | return FALSE; |
4432 | } |
4433 | } else if (p->p_memstat_effectivepriority >= JETSAM_PRIORITY_FOREGROUND) { |
4434 | /* |
4435 | * process is Foreground or higher |
4436 | * implies active state |
4437 | */ |
4438 | return TRUE; |
4439 | } else { |
4440 | /* |
4441 | * process found below Foreground |
4442 | * implies inactive state |
4443 | */ |
4444 | return FALSE; |
4445 | } |
4446 | } |
4447 | |
4448 | static boolean_t |
4449 | memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason) |
4450 | { |
4451 | boolean_t res; |
4452 | |
4453 | uint32_t errors = 0; |
4454 | uint64_t memory_reclaimed = 0; |
4455 | |
4456 | if (victim_pid == -1) { |
4457 | /* No pid, so kill first process */ |
res = memorystatus_kill_top_process(true, true, cause, jetsam_reason,
max_kill_priority, false, NULL, &errors, &memory_reclaimed);
4460 | } else { |
4461 | res = memorystatus_kill_specific_process(victim_pid, cause, jetsam_reason); |
4462 | } |
4463 | |
4464 | if (errors) { |
4465 | memorystatus_clear_errors(); |
4466 | } |
4467 | |
4468 | if (res == TRUE) { |
4469 | /* Fire off snapshot notification */ |
4470 | proc_list_lock(); |
4471 | size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + |
4472 | sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_count; |
4473 | uint64_t timestamp_now = mach_absolute_time(); |
4474 | memorystatus_jetsam_snapshot->notification_time = timestamp_now; |
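/*
* Throttle: only post a new snapshot note if none has been posted yet, or
* the previous one is older than memorystatus_jetsam_snapshot_timeout.
*/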
4475 | if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 || |
4476 | timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { |
4477 | proc_list_unlock(); |
int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size));
4479 | if (!ret) { |
4480 | proc_list_lock(); |
4481 | memorystatus_jetsam_snapshot_last_timestamp = timestamp_now; |
4482 | proc_list_unlock(); |
4483 | } |
4484 | } else { |
4485 | proc_list_unlock(); |
4486 | } |
4487 | } |
4488 | |
4489 | return res; |
4490 | } |
4491 | |
4492 | /* |
4493 | * Jetsam a specific process. |
4494 | */ |
4495 | static boolean_t |
4496 | memorystatus_kill_specific_process(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason) |
4497 | { |
4498 | boolean_t killed; |
4499 | proc_t p; |
4500 | uint64_t killtime = 0; |
uint64_t footprint_of_killed_proc;
4502 | clock_sec_t tv_sec; |
4503 | clock_usec_t tv_usec; |
4504 | uint32_t tv_msec; |
4505 | |
4506 | /* TODO - add a victim queue and push this into the main jetsam thread */ |
4507 | |
p = proc_find(victim_pid);
4509 | if (!p) { |
os_reason_free(jetsam_reason);
4511 | return FALSE; |
4512 | } |
4513 | |
4514 | proc_list_lock(); |
4515 | |
4516 | if (p->p_memstat_state & P_MEMSTAT_TERMINATED) { |
4517 | /* |
4518 | * Someone beat us to this kill. |
4519 | * Nothing to do here. |
4520 | */ |
4521 | proc_list_unlock(); |
os_reason_free(jetsam_reason);
4523 | proc_rele(p); |
4524 | return FALSE; |
4525 | } |
4526 | p->p_memstat_state |= P_MEMSTAT_TERMINATED; |
4527 | |
4528 | if (memorystatus_jetsam_snapshot_count == 0) { |
memorystatus_init_jetsam_snapshot_locked(NULL, 0);
4530 | } |
4531 | |
4532 | killtime = mach_absolute_time(); |
absolutetime_to_microtime(killtime, &tv_sec, &tv_usec);
4534 | tv_msec = tv_usec / 1000; |
4535 | |
memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime);
4537 | |
4538 | proc_list_unlock(); |
4539 | |
killed = memorystatus_do_kill(p, cause, jetsam_reason, &footprint_of_killed_proc);
4541 | |
memorystatus_log("%lu.%03d memorystatus: killing_specific_process pid %d [%s] (%s %d) %lluKB - memorystatus_available_pages: %llu\n",
(unsigned long)tv_sec, tv_msec, victim_pid, ((p && *p->p_name) ? p->p_name : "unknown"),
memorystatus_kill_cause_name[cause], (p ? p->p_memstat_effectivepriority : -1),
4545 | footprint_of_killed_proc >> 10, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES); |
4546 | |
4547 | if (!killed) { |
4548 | proc_list_lock(); |
4549 | p->p_memstat_state &= ~P_MEMSTAT_TERMINATED; |
4550 | proc_list_unlock(); |
4551 | } |
4552 | |
4553 | proc_rele(p); |
4554 | |
4555 | return killed; |
4556 | } |
4557 | |
4558 | |
4559 | /* |
4560 | * Toggle the P_MEMSTAT_SKIP bit. |
4561 | * Takes the proc_list_lock. |
4562 | */ |
4563 | void |
4564 | proc_memstat_skip(proc_t p, boolean_t set) |
4565 | { |
4566 | #if DEVELOPMENT || DEBUG |
4567 | if (p) { |
4568 | proc_list_lock(); |
4569 | if (set == TRUE) { |
4570 | p->p_memstat_state |= P_MEMSTAT_SKIP; |
4571 | } else { |
4572 | p->p_memstat_state &= ~P_MEMSTAT_SKIP; |
4573 | } |
4574 | proc_list_unlock(); |
4575 | } |
4576 | #else |
4577 | #pragma unused(p, set) |
4578 | /* |
4579 | * do nothing |
4580 | */ |
4581 | #endif /* DEVELOPMENT || DEBUG */ |
4582 | return; |
4583 | } |
4584 | |
4585 | |
4586 | #if CONFIG_JETSAM |
4587 | /* |
4588 | * This is invoked when cpulimits have been exceeded while in fatal mode. |
4589 | * The jetsam_flags do not apply as those are for memory related kills. |
4590 | * We call this routine so that the offending process is killed with |
4591 | * a non-zero exit status. |
4592 | */ |
4593 | void |
4594 | jetsam_on_ledger_cpulimit_exceeded(void) |
4595 | { |
4596 | int retval = 0; |
4597 | int jetsam_flags = 0; /* make it obvious */ |
4598 | proc_t p = current_proc(); |
4599 | os_reason_t jetsam_reason = OS_REASON_NULL; |
4600 | |
memorystatus_log("task_exceeded_cpulimit: killing pid %d [%s]\n", proc_getpid(p), (*p->p_name ? p->p_name : "(unknown)"));
4602 | |
4603 | jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_CPULIMIT); |
4604 | if (jetsam_reason == OS_REASON_NULL) { |
memorystatus_log_error("task_exceeded_cpulimit: unable to allocate memory for jetsam reason\n");
4606 | } |
4607 | |
4608 | retval = jetsam_do_kill(p, jetsam_flags, jetsam_reason); |
4609 | |
4610 | if (retval) { |
memorystatus_log_error("task_exceeded_cpulimit: failed to kill current task (exiting?).\n");
4612 | } |
4613 | } |
4614 | |
4615 | #endif /* CONFIG_JETSAM */ |
4616 | |
4617 | static void |
4618 | memorystatus_get_task_memory_region_count(task_t task, uint64_t *count) |
4619 | { |
4620 | assert(task); |
4621 | assert(count); |
4622 | |
4623 | *count = get_task_memory_region_count(task); |
4624 | } |
4625 | |
4626 | |
4627 | #define MEMORYSTATUS_VM_MAP_FORK_ALLOWED 0x100000000 |
4628 | #define MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED 0x200000000 |
4629 | |
4630 | #if DEVELOPMENT || DEBUG |
4631 | |
4632 | /* |
4633 | * Sysctl only used to test memorystatus_allowed_vm_map_fork() path. |
4634 | * set a new pidwatch value |
4635 | * or |
4636 | * get the current pidwatch value |
4637 | * |
4638 | * The pidwatch_val starts out with a PID to watch for in the map_fork path. |
4639 | * Its value is: |
4640 | * - OR'd with MEMORYSTATUS_VM_MAP_FORK_ALLOWED if we allow the map_fork. |
4641 | * - OR'd with MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED if we disallow the map_fork. |
4642 | * - set to -1ull if the map_fork() is aborted for other reasons. |
4643 | */ |
4644 | |
4645 | uint64_t memorystatus_vm_map_fork_pidwatch_val = 0; |
4646 | |
4647 | static int sysctl_memorystatus_vm_map_fork_pidwatch SYSCTL_HANDLER_ARGS { |
4648 | #pragma unused(oidp, arg1, arg2) |
4649 | |
4650 | uint64_t new_value = 0; |
4651 | uint64_t old_value = 0; |
4652 | int error = 0; |
4653 | |
4654 | /* |
4655 | * The pid is held in the low 32 bits. |
4656 | * The 'allowed' flags are in the upper 32 bits. |
4657 | */ |
4658 | old_value = memorystatus_vm_map_fork_pidwatch_val; |
4659 | |
4660 | error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL); |
4661 | |
4662 | if (error || !req->newptr) { |
4663 | /* |
4664 | * No new value passed in. |
4665 | */ |
4666 | return error; |
4667 | } |
4668 | |
4669 | /* |
4670 | * A new pid was passed in via req->newptr. |
4671 | * Ignore any attempt to set the higher order bits. |
4672 | */ |
4673 | memorystatus_vm_map_fork_pidwatch_val = new_value & 0xFFFFFFFF; |
memorystatus_log_debug("memorystatus: pidwatch old_value = 0x%llx, new_value = 0x%llx\n", old_value, new_value);
4675 | |
4676 | return error; |
4677 | } |
4678 | |
4679 | SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_map_fork_pidwatch, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, |
0, 0, sysctl_memorystatus_vm_map_fork_pidwatch, "Q", "get/set pid watched for in vm_map_fork");
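
/*
* Usage sketch on a DEVELOPMENT/DEBUG kernel (pid 1234 is hypothetical):
*   sysctl -w kern.memorystatus_vm_map_fork_pidwatch=1234
*   ... drive pid 1234 over its soft memory limit to generate a corpse ...
*   sysctl -n kern.memorystatus_vm_map_fork_pidwatch
* The upper 32 bits of the value read back carry
* MEMORYSTATUS_VM_MAP_FORK_ALLOWED or MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED.
*/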
4681 | |
4682 | |
4683 | /* |
4684 | * Record if a watched process fails to qualify for a vm_map_fork(). |
4685 | */ |
4686 | void |
4687 | memorystatus_abort_vm_map_fork(task_t task) |
4688 | { |
4689 | if (memorystatus_vm_map_fork_pidwatch_val != 0) { |
4690 | proc_t p = get_bsdtask_info(task); |
4691 | if (p != NULL && memorystatus_vm_map_fork_pidwatch_val == (uint64_t)proc_getpid(p)) { |
4692 | memorystatus_vm_map_fork_pidwatch_val = -1ull; |
4693 | } |
4694 | } |
4695 | } |
4696 | |
4697 | static void |
4698 | set_vm_map_fork_pidwatch(task_t task, uint64_t x) |
4699 | { |
4700 | if (memorystatus_vm_map_fork_pidwatch_val != 0) { |
4701 | proc_t p = get_bsdtask_info(task); |
4702 | if (p && (memorystatus_vm_map_fork_pidwatch_val == (uint64_t)proc_getpid(p))) { |
4703 | memorystatus_vm_map_fork_pidwatch_val |= x; |
4704 | } |
4705 | } |
4706 | } |
4707 | |
4708 | #else /* DEVELOPMENT || DEBUG */ |
4709 | |
4710 | |
4711 | static void |
4712 | set_vm_map_fork_pidwatch(task_t task, uint64_t x) |
4713 | { |
4714 | #pragma unused(task) |
4715 | #pragma unused(x) |
4716 | } |
4717 | |
4718 | #endif /* DEVELOPMENT || DEBUG */ |
4719 | |
4720 | /* |
4721 | * Called during EXC_RESOURCE handling when a process exceeds a soft |
4722 | * memory limit. This is the corpse fork path and here we decide if |
4723 | * vm_map_fork will be allowed when creating the corpse. |
4724 | * The task being considered is suspended. |
4725 | * |
4726 | * By default, a vm_map_fork is allowed to proceed. |
4727 | * |
4728 | * A few simple policy assumptions: |
* If the device has a zero system-wide task limit,
* then the vm_map_fork is allowed. macOS always has a zero
* system-wide task limit (unless overridden by a boot-arg).
*
* And if a process's memory footprint is less than or
* equal to a quarter of the system-wide task limit,
* then the vm_map_fork is allowed. This calculation
* is based on the assumption that a process can
* munch memory up to the system-wide task limit.
4738 | * |
4739 | * For watchOS, which has a low task limit, we use a |
4740 | * different value. Current task limit has been reduced |
4741 | * to 300MB and it's been decided the limit should be 200MB. |
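*
* Illustrative arithmetic (values assumed, not from this file): with
* max_task_footprint_mb = 2048, the default corpse threshold computed
* below is 2048 MB / 4 = 512 MB of phys_footprint.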
4742 | */ |
4743 | int large_corpse_count = 0; |
4744 | boolean_t |
4745 | memorystatus_allowed_vm_map_fork(task_t task, bool *is_large) |
4746 | { |
4747 | boolean_t is_allowed = TRUE; /* default */ |
uint64_t footprint_in_bytes;
4749 | uint64_t max_allowed_bytes; |
4750 | thread_t self = current_thread(); |
4751 | |
4752 | *is_large = false; |
4753 | |
4754 | /* Jetsam in high bands blocks any new corpse */ |
4755 | if (os_atomic_load(&block_corpses, relaxed) != 0) { |
memorystatus_log("memorystatus_allowed_vm_map_fork: corpse for pid %d blocked by jetsam.\n", task_pid(task));
ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_BLOCKED_JETSAM), 0 /* arg */);
4758 | return FALSE; |
4759 | } |
4760 | |
4761 | if (max_task_footprint_mb == 0) { |
4762 | set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_ALLOWED); |
4763 | return is_allowed; |
4764 | } |
4765 | |
4766 | footprint_in_bytes = get_task_phys_footprint(task); |
4767 | |
4768 | /* |
4769 | * Maximum is 1/4 of the system-wide task limit by default. |
4770 | */ |
4771 | max_allowed_bytes = ((uint64_t)max_task_footprint_mb * 1024 * 1024) >> 2; |
4772 | |
4773 | #if XNU_TARGET_OS_WATCH |
4774 | /* |
4775 | * For watches with > 1G, use a limit of 200MB and allow |
4776 | * one corpse at a time of up to 300MB. |
4777 | */ |
4778 | #define LARGE_CORPSE_LIMIT 1 |
4779 | if (sane_size > 1 * 1024 * 1024 * 1024) { |
4780 | int cnt = large_corpse_count; |
4781 | if (footprint_in_bytes > 200 * 1024 * 1024 && |
4782 | footprint_in_bytes <= 300 * 1024 * 1024 && |
4783 | cnt < LARGE_CORPSE_LIMIT && |
4784 | OSCompareAndSwap(cnt, cnt + 1, &large_corpse_count)) { |
4785 | *is_large = true; |
4786 | max_allowed_bytes = MAX(max_allowed_bytes, 300 * 1024 * 1024); |
4787 | } else { |
4788 | max_allowed_bytes = MAX(max_allowed_bytes, 200 * 1024 * 1024); |
4789 | } |
4790 | } |
4791 | #endif /* XNU_TARGET_OS_WATCH */ |
4792 | |
4793 | #if DEBUG || DEVELOPMENT |
4794 | if (corpse_threshold_system_limit) { |
4795 | max_allowed_bytes = (uint64_t)max_task_footprint_mb * (1UL << 20); |
4796 | } |
4797 | #endif /* DEBUG || DEVELOPMENT */ |
4798 | |
4799 | if (footprint_in_bytes > max_allowed_bytes) { |
memorystatus_log("memorystatus: disallowed vm_map_fork, footprint %lld exceeds max allowed %lld\n", footprint_in_bytes, max_allowed_bytes);
set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED);
ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_PROC_TOO_BIG), 0 /* arg */);
4803 | return !is_allowed; |
4804 | } |
4805 | |
4806 | set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_ALLOWED); |
4807 | return is_allowed; |
4808 | } |
4809 | |
4810 | void |
memorystatus_get_task_page_counts(task_t task, uint32_t *footprint, uint32_t *max_footprint_lifetime, uint32_t *purgeable_pages)
4812 | { |
4813 | assert(task); |
4814 | assert(footprint); |
4815 | |
4816 | uint64_t pages; |
4817 | |
4818 | pages = (get_task_phys_footprint(task) / PAGE_SIZE_64); |
4819 | assert(((uint32_t)pages) == pages); |
4820 | *footprint = (uint32_t)pages; |
4821 | |
4822 | if (max_footprint_lifetime) { |
4823 | pages = (get_task_phys_footprint_lifetime_max(task) / PAGE_SIZE_64); |
4824 | assert(((uint32_t)pages) == pages); |
4825 | *max_footprint_lifetime = (uint32_t)pages; |
4826 | } |
4827 | if (purgeable_pages) { |
4828 | pages = (get_task_purgeable_size(task) / PAGE_SIZE_64); |
4829 | assert(((uint32_t)pages) == pages); |
4830 | *purgeable_pages = (uint32_t)pages; |
4831 | } |
4832 | } |
4833 | |
4834 | static void |
memorystatus_get_task_phys_footprint_page_counts(task_t task,
4836 | uint64_t *internal_pages, uint64_t *internal_compressed_pages, |
4837 | uint64_t *purgeable_nonvolatile_pages, uint64_t *purgeable_nonvolatile_compressed_pages, |
4838 | uint64_t *alternate_accounting_pages, uint64_t *alternate_accounting_compressed_pages, |
4839 | uint64_t *iokit_mapped_pages, uint64_t *page_table_pages, uint64_t *frozen_to_swap_pages) |
4840 | { |
4841 | assert(task); |
4842 | |
4843 | if (internal_pages) { |
4844 | *internal_pages = (get_task_internal(task) / PAGE_SIZE_64); |
4845 | } |
4846 | |
4847 | if (internal_compressed_pages) { |
4848 | *internal_compressed_pages = (get_task_internal_compressed(task) / PAGE_SIZE_64); |
4849 | } |
4850 | |
4851 | if (purgeable_nonvolatile_pages) { |
4852 | *purgeable_nonvolatile_pages = (get_task_purgeable_nonvolatile(task) / PAGE_SIZE_64); |
4853 | } |
4854 | |
4855 | if (purgeable_nonvolatile_compressed_pages) { |
4856 | *purgeable_nonvolatile_compressed_pages = (get_task_purgeable_nonvolatile_compressed(task) / PAGE_SIZE_64); |
4857 | } |
4858 | |
4859 | if (alternate_accounting_pages) { |
4860 | *alternate_accounting_pages = (get_task_alternate_accounting(task) / PAGE_SIZE_64); |
4861 | } |
4862 | |
4863 | if (alternate_accounting_compressed_pages) { |
4864 | *alternate_accounting_compressed_pages = (get_task_alternate_accounting_compressed(task) / PAGE_SIZE_64); |
4865 | } |
4866 | |
4867 | if (iokit_mapped_pages) { |
4868 | *iokit_mapped_pages = (get_task_iokit_mapped(task) / PAGE_SIZE_64); |
4869 | } |
4870 | |
4871 | if (page_table_pages) { |
4872 | *page_table_pages = (get_task_page_table(task) / PAGE_SIZE_64); |
4873 | } |
4874 | |
4875 | #if CONFIG_FREEZE |
4876 | if (frozen_to_swap_pages) { |
4877 | *frozen_to_swap_pages = (get_task_frozen_to_swap(task) / PAGE_SIZE_64); |
4878 | } |
4879 | #else /* CONFIG_FREEZE */ |
4880 | #pragma unused(frozen_to_swap_pages) |
4881 | #endif /* CONFIG_FREEZE */ |
4882 | } |
4883 | |
4884 | #if CONFIG_FREEZE |
4885 | /* |
4886 | * Copies the source entry into the destination snapshot. |
4887 | * Returns true on success. Fails if the destination snapshot is full. |
4888 | * Caller must hold the proc list lock. |
4889 | */ |
4890 | static bool |
4891 | memorystatus_jetsam_snapshot_copy_entry_locked(memorystatus_jetsam_snapshot_t *dst_snapshot, unsigned int dst_snapshot_size, const memorystatus_jetsam_snapshot_entry_t *src_entry) |
4892 | { |
4893 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
4894 | assert(dst_snapshot); |
4895 | |
4896 | if (dst_snapshot->entry_count == dst_snapshot_size) { |
4897 | /* Destination snapshot is full. Can not be updated until it is consumed. */ |
4898 | return false; |
4899 | } |
4900 | if (dst_snapshot->entry_count == 0) { |
4901 | memorystatus_init_jetsam_snapshot_header(dst_snapshot); |
4902 | } |
4903 | memorystatus_jetsam_snapshot_entry_t *dst_entry = &dst_snapshot->entries[dst_snapshot->entry_count++]; |
4904 | memcpy(dst_entry, src_entry, sizeof(memorystatus_jetsam_snapshot_entry_t)); |
4905 | return true; |
4906 | } |
4907 | #endif /* CONFIG_FREEZE */ |
4908 | |
4909 | static bool |
4910 | memorystatus_init_jetsam_snapshot_entry_with_kill_locked(memorystatus_jetsam_snapshot_t *snapshot, proc_t p, uint32_t kill_cause, uint64_t killtime, memorystatus_jetsam_snapshot_entry_t **entry) |
4911 | { |
4912 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
4913 | memorystatus_jetsam_snapshot_entry_t *snapshot_list = snapshot->entries; |
4914 | size_t i = snapshot->entry_count; |
4915 | |
if (memorystatus_init_jetsam_snapshot_entry_locked(p, &snapshot_list[i], (snapshot->js_gencount)) == TRUE) {
4917 | *entry = &snapshot_list[i]; |
4918 | (*entry)->killed = kill_cause; |
4919 | (*entry)->jse_killtime = killtime; |
4920 | |
4921 | snapshot->entry_count = i + 1; |
4922 | return true; |
4923 | } |
4924 | return false; |
4925 | } |
4926 | |
4927 | /* |
4928 | * This routine only acts on the global jetsam event snapshot. |
4929 | * Updating the process's entry can race when the memorystatus_thread |
4930 | * has chosen to kill a process that is racing to exit on another core. |
4931 | */ |
4932 | static void |
4933 | memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, uint64_t killtime) |
4934 | { |
4935 | memorystatus_jetsam_snapshot_entry_t *entry = NULL; |
4936 | memorystatus_jetsam_snapshot_t *snapshot = NULL; |
4937 | memorystatus_jetsam_snapshot_entry_t *snapshot_list = NULL; |
4938 | |
4939 | unsigned int i; |
4940 | #if CONFIG_FREEZE |
4941 | bool copied_to_freezer_snapshot = false; |
4942 | #endif /* CONFIG_FREEZE */ |
4943 | |
4944 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
4945 | |
4946 | if (memorystatus_jetsam_snapshot_count == 0) { |
4947 | /* |
4948 | * No active snapshot. |
4949 | * Nothing to do. |
4950 | */ |
4951 | goto exit; |
4952 | } |
4953 | |
4954 | /* |
4955 | * Sanity check as this routine should only be called |
4956 | * from a jetsam kill path. |
4957 | */ |
4958 | assert(kill_cause != 0 && killtime != 0); |
4959 | |
4960 | snapshot = memorystatus_jetsam_snapshot; |
4961 | snapshot_list = memorystatus_jetsam_snapshot->entries; |
4962 | |
4963 | for (i = 0; i < memorystatus_jetsam_snapshot_count; i++) { |
4964 | if (snapshot_list[i].pid == proc_getpid(p)) { |
4965 | entry = &snapshot_list[i]; |
4966 | |
4967 | if (entry->killed || entry->jse_killtime) { |
4968 | /* |
4969 | * We apparently raced on the exit path |
* for this process, as its snapshot entry
4971 | * has already recorded a kill. |
4972 | */ |
4973 | assert(entry->killed && entry->jse_killtime); |
4974 | break; |
4975 | } |
4976 | |
4977 | /* |
4978 | * Update the entry we just found in the snapshot. |
4979 | */ |
4980 | |
4981 | entry->killed = kill_cause; |
4982 | entry->jse_killtime = killtime; |
4983 | entry->jse_gencount = snapshot->js_gencount; |
4984 | entry->jse_idle_delta = p->p_memstat_idle_delta; |
4985 | #if CONFIG_FREEZE |
4986 | entry->jse_thaw_count = p->p_memstat_thaw_count; |
4987 | entry->jse_freeze_skip_reason = p->p_memstat_freeze_skip_reason; |
4988 | #else /* CONFIG_FREEZE */ |
4989 | entry->jse_thaw_count = 0; |
4990 | entry->jse_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone; |
4991 | #endif /* CONFIG_FREEZE */ |
4992 | |
4993 | /* |
4994 | * If a process has moved between bands since snapshot was |
4995 | * initialized, then likely these fields changed too. |
4996 | */ |
4997 | if (entry->priority != p->p_memstat_effectivepriority) { |
strlcpy(entry->name, p->p_name, sizeof(entry->name));
4999 | entry->priority = p->p_memstat_effectivepriority; |
5000 | entry->state = memorystatus_build_state(p); |
5001 | entry->user_data = p->p_memstat_userdata; |
5002 | entry->fds = p->p_fd.fd_nfiles; |
5003 | } |
5004 | |
5005 | /* |
5006 | * Always update the page counts on a kill. |
5007 | */ |
5008 | |
5009 | uint32_t pages = 0; |
5010 | uint32_t max_pages_lifetime = 0; |
5011 | uint32_t purgeable_pages = 0; |
5012 | |
memorystatus_get_task_page_counts(proc_task(p), &pages, &max_pages_lifetime, &purgeable_pages);
5014 | entry->pages = (uint64_t)pages; |
5015 | entry->max_pages_lifetime = (uint64_t)max_pages_lifetime; |
5016 | entry->purgeable_pages = (uint64_t)purgeable_pages; |
5017 | |
5018 | uint64_t internal_pages = 0; |
5019 | uint64_t internal_compressed_pages = 0; |
5020 | uint64_t purgeable_nonvolatile_pages = 0; |
5021 | uint64_t purgeable_nonvolatile_compressed_pages = 0; |
5022 | uint64_t alternate_accounting_pages = 0; |
5023 | uint64_t alternate_accounting_compressed_pages = 0; |
5024 | uint64_t iokit_mapped_pages = 0; |
5025 | uint64_t page_table_pages = 0; |
5026 | uint64_t frozen_to_swap_pages = 0; |
5027 | |
memorystatus_get_task_phys_footprint_page_counts(proc_task(p), &internal_pages, &internal_compressed_pages,
&purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages,
&alternate_accounting_pages, &alternate_accounting_compressed_pages,
&iokit_mapped_pages, &page_table_pages, &frozen_to_swap_pages);
5032 | |
5033 | entry->jse_internal_pages = internal_pages; |
5034 | entry->jse_internal_compressed_pages = internal_compressed_pages; |
5035 | entry->jse_purgeable_nonvolatile_pages = purgeable_nonvolatile_pages; |
5036 | entry->jse_purgeable_nonvolatile_compressed_pages = purgeable_nonvolatile_compressed_pages; |
5037 | entry->jse_alternate_accounting_pages = alternate_accounting_pages; |
5038 | entry->jse_alternate_accounting_compressed_pages = alternate_accounting_compressed_pages; |
5039 | entry->jse_iokit_mapped_pages = iokit_mapped_pages; |
5040 | entry->jse_page_table_pages = page_table_pages; |
5041 | entry->jse_frozen_to_swap_pages = frozen_to_swap_pages; |
5042 | |
5043 | uint64_t region_count = 0; |
memorystatus_get_task_memory_region_count(proc_task(p), &region_count);
5045 | entry->jse_memory_region_count = region_count; |
5046 | entry->csflags = proc_getcsflags(p); |
5047 | goto exit; |
5048 | } |
5049 | } |
5050 | |
5051 | if (entry == NULL) { |
5052 | /* |
5053 | * The entry was not found in the snapshot, so the process must have |
5054 | * launched after the snapshot was initialized. |
5055 | * Let's try to append the new entry. |
5056 | */ |
5057 | if (memorystatus_jetsam_snapshot_count < memorystatus_jetsam_snapshot_max) { |
5058 | /* |
5059 | * A populated snapshot buffer exists |
5060 | * and there is room to init a new entry. |
5061 | */ |
5062 | assert(memorystatus_jetsam_snapshot_count == snapshot->entry_count); |
5063 | |
if (memorystatus_init_jetsam_snapshot_entry_with_kill_locked(snapshot, p, kill_cause, killtime, &entry)) {
5065 | memorystatus_jetsam_snapshot_count++; |
5066 | |
5067 | if (memorystatus_jetsam_snapshot_count >= memorystatus_jetsam_snapshot_max) { |
5068 | /* |
5069 | * We just used the last slot in the snapshot buffer. |
5070 | * We only want to log it once... so we do it here |
5071 | * when we notice we've hit the max. |
5072 | */ |
memorystatus_log_error("memorystatus: WARNING snapshot buffer is full, count %d\n", memorystatus_jetsam_snapshot_count);
5074 | } |
5075 | } |
5076 | } |
5077 | } |
5078 | |
5079 | exit: |
5080 | if (entry) { |
5081 | #if CONFIG_FREEZE |
5082 | if (memorystatus_jetsam_use_freezer_snapshot && isApp(p)) { |
5083 | /* This is an app kill. Record it in the freezer snapshot so dasd can incorporate this in its recommendations. */ |
5084 | copied_to_freezer_snapshot = memorystatus_jetsam_snapshot_copy_entry_locked(memorystatus_jetsam_snapshot_freezer, memorystatus_jetsam_snapshot_freezer_max, entry); |
5085 | if (copied_to_freezer_snapshot && memorystatus_jetsam_snapshot_freezer->entry_count == memorystatus_jetsam_snapshot_freezer_max) { |
5086 | /* |
5087 | * We just used the last slot in the freezer snapshot buffer. |
5088 | * We only want to log it once... so we do it here |
5089 | * when we notice we've hit the max. |
5090 | */ |
memorystatus_log_error("memorystatus: WARNING freezer snapshot buffer is full, count %zu\n",
5092 | memorystatus_jetsam_snapshot_freezer->entry_count); |
5093 | } |
5094 | } |
5095 | #endif /* CONFIG_FREEZE */ |
5096 | } else { |
5097 | /* |
5098 | * If we reach here, the snapshot buffer could not be updated. |
5099 | * Most likely, the buffer is full, in which case we would have |
5100 | * logged a warning in the previous call. |
5101 | * |
5102 | * For now, we will stop appending snapshot entries. |
5103 | * When the buffer is consumed, the snapshot state will reset. |
5104 | */ |
5105 | |
memorystatus_log_error(
"memorystatus_update_jetsam_snapshot_entry_locked: failed to update pid %d, priority %d, count %d\n",
proc_getpid(p), p->p_memstat_effectivepriority, memorystatus_jetsam_snapshot_count);
5109 | |
5110 | #if CONFIG_FREEZE |
5111 | /* We still attempt to record this in the freezer snapshot */ |
5112 | if (memorystatus_jetsam_use_freezer_snapshot && isApp(p)) { |
5113 | snapshot = memorystatus_jetsam_snapshot_freezer; |
5114 | if (snapshot->entry_count < memorystatus_jetsam_snapshot_freezer_max) { |
5115 | copied_to_freezer_snapshot = memorystatus_init_jetsam_snapshot_entry_with_kill_locked(snapshot, p, kill_cause, killtime, &entry); |
5116 | if (copied_to_freezer_snapshot && memorystatus_jetsam_snapshot_freezer->entry_count == memorystatus_jetsam_snapshot_freezer_max) { |
5117 | /* |
5118 | * We just used the last slot in the freezer snapshot buffer. |
5119 | * We only want to log it once... so we do it here |
5120 | * when we notice we've hit the max. |
5121 | */ |
memorystatus_log_error("memorystatus: WARNING freezer snapshot buffer is full, count %zu\n",
5123 | memorystatus_jetsam_snapshot_freezer->entry_count); |
5124 | } |
5125 | } |
5126 | } |
5127 | #endif /* CONFIG_FREEZE */ |
5128 | } |
5129 | |
5130 | return; |
5131 | } |
5132 | |
5133 | #if CONFIG_JETSAM |
5134 | |
5135 | void |
5136 | memorystatus_pages_update(unsigned int pages_avail) |
5137 | { |
5138 | memorystatus_available_pages = pages_avail; |
5139 | |
5140 | #if VM_PRESSURE_EVENTS |
5141 | /* |
5142 | * Since memorystatus_available_pages changes, we should |
5143 | * re-evaluate the pressure levels on the system and |
5144 | * check if we need to wake the pressure thread. |
5145 | * We also update memorystatus_level in that routine. |
5146 | */ |
5147 | vm_pressure_response(); |
5148 | |
5149 | if (memorystatus_available_pages <= memorystatus_available_pages_pressure) { |
5150 | if (memorystatus_hwm_candidates || (memorystatus_available_pages <= memorystatus_available_pages_critical)) { |
5151 | memorystatus_thread_wake(); |
5152 | } |
5153 | } |
5154 | #if CONFIG_FREEZE |
5155 | /* |
5156 | * We can't grab the freezer_mutex here even though that synchronization would be correct to inspect |
5157 | * the # of frozen processes and wakeup the freezer thread. Reason being that we come here into this |
5158 | * code with (possibly) the page-queue locks held and preemption disabled. So trying to grab a mutex here |
5159 | * will result in the "mutex with preemption disabled" panic. |
5160 | */ |
5161 | |
5162 | if (memorystatus_freeze_thread_should_run()) { |
5163 | /* |
* The freezer thread is usually woken up by a user-space call, e.g. pid_hibernate() on any process.
* That trigger isn't invoked often enough, so we enable this explicit wakeup here.
5166 | */ |
5167 | if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { |
5168 | thread_wakeup((event_t)&memorystatus_freeze_wakeup); |
5169 | } |
5170 | } |
5171 | #endif /* CONFIG_FREEZE */ |
5172 | |
5173 | #else /* VM_PRESSURE_EVENTS */ |
5174 | |
5175 | boolean_t critical, delta; |
5176 | |
5177 | if (!memorystatus_delta) { |
5178 | return; |
5179 | } |
5180 | |
5181 | critical = (pages_avail < memorystatus_available_pages_critical) ? TRUE : FALSE; |
5182 | delta = ((pages_avail >= (memorystatus_available_pages + memorystatus_delta)) |
5183 | || (memorystatus_available_pages >= (pages_avail + memorystatus_delta))) ? TRUE : FALSE; |
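/*
* i.e. wake the jetsam thread when availability dropped below the critical
* threshold, or moved by at least memorystatus_delta pages in either
* direction since the last recorded value.
*/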
5184 | |
5185 | if (critical || delta) { |
5186 | unsigned int total_pages; |
5187 | |
5188 | total_pages = (unsigned int) atop_64(max_mem); |
5189 | #if CONFIG_SECLUDED_MEMORY |
5190 | total_pages -= vm_page_secluded_count; |
5191 | #endif /* CONFIG_SECLUDED_MEMORY */ |
5192 | memorystatus_level = memorystatus_available_pages * 100 / total_pages; |
5193 | memorystatus_thread_wake(); |
5194 | } |
5195 | #endif /* VM_PRESSURE_EVENTS */ |
5196 | } |
5197 | #endif /* CONFIG_JETSAM */ |
5198 | |
5199 | static boolean_t |
5200 | memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_snapshot_entry_t *entry, uint64_t gencount) |
5201 | { |
5202 | clock_sec_t tv_sec; |
5203 | clock_usec_t tv_usec; |
5204 | uint32_t pages = 0; |
5205 | uint32_t max_pages_lifetime = 0; |
5206 | uint32_t purgeable_pages = 0; |
5207 | uint64_t internal_pages = 0; |
5208 | uint64_t internal_compressed_pages = 0; |
5209 | uint64_t purgeable_nonvolatile_pages = 0; |
5210 | uint64_t purgeable_nonvolatile_compressed_pages = 0; |
5211 | uint64_t alternate_accounting_pages = 0; |
5212 | uint64_t alternate_accounting_compressed_pages = 0; |
5213 | uint64_t iokit_mapped_pages = 0; |
5214 | uint64_t page_table_pages = 0; |
5215 | uint64_t frozen_to_swap_pages = 0; |
5216 | uint64_t region_count = 0; |
5217 | uint64_t cids[COALITION_NUM_TYPES]; |
5218 | uint32_t trust = 0; |
5219 | kern_return_t ret = 0; |
memset(entry, 0, sizeof(memorystatus_jetsam_snapshot_entry_t));
5221 | |
5222 | entry->pid = proc_getpid(p); |
strlcpy(&entry->name[0], p->p_name, sizeof(entry->name));
5224 | entry->priority = p->p_memstat_effectivepriority; |
5225 | |
memorystatus_get_task_page_counts(proc_task(p), &pages, &max_pages_lifetime, &purgeable_pages);
5227 | entry->pages = (uint64_t)pages; |
5228 | entry->max_pages_lifetime = (uint64_t)max_pages_lifetime; |
5229 | entry->purgeable_pages = (uint64_t)purgeable_pages; |
5230 | |
memorystatus_get_task_phys_footprint_page_counts(proc_task(p), &internal_pages, &internal_compressed_pages,
&purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages,
&alternate_accounting_pages, &alternate_accounting_compressed_pages,
&iokit_mapped_pages, &page_table_pages, &frozen_to_swap_pages);
5235 | |
5236 | entry->jse_internal_pages = internal_pages; |
5237 | entry->jse_internal_compressed_pages = internal_compressed_pages; |
5238 | entry->jse_purgeable_nonvolatile_pages = purgeable_nonvolatile_pages; |
5239 | entry->jse_purgeable_nonvolatile_compressed_pages = purgeable_nonvolatile_compressed_pages; |
5240 | entry->jse_alternate_accounting_pages = alternate_accounting_pages; |
5241 | entry->jse_alternate_accounting_compressed_pages = alternate_accounting_compressed_pages; |
5242 | entry->jse_iokit_mapped_pages = iokit_mapped_pages; |
5243 | entry->jse_page_table_pages = page_table_pages; |
5244 | entry->jse_frozen_to_swap_pages = frozen_to_swap_pages; |
5245 | |
memorystatus_get_task_memory_region_count(proc_task(p), &region_count);
5247 | entry->jse_memory_region_count = region_count; |
5248 | |
5249 | entry->state = memorystatus_build_state(p); |
5250 | entry->user_data = p->p_memstat_userdata; |
5251 | proc_getexecutableuuid(p, &entry->uuid[0], sizeof(entry->uuid)); |
5252 | entry->fds = p->p_fd.fd_nfiles; |
5253 | |
absolutetime_to_microtime(get_task_cpu_time(proc_task(p)), &tv_sec, &tv_usec);
5255 | entry->cpu_time.tv_sec = (int64_t)tv_sec; |
5256 | entry->cpu_time.tv_usec = (int64_t)tv_usec; |
5257 | |
5258 | assert(p->p_stats != NULL); |
5259 | entry->jse_starttime = p->p_stats->ps_start; /* abstime process started */ |
5260 | entry->jse_killtime = 0; /* abstime jetsam chose to kill process */ |
5261 | entry->killed = 0; /* the jetsam kill cause */ |
5262 | entry->jse_gencount = gencount; /* indicates a pass through jetsam thread, when process was targeted to be killed */ |
5263 | |
5264 | entry->jse_idle_delta = p->p_memstat_idle_delta; /* Most recent timespan spent in idle-band */ |
5265 | |
5266 | #if CONFIG_FREEZE |
5267 | entry->jse_freeze_skip_reason = p->p_memstat_freeze_skip_reason; |
5268 | entry->jse_thaw_count = p->p_memstat_thaw_count; |
5269 | #else /* CONFIG_FREEZE */ |
5270 | entry->jse_thaw_count = 0; |
5271 | entry->jse_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone; |
5272 | #endif /* CONFIG_FREEZE */ |
5273 | |
5274 | proc_coalitionids(p, cids); |
5275 | entry->jse_coalition_jetsam_id = cids[COALITION_TYPE_JETSAM]; |
5276 | entry->csflags = proc_getcsflags(p); |
ret = get_trust_level_kdp(get_task_pmap(proc_task(p)), &trust);
5278 | if (ret != KERN_SUCCESS) { |
5279 | trust = KCDATA_INVALID_CS_TRUST_LEVEL; |
5280 | } |
5281 | entry->cs_trust_level = trust; |
5282 | return TRUE; |
5283 | } |
5284 | |
5285 | static void |
5286 | memorystatus_init_snapshot_vmstats(memorystatus_jetsam_snapshot_t *snapshot) |
5287 | { |
5288 | kern_return_t kr = KERN_SUCCESS; |
5289 | mach_msg_type_number_t count = HOST_VM_INFO64_COUNT; |
5290 | vm_statistics64_data_t vm_stat; |
5291 | |
if ((kr = host_statistics64(host_self(), HOST_VM_INFO64, (host_info64_t)&vm_stat, &count)) != KERN_SUCCESS) {
memorystatus_log_error("memorystatus_init_jetsam_snapshot_stats: host_statistics64 failed with %d\n", kr);
memset(&snapshot->stats, 0, sizeof(snapshot->stats));
5295 | } else { |
5296 | snapshot->stats.free_pages = vm_stat.free_count; |
5297 | snapshot->stats.active_pages = vm_stat.active_count; |
5298 | snapshot->stats.inactive_pages = vm_stat.inactive_count; |
5299 | snapshot->stats.throttled_pages = vm_stat.throttled_count; |
5300 | snapshot->stats.purgeable_pages = vm_stat.purgeable_count; |
5301 | snapshot->stats.wired_pages = vm_stat.wire_count; |
5302 | |
5303 | snapshot->stats.speculative_pages = vm_stat.speculative_count; |
5304 | snapshot->stats.filebacked_pages = vm_stat.external_page_count; |
5305 | snapshot->stats.anonymous_pages = vm_stat.internal_page_count; |
5306 | snapshot->stats.compressions = vm_stat.compressions; |
5307 | snapshot->stats.decompressions = vm_stat.decompressions; |
5308 | snapshot->stats.compressor_pages = vm_stat.compressor_page_count; |
5309 | snapshot->stats.total_uncompressed_pages_in_compressor = vm_stat.total_uncompressed_pages_in_compressor; |
5310 | } |
5311 | |
get_zone_map_size(&snapshot->stats.zone_map_size, &snapshot->stats.zone_map_capacity);
5313 | |
bzero(snapshot->stats.largest_zone_name, sizeof(snapshot->stats.largest_zone_name));
get_largest_zone_info(snapshot->stats.largest_zone_name, sizeof(snapshot->stats.largest_zone_name),
&snapshot->stats.largest_zone_size);
5317 | } |
5318 | |
5319 | /* |
5320 | * Collect vm statistics at boot. |
5321 | * Called only once (see kern_exec.c) |
5322 | * Data can be consumed at any time. |
5323 | */ |
5324 | void |
memorystatus_init_at_boot_snapshot(void)
5326 | { |
memorystatus_init_snapshot_vmstats(&memorystatus_at_boot_snapshot);
5328 | memorystatus_at_boot_snapshot.entry_count = 0; |
5329 | memorystatus_at_boot_snapshot.notification_time = 0; /* updated when consumed */ |
5330 | memorystatus_at_boot_snapshot.snapshot_time = mach_absolute_time(); |
5331 | } |
5332 | |
5333 | static void |
memorystatus_init_jetsam_snapshot_header(memorystatus_jetsam_snapshot_t *snapshot)
5335 | { |
5336 | memorystatus_init_snapshot_vmstats(snapshot); |
5337 | snapshot->snapshot_time = mach_absolute_time(); |
5338 | snapshot->notification_time = 0; |
5339 | snapshot->js_gencount = 0; |
5340 | } |
5341 | |
5342 | static void |
memorystatus_init_jetsam_snapshot_locked(memorystatus_jetsam_snapshot_t *od_snapshot, uint32_t ods_list_count)
5344 | { |
5345 | proc_t p, next_p; |
5346 | unsigned int b = 0, i = 0; |
5347 | |
5348 | memorystatus_jetsam_snapshot_t *snapshot = NULL; |
5349 | memorystatus_jetsam_snapshot_entry_t *snapshot_list = NULL; |
5350 | unsigned int snapshot_max = 0; |
5351 | |
5352 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
5353 | |
5354 | if (od_snapshot) { |
5355 | /* |
5356 | * This is an on_demand snapshot |
5357 | */ |
5358 | snapshot = od_snapshot; |
5359 | snapshot_list = od_snapshot->entries; |
5360 | snapshot_max = ods_list_count; |
5361 | } else { |
5362 | /* |
5363 | * This is a jetsam event snapshot |
5364 | */ |
5365 | snapshot = memorystatus_jetsam_snapshot; |
5366 | snapshot_list = memorystatus_jetsam_snapshot->entries; |
5367 | snapshot_max = memorystatus_jetsam_snapshot_max; |
5368 | } |
5369 | |
5370 | memorystatus_init_jetsam_snapshot_header(snapshot); |
5371 | |
next_p = memorystatus_get_first_proc_locked(&b, TRUE);
5373 | while (next_p) { |
5374 | p = next_p; |
next_p = memorystatus_get_next_proc_locked(&b, p, TRUE);
5376 | |
if (FALSE == memorystatus_init_jetsam_snapshot_entry_locked(p, &snapshot_list[i], snapshot->js_gencount)) {
5378 | continue; |
5379 | } |
5380 | |
5381 | if (++i == snapshot_max) { |
5382 | break; |
5383 | } |
5384 | } |
5385 | |
5386 | /* Log launchd and kernel_task as well to see more context, even though jetsam doesn't apply to them. */ |
5387 | if (i < snapshot_max) { |
memorystatus_init_jetsam_snapshot_entry_locked(initproc, &snapshot_list[i], snapshot->js_gencount);
5389 | i++; |
5390 | } |
5391 | |
5392 | if (i < snapshot_max) { |
memorystatus_init_jetsam_snapshot_entry_locked(kernproc, &snapshot_list[i], snapshot->js_gencount);
5394 | i++; |
5395 | } |
5396 | |
5397 | snapshot->entry_count = i; |
5398 | |
5399 | if (!od_snapshot) { |
5400 | /* update the system buffer count */ |
5401 | memorystatus_jetsam_snapshot_count = i; |
5402 | } |
5403 | } |
5404 | |
5405 | #if DEVELOPMENT || DEBUG |
5406 | |
5407 | /* |
5408 | * Verify that the given bucket has been sorted correctly. |
5409 | * |
5410 | * Walks through the bucket and verifies that all pids in the |
5411 | * expected_order buffer are in that bucket and in the same |
5412 | * relative order. |
5413 | * |
5414 | * The proc_list_lock must be held by the caller. |
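*
* Illustrative example (pids assumed): with expected_order = {10, 20}, a
* bucket containing pids {5, 10, 15, 20} verifies successfully, while a
* bucket containing {20, 5, 10} fails because 20 precedes 10.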
5415 | */ |
5416 | static int |
5417 | memorystatus_verify_sort_order(unsigned int bucket_index, pid_t *expected_order, size_t num_pids) |
5418 | { |
5419 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
5420 | |
5421 | int error = 0; |
5422 | proc_t p = NULL; |
5423 | size_t i = 0; |
5424 | |
5425 | /* |
5426 | * NB: We allow other procs to be mixed in within the expected ones. |
5427 | * We just need the expected procs to be in the right order relative to each other. |
5428 | */ |
5429 | p = memorystatus_get_first_proc_locked(&bucket_index, FALSE); |
5430 | while (p) { |
5431 | if (proc_getpid(p) == expected_order[i]) { |
5432 | i++; |
5433 | } |
5434 | if (i == num_pids) { |
5435 | break; |
5436 | } |
5437 | p = memorystatus_get_next_proc_locked(&bucket_index, p, FALSE); |
5438 | } |
5439 | if (i != num_pids) { |
5440 | char buffer[128]; |
5441 | size_t len = sizeof(buffer); |
5442 | size_t buffer_idx = 0; |
memorystatus_log_error("memorystatus_verify_sort_order: Processes in bucket %d were not sorted properly\n", bucket_index);
for (i = 0; i < num_pids; i++) {
int num_written = snprintf(buffer + buffer_idx, len - buffer_idx, "%d,", expected_order[i]);
5446 | if (num_written <= 0) { |
5447 | break; |
5448 | } |
5449 | if (buffer_idx + (unsigned int) num_written >= len) { |
5450 | break; |
5451 | } |
5452 | buffer_idx += num_written; |
5453 | } |
memorystatus_log_error("memorystatus_verify_sort_order: Expected order [%s]\n", buffer);
5455 | memset(buffer, 0, len); |
5456 | buffer_idx = 0; |
5457 | p = memorystatus_get_first_proc_locked(&bucket_index, FALSE); |
5458 | i = 0; |
memorystatus_log_error("memorystatus_verify_sort_order: Actual order:\n");
5460 | while (p) { |
5461 | int num_written; |
5462 | if (buffer_idx == 0) { |
num_written = snprintf(buffer + buffer_idx, len - buffer_idx, "%zu: %d,", i, proc_getpid(p));
} else {
num_written = snprintf(buffer + buffer_idx, len - buffer_idx, "%d,", proc_getpid(p));
5466 | } |
5467 | if (num_written <= 0) { |
5468 | break; |
5469 | } |
5470 | buffer_idx += (unsigned int) num_written; |
5471 | assert(buffer_idx <= len); |
5472 | if (i % 10 == 0) { |
memorystatus_log_error("memorystatus_verify_sort_order: %s\n", buffer);
5474 | buffer_idx = 0; |
5475 | } |
5476 | p = memorystatus_get_next_proc_locked(&bucket_index, p, FALSE); |
5477 | i++; |
5478 | } |
5479 | if (buffer_idx != 0) { |
memorystatus_log_error("memorystatus_verify_sort_order: %s\n", buffer);
5481 | } |
5482 | error = EINVAL; |
5483 | } |
5484 | return error; |
5485 | } |
5486 | |
5487 | /* |
5488 | * Triggers a sort_order on a specified jetsam priority band. |
5489 | * This is for testing only, used to force a path through the sort |
5490 | * function. |
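*
* Usage sketch (assuming the standard memorystatus_control(2) dispatch):
* user space issues MEMORYSTATUS_CMD_TEST_JETSAM_SORT, with the pid
* argument carrying the priority band, the flags argument carrying the
* sort order, and the buffer carrying the expected pid order.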
5491 | */ |
5492 | static int |
5493 | memorystatus_cmd_test_jetsam_sort(int priority, |
5494 | int sort_order, |
5495 | user_addr_t expected_order_user, |
5496 | size_t expected_order_user_len) |
5497 | { |
5498 | int error = 0; |
5499 | unsigned int bucket_index = 0; |
5500 | static size_t kMaxPids = 8; |
5501 | pid_t expected_order[kMaxPids]; |
5502 | size_t copy_size = sizeof(expected_order); |
5503 | size_t num_pids; |
5504 | |
5505 | if (expected_order_user_len < copy_size) { |
5506 | copy_size = expected_order_user_len; |
5507 | } |
5508 | num_pids = copy_size / sizeof(pid_t); |
5509 | |
5510 | error = copyin(expected_order_user, expected_order, copy_size); |
5511 | if (error != 0) { |
5512 | return error; |
5513 | } |
5514 | |
5515 | if (priority == -1) { |
5516 | /* Use as shorthand for default priority */ |
5517 | bucket_index = JETSAM_PRIORITY_DEFAULT; |
5518 | } else { |
5519 | bucket_index = (unsigned int)priority; |
5520 | } |
5521 | |
5522 | /* |
5523 | * Acquire lock before sorting so we can check the sort order |
5524 | * while still holding the lock. |
5525 | */ |
5526 | proc_list_lock(); |
5527 | |
5528 | memorystatus_sort_bucket_locked(bucket_index, sort_order); |
5529 | |
5530 | if (expected_order_user != CAST_USER_ADDR_T(NULL) && expected_order_user_len > 0) { |
5531 | error = memorystatus_verify_sort_order(bucket_index, expected_order, num_pids); |
5532 | } |
5533 | |
5534 | proc_list_unlock(); |
5535 | |
5536 | return error; |
5537 | } |
5538 | |
5539 | #endif /* DEVELOPMENT || DEBUG */ |
5540 | |
5541 | /* |
5542 | * Prepare the process to be killed (set state, update snapshot) and kill it. |
5543 | */ |
5544 | static uint64_t memorystatus_purge_before_jetsam_success = 0; |
5545 | |
5546 | static boolean_t |
memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, bool *killed, uint64_t *footprint_of_killed_proc)
5548 | { |
5549 | pid_t aPid = 0; |
5550 | uint32_t aPid_ep = 0; |
5551 | |
5552 | uint64_t killtime = 0; |
5553 | clock_sec_t tv_sec; |
5554 | clock_usec_t tv_usec; |
5555 | uint32_t tv_msec; |
5556 | boolean_t retval = FALSE; |
5557 | |
5558 | aPid = proc_getpid(p); |
5559 | aPid_ep = p->p_memstat_effectivepriority; |
5560 | |
5561 | if (cause != kMemorystatusKilledVnodes && cause != kMemorystatusKilledZoneMapExhaustion) { |
5562 | /* |
5563 | * Genuine memory pressure and not other (vnode/zone) resource exhaustion. |
5564 | */ |
5565 | boolean_t success = FALSE; |
5566 | uint64_t num_pages_purged; |
5567 | uint64_t num_pages_reclaimed = 0; |
5568 | uint64_t num_pages_unsecluded = 0; |
5569 | |
5570 | networking_memstatus_callout(p, cause); |
num_pages_purged = vm_purgeable_purge_task_owned(proc_task(p));
5572 | num_pages_reclaimed += num_pages_purged; |
5573 | #if CONFIG_SECLUDED_MEMORY |
5574 | if (cause == kMemorystatusKilledVMPageShortage && |
5575 | vm_page_secluded_count > 0 && |
5576 | task_can_use_secluded_mem(proc_task(p), FALSE)) { |
5577 | /* |
5578 | * We're about to kill a process that has access |
5579 | * to the secluded pool. Drain that pool into the |
5580 | * free or active queues to make these pages re-appear |
5581 | * as "available", which might make us no longer need |
5582 | * to kill that process. |
5583 | * Since the secluded pool does not get refilled while |
5584 | * a process has access to it, it should remain |
5585 | * drained. |
5586 | */ |
5587 | num_pages_unsecluded = vm_page_secluded_drain(); |
5588 | num_pages_reclaimed += num_pages_unsecluded; |
5589 | } |
5590 | #endif /* CONFIG_SECLUDED_MEMORY */ |
5591 | |
5592 | if (num_pages_reclaimed) { |
5593 | /* |
5594 | * We actually reclaimed something and so let's |
5595 | * check if we need to continue with the kill. |
5596 | */ |
5597 | if (cause == kMemorystatusKilledHiwat) { |
uint64_t footprint_in_bytes = get_task_phys_footprint(proc_task(p));
uint64_t memlimit_in_bytes = (((uint64_t)p->p_memstat_memlimit) * 1024ULL * 1024ULL); /* convert MB to bytes */
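/*
 * Worked example: a p_memstat_memlimit of 50 (MB) yields
 * 50 * 1024 * 1024 = 52,428,800 bytes for the footprint comparison below.
 */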
5600 | success = (footprint_in_bytes <= memlimit_in_bytes); |
5601 | } else { |
5602 | success = !memorystatus_avail_pages_below_pressure(); |
5603 | #if CONFIG_SECLUDED_MEMORY |
5604 | if (!success && num_pages_unsecluded) { |
5605 | /* |
5606 | * We just drained the secluded pool |
5607 | * because we're about to kill a |
5608 | * process that has access to it. |
5609 | * This is an important process and |
5610 | * we'd rather not kill it unless |
5611 | * absolutely necessary, so declare |
5612 | * success even if draining the pool |
5613 | * did not quite get us out of the |
5614 | * "pressure" level but still got |
5615 | * us out of the "critical" level. |
5616 | */ |
5617 | success = !memorystatus_avail_pages_below_critical(); |
5618 | } |
5619 | #endif /* CONFIG_SECLUDED_MEMORY */ |
5620 | } |
5621 | |
5622 | if (success) { |
5623 | memorystatus_purge_before_jetsam_success++; |
5624 | |
5625 | memorystatus_log_info("memorystatus: reclaimed %llu pages (%llu purged, %llu unsecluded) from pid %d [%s] and avoided %s\n" , |
5626 | num_pages_reclaimed, num_pages_purged, num_pages_unsecluded, aPid, ((p && *p->p_name) ? p->p_name : "unknown" ), memorystatus_kill_cause_name[cause]); |
5627 | |
5628 | *killed = false; |
*footprint_of_killed_proc = num_pages_reclaimed; /* already the sum of purged + unsecluded pages */
5630 | |
5631 | return TRUE; |
5632 | } |
5633 | } |
5634 | } |
5635 | |
5636 | killtime = mach_absolute_time(); |
absolutetime_to_microtime(killtime, &tv_sec, &tv_usec);
5638 | tv_msec = tv_usec / 1000; |
5639 | |
5640 | proc_list_lock(); |
memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime);
5642 | proc_list_unlock(); |
5643 | |
5644 | char kill_reason_string[128]; |
5645 | |
if (cause == kMemorystatusKilledHiwat) {
strlcpy(kill_reason_string, "killing_highwater_process", 128);
} else {
if (aPid_ep == JETSAM_PRIORITY_IDLE) {
strlcpy(kill_reason_string, "killing_idle_process", 128);
} else {
strlcpy(kill_reason_string, "killing_top_process", 128);
}
}
5655 | |
5656 | /* |
5657 | * memorystatus_do_kill drops a reference, so take another one so we can |
5658 | * continue to use this exit reason even after memorystatus_do_kill() |
5659 | * returns |
5660 | */ |
os_reason_ref(jetsam_reason);
5662 | |
5663 | retval = memorystatus_do_kill(p, cause, jetsam_reason, footprint_of_killed_proc); |
5664 | *killed = retval; |
5665 | |
5666 | memorystatus_log("%lu.%03d memorystatus: %s pid %d [%s] (%s %d) %lluKB - memorystatus_available_pages: %llu compressor_size:%u\n" , |
5667 | (unsigned long)tv_sec, tv_msec, kill_reason_string, |
5668 | aPid, ((p && *p->p_name) ? p->p_name : "unknown" ), |
5669 | memorystatus_kill_cause_name[cause], aPid_ep, |
5670 | (*footprint_of_killed_proc) >> 10, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES, vm_compressor_pool_size()); |
5671 | |
5672 | return retval; |
5673 | } |
5674 | |
5675 | /* |
5676 | * Jetsam the first process in the queue. |
5677 | */ |
5678 | static bool |
5679 | memorystatus_kill_top_process(bool any, bool sort_flag, uint32_t cause, os_reason_t jetsam_reason, |
5680 | int32_t max_priority, bool only_swappable, |
5681 | int32_t *priority, uint32_t *errors, uint64_t *memory_reclaimed) |
5682 | { |
5683 | pid_t aPid; |
5684 | proc_t p = PROC_NULL, next_p = PROC_NULL; |
5685 | bool new_snapshot = false, force_new_snapshot = false, killed = false, freed_mem = false; |
5686 | unsigned int i = 0; |
5687 | uint32_t aPid_ep; |
5688 | int32_t local_max_kill_prio = JETSAM_PRIORITY_IDLE; |
uint64_t footprint_of_killed_proc = 0;
5690 | |
5691 | #ifndef CONFIG_FREEZE |
5692 | #pragma unused(any) |
5693 | #endif |
5694 | |
5695 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_JETSAM) | DBG_FUNC_START, |
5696 | MEMORYSTATUS_LOG_AVAILABLE_PAGES); |
5697 | |
5698 | |
5699 | #if CONFIG_JETSAM |
5700 | if (sort_flag) { |
5701 | (void)memorystatus_sort_bucket(JETSAM_PRIORITY_FOREGROUND, JETSAM_SORT_DEFAULT); |
5702 | } |
5703 | |
5704 | *memory_reclaimed = 0; |
5705 | local_max_kill_prio = MIN(max_kill_priority, max_priority); |
5706 | |
5707 | #if VM_PRESSURE_EVENTS |
5708 | if (cause == kMemorystatusKilledSustainedPressure) { |
5709 | local_max_kill_prio = memorystatus_sustained_pressure_maximum_band; |
5710 | } |
5711 | #endif /* VM_PRESSURE_EVENTS */ |
5712 | |
5713 | force_new_snapshot = false; |
5714 | |
5715 | #else /* CONFIG_JETSAM */ |
5716 | (void) max_priority; |
5717 | |
5718 | if (sort_flag) { |
5719 | (void)memorystatus_sort_bucket(JETSAM_PRIORITY_IDLE, JETSAM_SORT_DEFAULT); |
5720 | } |
5721 | |
5722 | /* |
5723 | * On macos, we currently only have 2 reasons to be here: |
5724 | * |
5725 | * kMemorystatusKilledZoneMapExhaustion |
5726 | * AND |
5727 | * kMemorystatusKilledVMCompressorSpaceShortage |
5728 | * |
5729 | * If we are here because of kMemorystatusKilledZoneMapExhaustion, we will consider |
5730 | * any and all processes as eligible kill candidates since we need to avoid a panic. |
5731 | * |
5732 | * Since this function can be called async. it is harder to toggle the max_kill_priority |
5733 | * value before and after a call. And so we use this local variable to set the upper band |
5734 | * on the eligible kill bands. |
5735 | */ |
5736 | if (cause == kMemorystatusKilledZoneMapExhaustion) { |
5737 | local_max_kill_prio = JETSAM_PRIORITY_MAX; |
5738 | } else { |
5739 | local_max_kill_prio = max_kill_priority; |
5740 | } |
5741 | |
5742 | /* |
5743 | * And, because we are here under extreme circumstances, we force a snapshot even for |
5744 | * IDLE kills. |
5745 | */ |
5746 | force_new_snapshot = true; |
5747 | |
5748 | #endif /* CONFIG_JETSAM */ |
5749 | |
5750 | if (cause != kMemorystatusKilledZoneMapExhaustion && |
5751 | jetsam_current_thread() != NULL && |
5752 | jetsam_current_thread()->limit_to_low_bands && |
5753 | local_max_kill_prio > JETSAM_PRIORITY_MAIL) { |
5754 | local_max_kill_prio = JETSAM_PRIORITY_MAIL; |
5755 | } |
5756 | |
5757 | proc_list_lock(); |
5758 | |
next_p = memorystatus_get_first_proc_locked(&i, TRUE);
5760 | while (next_p && (next_p->p_memstat_effectivepriority <= local_max_kill_prio)) { |
5761 | p = next_p; |
next_p = memorystatus_get_next_proc_locked(&i, p, TRUE);
5763 | |
5764 | |
5765 | aPid = proc_getpid(p); |
5766 | aPid_ep = p->p_memstat_effectivepriority; |
5767 | |
5768 | if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED | P_MEMSTAT_SKIP)) { |
5769 | continue; /* with lock held */ |
5770 | } |
5771 | |
5772 | if (cause == kMemorystatusKilledVnodes) { |
5773 | /* |
5774 | * If the system runs out of vnodes, we systematically jetsam |
5775 | * processes in hopes of stumbling onto a vnode gain that helps |
5776 | * the system recover. The process that happens to trigger |
5777 | * this path has no known relationship to the vnode shortage. |
5778 | * Deadlock avoidance: attempt to safeguard the caller. |
5779 | */ |
5780 | |
5781 | if (p == current_proc()) { |
5782 | /* do not jetsam the current process */ |
5783 | continue; |
5784 | } |
5785 | } |
5786 | |
if (only_swappable && !task_donates_own_pages(proc_task(p))) {
5788 | continue; |
5789 | } |
5790 | |
5791 | #if CONFIG_FREEZE |
boolean_t reclaim_proc = !(p->p_memstat_state & P_MEMSTAT_LOCKED);
boolean_t skip = !(any || reclaim_proc);
5799 | |
5800 | if (skip) { |
5801 | continue; |
5802 | } else |
5803 | #endif |
5804 | { |
5805 | if (proc_ref(p, true) == p) { |
5806 | /* |
5807 | * Mark as terminated so that if exit1() indicates success, but the process (for example) |
5808 | * is blocked in task_exception_notify(), it'll be skipped if encountered again - see |
5809 | * <rdar://problem/13553476>. This is cheaper than examining P_LEXIT, which requires the |
5810 | * acquisition of the proc lock. |
5811 | */ |
5812 | p->p_memstat_state |= P_MEMSTAT_TERMINATED; |
5813 | } else { |
5814 | /* |
5815 | * We need to restart the search again because |
5816 | * proc_ref _can_ drop the proc_list lock |
5817 | * and we could have lost our stored next_p via |
5818 | * an exit() on another core. |
5819 | */ |
5820 | i = 0; |
next_p = memorystatus_get_first_proc_locked(&i, TRUE);
5822 | continue; |
5823 | } |
5824 | |
5825 | /* |
5826 | * Capture a snapshot if none exists and: |
5827 | * - we are forcing a new snapshot creation, either because: |
5828 | * - on a particular platform we need these snapshots every time, OR |
5829 | * - a boot-arg/embedded device tree property has been set. |
5830 | * - priority was not requested (this is something other than an ambient kill) |
5831 | * - the priority was requested *and* the targeted process is not at idle priority |
5832 | */ |
5833 | if ((memorystatus_jetsam_snapshot_count == 0) && |
(force_new_snapshot || memorystatus_idle_snapshot || !priority || (aPid_ep != JETSAM_PRIORITY_IDLE))) {
memorystatus_init_jetsam_snapshot_locked(NULL, 0);
5836 | new_snapshot = true; |
5837 | } |
5838 | |
5839 | proc_list_unlock(); |
5840 | |
freed_mem = memorystatus_kill_proc(p, cause, jetsam_reason, &killed, &footprint_of_killed_proc); /* purged and/or killed 'p' */
5842 | /* Success? */ |
5843 | if (freed_mem) { |
5844 | *memory_reclaimed = footprint_of_killed_proc; |
5845 | if (killed) { |
5846 | if (priority) { |
5847 | *priority = aPid_ep; |
5848 | } |
5849 | } else { |
5850 | /* purged */ |
5851 | proc_list_lock(); |
5852 | p->p_memstat_state &= ~P_MEMSTAT_TERMINATED; |
5853 | proc_list_unlock(); |
5854 | } |
5855 | proc_rele(p); |
5856 | goto exit; |
5857 | } |
5858 | |
5859 | /* |
5860 | * Failure - first unwind the state, |
5861 | * then fall through to restart the search. |
5862 | */ |
5863 | proc_list_lock(); |
5864 | proc_rele(p); |
5865 | p->p_memstat_state &= ~P_MEMSTAT_TERMINATED; |
5866 | p->p_memstat_state |= P_MEMSTAT_ERROR; |
5867 | *errors += 1; |
5868 | |
5869 | i = 0; |
next_p = memorystatus_get_first_proc_locked(&i, TRUE);
5871 | } |
5872 | } |
5873 | |
5874 | proc_list_unlock(); |
5875 | |
5876 | exit: |
os_reason_free(jetsam_reason);
5878 | |
5879 | if (!killed) { |
5880 | /* Clear snapshot if freshly captured and no target was found */ |
5881 | if (new_snapshot) { |
5882 | proc_list_lock(); |
5883 | memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; |
5884 | proc_list_unlock(); |
5885 | } |
5886 | } |
5887 | |
5888 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_JETSAM) | DBG_FUNC_END, |
5889 | MEMORYSTATUS_LOG_AVAILABLE_PAGES, killed ? aPid : 0, killed, *memory_reclaimed); |
5890 | |
5891 | return killed; |
5892 | } |
5893 | |
5894 | /* |
5895 | * Jetsam aggressively |
5896 | */ |
5897 | static boolean_t |
5898 | memorystatus_kill_processes_aggressive(uint32_t cause, int aggr_count, |
5899 | int32_t priority_max, int max_kills, uint32_t *errors, uint64_t *memory_reclaimed) |
5900 | { |
5901 | pid_t aPid; |
5902 | proc_t p = PROC_NULL, next_p = PROC_NULL; |
5903 | boolean_t new_snapshot = FALSE, killed = FALSE; |
5904 | int kill_count = 0; |
5905 | unsigned int i = 0; |
5906 | int32_t aPid_ep = 0; |
5907 | unsigned int memorystatus_level_snapshot = 0; |
5908 | uint64_t killtime = 0; |
5909 | clock_sec_t tv_sec; |
5910 | clock_usec_t tv_usec; |
5911 | uint32_t tv_msec; |
5912 | os_reason_t jetsam_reason = OS_REASON_NULL; |
uint64_t footprint_of_killed_proc = 0;
5914 | |
5915 | *memory_reclaimed = 0; |
5916 | |
5917 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_JETSAM) | DBG_FUNC_START, |
5918 | MEMORYSTATUS_LOG_AVAILABLE_PAGES, priority_max); |
5919 | |
5920 | if (priority_max >= JETSAM_PRIORITY_FOREGROUND) { |
5921 | /* |
* Check if aggressive jetsam has been asked to kill up to or beyond the
5923 | * JETSAM_PRIORITY_FOREGROUND bucket. If yes, sort the FG band based on |
5924 | * coalition footprint. |
5925 | */ |
5926 | memorystatus_sort_bucket(JETSAM_PRIORITY_FOREGROUND, JETSAM_SORT_DEFAULT); |
5927 | } |
5928 | |
jetsam_reason = os_reason_create(OS_REASON_JETSAM, cause);
if (jetsam_reason == OS_REASON_NULL) {
memorystatus_log_error("memorystatus_kill_processes_aggressive: failed to allocate exit reason\n");
}
memorystatus_log("memorystatus: aggressively killing up to %d processes below band %d.\n", max_kills, priority_max + 1);
5934 | proc_list_lock(); |
5935 | |
next_p = memorystatus_get_first_proc_locked(&i, TRUE);
while (next_p) {
if (proc_list_exited(next_p) ||
5939 | ((unsigned int)(next_p->p_memstat_effectivepriority) != i)) { |
5940 | /* |
5941 | * We have raced with next_p running on another core. |
5942 | * It may be exiting or it may have moved to a different |
5943 | * jetsam priority band. This means we have lost our |
5944 | * place in line while traversing the jetsam list. We |
5945 | * attempt to recover by rewinding to the beginning of the band |
5946 | * we were already traversing. By doing this, we do not guarantee |
5947 | * that no process escapes this aggressive march, but we can make |
5948 | * skipping an entire range of processes less likely. (PR-21069019) |
5949 | */ |
5950 | |
memorystatus_log_debug(
    "memorystatus: aggressive%d: rewinding band %d, %s(%d) moved or exiting.\n",
    aggr_count, i, (*next_p->p_name ? next_p->p_name : "unknown"), proc_getpid(next_p));

next_p = memorystatus_get_first_proc_locked(&i, TRUE);
5956 | continue; |
5957 | } |
5958 | |
5959 | p = next_p; |
next_p = memorystatus_get_next_proc_locked(&i, p, TRUE);
5961 | |
5962 | if (p->p_memstat_effectivepriority > priority_max) { |
5963 | /* |
5964 | * Bail out of this killing spree if we have |
5965 | * reached beyond the priority_max jetsam band. |
5966 | * That is, we kill up to and through the |
5967 | * priority_max jetsam band. |
5968 | */ |
5969 | proc_list_unlock(); |
5970 | goto exit; |
5971 | } |
5972 | |
5973 | aPid = proc_getpid(p); |
5974 | aPid_ep = p->p_memstat_effectivepriority; |
5975 | |
5976 | if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED | P_MEMSTAT_SKIP)) { |
5977 | continue; |
5978 | } |
5979 | |
5980 | /* |
5981 | * Capture a snapshot if none exists. |
5982 | */ |
5983 | if (memorystatus_jetsam_snapshot_count == 0) { |
memorystatus_init_jetsam_snapshot_locked(NULL, 0);
5985 | new_snapshot = TRUE; |
5986 | } |
5987 | |
5988 | /* |
5989 | * Mark as terminated so that if exit1() indicates success, but the process (for example) |
5990 | * is blocked in task_exception_notify(), it'll be skipped if encountered again - see |
5991 | * <rdar://problem/13553476>. This is cheaper than examining P_LEXIT, which requires the |
5992 | * acquisition of the proc lock. |
5993 | */ |
5994 | p->p_memstat_state |= P_MEMSTAT_TERMINATED; |
5995 | |
5996 | killtime = mach_absolute_time(); |
absolutetime_to_microtime(killtime, &tv_sec, &tv_usec);
5998 | tv_msec = tv_usec / 1000; |
5999 | |
6000 | /* Shift queue, update stats */ |
memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime);
6002 | |
6003 | /* |
6004 | * In order to kill the target process, we will drop the proc_list_lock. |
* To guarantee that p and next_p don't disappear out from under the lock,
6006 | * we must take a ref on both. |
6007 | * If we cannot get a reference, then it's likely we've raced with |
6008 | * that process exiting on another core. |
6009 | */ |
6010 | if (proc_ref(p, true) == p) { |
6011 | if (next_p) { |
while (next_p && (proc_ref(next_p, true) != next_p)) {
6013 | proc_t temp_p; |
6014 | |
6015 | /* |
6016 | * We must have raced with next_p exiting on another core. |
6017 | * Recover by getting the next eligible process in the band. |
6018 | */ |
6019 | |
memorystatus_log_debug(
    "memorystatus: aggressive%d: skipping %d [%s] (exiting?)\n",
    aggr_count, proc_getpid(next_p), (*next_p->p_name ? next_p->p_name : "(unknown)"));

temp_p = next_p;
next_p = memorystatus_get_next_proc_locked(&i, temp_p, TRUE);
6026 | } |
6027 | } |
6028 | proc_list_unlock(); |
6029 | |
memorystatus_log(
    "%lu.%03d memorystatus: %s%d pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n",
    (unsigned long)tv_sec, tv_msec,
    ((aPid_ep == JETSAM_PRIORITY_IDLE) ? "killing_idle_process_aggressive" : "killing_top_process_aggressive"),
    aggr_count, aPid, (*p->p_name ? p->p_name : "unknown"),
    memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES);
6036 | |
6037 | memorystatus_level_snapshot = memorystatus_level; |
6038 | |
6039 | /* |
6040 | * memorystatus_do_kill() drops a reference, so take another one so we can |
6041 | * continue to use this exit reason even after memorystatus_do_kill() |
6042 | * returns. |
6043 | */ |
os_reason_ref(jetsam_reason);
killed = memorystatus_do_kill(p, cause, jetsam_reason, &footprint_of_killed_proc);
6046 | |
6047 | /* Success? */ |
6048 | if (killed) { |
6049 | *memory_reclaimed += footprint_of_killed_proc; |
6050 | proc_rele(p); |
6051 | kill_count++; |
6052 | p = NULL; |
6053 | killed = FALSE; |
6054 | |
6055 | /* |
6056 | * Continue the killing spree. |
6057 | */ |
6058 | proc_list_lock(); |
6059 | if (next_p) { |
proc_rele(next_p);
6061 | } |
6062 | |
6063 | if (kill_count == max_kills) { |
memorystatus_log_info(
    "memorystatus: giving up aggressive kill after killing %d processes below band %d.\n", max_kills, priority_max + 1);
6066 | break; |
6067 | } |
6068 | |
6069 | if (aPid_ep == JETSAM_PRIORITY_FOREGROUND && memorystatus_aggressive_jetsam_lenient == TRUE) { |
6070 | if (memorystatus_level > memorystatus_level_snapshot && ((memorystatus_level - memorystatus_level_snapshot) >= AGGRESSIVE_JETSAM_LENIENT_MODE_THRESHOLD)) { |
6071 | #if DEVELOPMENT || DEBUG |
6072 | memorystatus_log_info("Disabling Lenient mode after one-time deployment.\n" ); |
6073 | #endif /* DEVELOPMENT || DEBUG */ |
6074 | memorystatus_aggressive_jetsam_lenient = FALSE; |
6075 | break; |
6076 | } |
6077 | } |
6078 | |
6079 | continue; |
6080 | } |
6081 | |
6082 | /* |
6083 | * Failure - first unwind the state, |
6084 | * then fall through to restart the search. |
6085 | */ |
6086 | proc_list_lock(); |
6087 | proc_rele(p); |
6088 | if (next_p) { |
proc_rele(next_p);
6090 | } |
6091 | p->p_memstat_state &= ~P_MEMSTAT_TERMINATED; |
6092 | p->p_memstat_state |= P_MEMSTAT_ERROR; |
6093 | *errors += 1; |
6094 | p = NULL; |
6095 | } |
6096 | |
6097 | /* |
6098 | * Failure - restart the search at the beginning of |
6099 | * the band we were already traversing. |
6100 | * |
6101 | * We might have raced with "p" exiting on another core, resulting in no |
6102 | * ref on "p". Or, we may have failed to kill "p". |
6103 | * |
6104 | * Either way, we fall thru to here, leaving the proc in the |
6105 | * P_MEMSTAT_TERMINATED or P_MEMSTAT_ERROR state. |
6106 | * |
* And, we hold the proc_list_lock at this point.
6108 | */ |
6109 | |
next_p = memorystatus_get_first_proc_locked(&i, TRUE);
6111 | } |
6112 | |
6113 | proc_list_unlock(); |
6114 | |
6115 | exit: |
os_reason_free(jetsam_reason);
6117 | |
6118 | /* Clear snapshot if freshly captured and no target was found */ |
6119 | if (new_snapshot && (kill_count == 0)) { |
6120 | proc_list_lock(); |
6121 | memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; |
6122 | proc_list_unlock(); |
6123 | } |
6124 | |
6125 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_JETSAM) | DBG_FUNC_END, |
6126 | MEMORYSTATUS_LOG_AVAILABLE_PAGES, 0, kill_count, *memory_reclaimed); |
6127 | |
6128 | if (kill_count > 0) { |
6129 | return TRUE; |
6130 | } else { |
6131 | return FALSE; |
6132 | } |
6133 | } |
6134 | |
6135 | static boolean_t |
6136 | memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged, uint64_t *memory_reclaimed) |
6137 | { |
6138 | pid_t aPid = 0; |
6139 | proc_t p = PROC_NULL, next_p = PROC_NULL; |
6140 | bool new_snapshot = false, killed = false, freed_mem = false; |
6141 | unsigned int i = 0; |
6142 | uint32_t aPid_ep; |
6143 | os_reason_t jetsam_reason = OS_REASON_NULL; |
6144 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_START, |
6145 | MEMORYSTATUS_LOG_AVAILABLE_PAGES); |
6146 | |
6147 | jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_HIGHWATER); |
6148 | if (jetsam_reason == OS_REASON_NULL) { |
6149 | memorystatus_log_error("memorystatus_kill_hiwat_proc: failed to allocate exit reason\n" ); |
6150 | } |
6151 | |
6152 | proc_list_lock(); |
6153 | |
next_p = memorystatus_get_first_proc_locked(&i, TRUE);
6155 | while (next_p) { |
uint64_t footprint_in_bytes = 0;
uint64_t memlimit_in_bytes = 0;
boolean_t skip = FALSE;
6159 | |
6160 | p = next_p; |
next_p = memorystatus_get_next_proc_locked(&i, p, TRUE);
6162 | |
6163 | aPid = proc_getpid(p); |
6164 | aPid_ep = p->p_memstat_effectivepriority; |
6165 | |
6166 | if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED | P_MEMSTAT_SKIP)) { |
6167 | continue; |
6168 | } |
6169 | |
6170 | /* skip if no limit set */ |
6171 | if (p->p_memstat_memlimit <= 0) { |
6172 | continue; |
6173 | } |
6174 | |
6175 | footprint_in_bytes = get_task_phys_footprint(proc_task(p)); |
6176 | memlimit_in_bytes = (((uint64_t)p->p_memstat_memlimit) * 1024ULL * 1024ULL); /* convert MB to bytes */ |
6177 | skip = (footprint_in_bytes <= memlimit_in_bytes); |
6178 | |
6179 | #if CONFIG_FREEZE |
if (!skip && (p->p_memstat_state & P_MEMSTAT_LOCKED)) {
skip = TRUE;
}
6187 | #endif |
6188 | |
6189 | if (skip) { |
6190 | continue; |
6191 | } else { |
6192 | if (memorystatus_jetsam_snapshot_count == 0) { |
memorystatus_init_jetsam_snapshot_locked(NULL, 0);
6194 | new_snapshot = true; |
6195 | } |
6196 | |
6197 | if (proc_ref(p, true) == p) { |
6198 | /* |
6199 | * Mark as terminated so that if exit1() indicates success, but the process (for example) |
6200 | * is blocked in task_exception_notify(), it'll be skipped if encountered again - see |
6201 | * <rdar://problem/13553476>. This is cheaper than examining P_LEXIT, which requires the |
6202 | * acquisition of the proc lock. |
6203 | */ |
6204 | p->p_memstat_state |= P_MEMSTAT_TERMINATED; |
6205 | |
6206 | proc_list_unlock(); |
6207 | } else { |
6208 | /* |
6209 | * We need to restart the search again because |
6210 | * proc_ref _can_ drop the proc_list lock |
6211 | * and we could have lost our stored next_p via |
6212 | * an exit() on another core. |
6213 | */ |
6214 | i = 0; |
next_p = memorystatus_get_first_proc_locked(&i, TRUE);
6216 | continue; |
6217 | } |
6218 | |
6219 | footprint_in_bytes = 0; |
freed_mem = memorystatus_kill_proc(p, kMemorystatusKilledHiwat, jetsam_reason, &killed, &footprint_in_bytes); /* purged and/or killed 'p' */
6221 | |
6222 | /* Success? */ |
6223 | if (freed_mem) { |
6224 | if (!killed) { |
/* purged 'p'; don't reset the HWM candidate count */
6226 | *purged = TRUE; |
6227 | |
6228 | proc_list_lock(); |
6229 | p->p_memstat_state &= ~P_MEMSTAT_TERMINATED; |
6230 | proc_list_unlock(); |
6231 | } else { |
6232 | *memory_reclaimed = footprint_in_bytes; |
6233 | } |
6234 | proc_rele(p); |
6235 | goto exit; |
6236 | } |
6237 | /* |
6238 | * Failure - first unwind the state, |
6239 | * then fall through to restart the search. |
6240 | */ |
6241 | proc_list_lock(); |
6242 | proc_rele(p); |
6243 | p->p_memstat_state &= ~P_MEMSTAT_TERMINATED; |
6244 | p->p_memstat_state |= P_MEMSTAT_ERROR; |
6245 | *errors += 1; |
6246 | |
6247 | i = 0; |
next_p = memorystatus_get_first_proc_locked(&i, TRUE);
6249 | } |
6250 | } |
6251 | |
6252 | proc_list_unlock(); |
6253 | |
6254 | exit: |
os_reason_free(jetsam_reason);
6256 | |
6257 | if (!killed) { |
6258 | *memory_reclaimed = 0; |
6259 | |
6260 | /* Clear snapshot if freshly captured and no target was found */ |
6261 | if (new_snapshot) { |
6262 | proc_list_lock(); |
6263 | memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; |
6264 | proc_list_unlock(); |
6265 | } |
6266 | } |
6267 | |
KDBG(MEMSTAT_CODE(BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_END,
    MEMORYSTATUS_LOG_AVAILABLE_PAGES, killed ? aPid : 0, killed, *memory_reclaimed);
6270 | |
6271 | return killed; |
6272 | } |
6273 | |
6274 | /* |
6275 | * Jetsam a process pinned in the elevated band. |
6276 | * |
6277 | * Return: true -- a pinned process was jetsammed |
6278 | * false -- no pinned process was jetsammed |
6279 | */ |
6280 | boolean_t |
6281 | memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, unsigned int band, int aggr_count, uint32_t *errors, uint64_t *memory_reclaimed) |
6282 | { |
6283 | pid_t aPid = 0; |
6284 | proc_t p = PROC_NULL, next_p = PROC_NULL; |
6285 | boolean_t new_snapshot = FALSE, killed = FALSE; |
6286 | int kill_count = 0; |
6287 | uint32_t aPid_ep; |
6288 | uint64_t killtime = 0; |
6289 | clock_sec_t tv_sec; |
6290 | clock_usec_t tv_usec; |
6291 | uint32_t tv_msec; |
uint64_t footprint_of_killed_proc = 0;
6293 | |
6294 | |
6295 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_JETSAM) | DBG_FUNC_START, |
6296 | MEMORYSTATUS_LOG_AVAILABLE_PAGES); |
6297 | |
6298 | #if CONFIG_FREEZE |
6299 | boolean_t consider_frozen_only = FALSE; |
6300 | |
6301 | if (band == (unsigned int) memorystatus_freeze_jetsam_band) { |
6302 | consider_frozen_only = TRUE; |
6303 | } |
6304 | #endif /* CONFIG_FREEZE */ |
6305 | |
6306 | proc_list_lock(); |
6307 | |
next_p = memorystatus_get_first_proc_locked(&band, FALSE);
6309 | while (next_p) { |
6310 | p = next_p; |
next_p = memorystatus_get_next_proc_locked(&band, p, FALSE);
6312 | |
6313 | aPid = proc_getpid(p); |
6314 | aPid_ep = p->p_memstat_effectivepriority; |
6315 | |
6316 | /* |
6317 | * Only pick a process pinned in this elevated band |
6318 | */ |
6319 | if (!(p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND)) { |
6320 | continue; |
6321 | } |
6322 | |
6323 | if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED | P_MEMSTAT_SKIP)) { |
6324 | continue; |
6325 | } |
6326 | |
6327 | #if CONFIG_FREEZE |
6328 | if (consider_frozen_only && !(p->p_memstat_state & P_MEMSTAT_FROZEN)) { |
6329 | continue; |
6330 | } |
6331 | |
6332 | if (p->p_memstat_state & P_MEMSTAT_LOCKED) { |
6333 | continue; |
6334 | } |
6335 | #endif /* CONFIG_FREEZE */ |
6336 | |
6337 | #if DEVELOPMENT || DEBUG |
memorystatus_log_info(
    "jetsam: elevated%d process pid %d [%s] - memorystatus_available_pages: %llu\n",
    aggr_count, aPid, (*p->p_name ? p->p_name : "unknown"), (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES);
6341 | #endif /* DEVELOPMENT || DEBUG */ |
6342 | |
6343 | if (memorystatus_jetsam_snapshot_count == 0) { |
memorystatus_init_jetsam_snapshot_locked(NULL, 0);
6345 | new_snapshot = TRUE; |
6346 | } |
6347 | |
6348 | p->p_memstat_state |= P_MEMSTAT_TERMINATED; |
6349 | |
6350 | killtime = mach_absolute_time(); |
absolutetime_to_microtime(killtime, &tv_sec, &tv_usec);
6352 | tv_msec = tv_usec / 1000; |
6353 | |
memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime);
6355 | |
6356 | if (proc_ref(p, true) == p) { |
6357 | proc_list_unlock(); |
6358 | |
6359 | /* |
6360 | * memorystatus_do_kill drops a reference, so take another one so we can |
6361 | * continue to use this exit reason even after memorystatus_do_kill() |
6362 | * returns |
6363 | */ |
os_reason_ref(jetsam_reason);
killed = memorystatus_do_kill(p, cause, jetsam_reason, &footprint_of_killed_proc);
6366 | |
6367 | memorystatus_log("%lu.%03d memorystatus: killing_top_process_elevated%d pid %d [%s] (%s %d) %lluKB - memorystatus_available_pages: %llu\n" , |
6368 | (unsigned long)tv_sec, tv_msec, |
6369 | aggr_count, |
6370 | aPid, ((p && *p->p_name) ? p->p_name : "unknown" ), |
6371 | memorystatus_kill_cause_name[cause], aPid_ep, |
6372 | footprint_of_killed_proc >> 10, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES); |
6373 | |
6374 | /* Success? */ |
6375 | if (killed) { |
6376 | *memory_reclaimed = footprint_of_killed_proc; |
6377 | proc_rele(p); |
6378 | kill_count++; |
6379 | goto exit; |
6380 | } |
6381 | |
6382 | /* |
6383 | * Failure - first unwind the state, |
6384 | * then fall through to restart the search. |
6385 | */ |
6386 | proc_list_lock(); |
6387 | proc_rele(p); |
6388 | p->p_memstat_state &= ~P_MEMSTAT_TERMINATED; |
6389 | p->p_memstat_state |= P_MEMSTAT_ERROR; |
6390 | *errors += 1; |
6391 | } |
6392 | |
6393 | /* |
6394 | * Failure - restart the search. |
6395 | * |
6396 | * We might have raced with "p" exiting on another core, resulting in no |
6397 | * ref on "p". Or, we may have failed to kill "p". |
6398 | * |
6399 | * Either way, we fall thru to here, leaving the proc in the |
6400 | * P_MEMSTAT_TERMINATED state or P_MEMSTAT_ERROR state. |
6401 | * |
* And, we hold the proc_list_lock at this point.
6403 | */ |
6404 | |
next_p = memorystatus_get_first_proc_locked(&band, FALSE);
6406 | } |
6407 | |
6408 | proc_list_unlock(); |
6409 | |
6410 | exit: |
os_reason_free(jetsam_reason);
6412 | |
6413 | if (kill_count == 0) { |
6414 | *memory_reclaimed = 0; |
6415 | |
6416 | /* Clear snapshot if freshly captured and no target was found */ |
6417 | if (new_snapshot) { |
6418 | proc_list_lock(); |
6419 | memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; |
6420 | proc_list_unlock(); |
6421 | } |
6422 | } |
6423 | |
6424 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_JETSAM) | DBG_FUNC_END, |
6425 | MEMORYSTATUS_LOG_AVAILABLE_PAGES, killed ? aPid : 0, kill_count, *memory_reclaimed); |
6426 | |
6427 | return killed; |
6428 | } |
6429 | |
6430 | boolean_t |
6431 | memorystatus_kill_on_VM_compressor_space_shortage(boolean_t async) |
6432 | { |
6433 | if (async) { |
6434 | os_atomic_store(&memorystatus_compressor_space_shortage, true, release); |
6435 | memorystatus_thread_wake(); |
6436 | return true; |
6437 | } else { |
6438 | os_reason_t jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE); |
6439 | if (jetsam_reason == OS_REASON_NULL) { |
6440 | memorystatus_log_error("memorystatus_kill_on_VM_compressor_space_shortage -- sync: failed to allocate jetsam reason\n" ); |
6441 | } |
6442 | |
return memorystatus_kill_process_sync(-1, kMemorystatusKilledVMCompressorSpaceShortage, jetsam_reason);
6444 | } |
6445 | } |
6446 | |
6447 | #if CONFIG_JETSAM |
6448 | |
6449 | void |
6450 | memorystatus_kill_on_vps_starvation(void) |
6451 | { |
6452 | os_atomic_store(&memorystatus_pageout_starved, true, release); |
6453 | memorystatus_thread_wake(); |
6454 | } |
6455 | |
6456 | boolean_t |
6457 | memorystatus_kill_on_vnode_limit(void) |
6458 | { |
6459 | os_reason_t jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_VNODE); |
6460 | if (jetsam_reason == OS_REASON_NULL) { |
6461 | memorystatus_log_error("memorystatus_kill_on_vnode_limit: failed to allocate jetsam reason\n" ); |
6462 | } |
6463 | |
6464 | return memorystatus_kill_process_sync(-1, kMemorystatusKilledVnodes, jetsam_reason); |
6465 | } |
6466 | |
6467 | boolean_t |
memorystatus_kill_on_sustained_pressure(void)
{
os_reason_t jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_SUSTAINED_PRESSURE);
if (jetsam_reason == OS_REASON_NULL) {
memorystatus_log_error("memorystatus_kill_on_sustained_pressure: failed to allocate jetsam reason\n");
6473 | } |
6474 | |
6475 | return memorystatus_kill_process_sync(-1, kMemorystatusKilledSustainedPressure, jetsam_reason); |
6476 | } |
6477 | |
6478 | boolean_t |
6479 | memorystatus_kill_with_jetsam_reason_sync(pid_t pid, os_reason_t jetsam_reason) |
6480 | { |
6481 | uint32_t kill_cause = jetsam_reason->osr_code <= JETSAM_REASON_MEMORYSTATUS_MAX ? |
6482 | (uint32_t) jetsam_reason->osr_code : JETSAM_REASON_INVALID; |
6483 | return memorystatus_kill_process_sync(pid, kill_cause, jetsam_reason); |
6484 | } |
6485 | |
6486 | #endif /* CONFIG_JETSAM */ |
6487 | |
6488 | boolean_t |
6489 | memorystatus_kill_on_zone_map_exhaustion(pid_t pid) |
6490 | { |
6491 | boolean_t res = FALSE; |
6492 | if (pid == -1) { |
6493 | os_atomic_store(&memorystatus_zone_map_is_exhausted, true, release); |
6494 | memorystatus_thread_wake(); |
6495 | return true; |
6496 | } else { |
6497 | os_reason_t jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_ZONE_MAP_EXHAUSTION); |
6498 | if (jetsam_reason == OS_REASON_NULL) { |
6499 | memorystatus_log_error("memorystatus_kill_on_zone_map_exhaustion: failed to allocate jetsam reason\n" ); |
6500 | } |
6501 | |
res = memorystatus_kill_process_sync(pid, kMemorystatusKilledZoneMapExhaustion, jetsam_reason);
6503 | } |
6504 | return res; |
6505 | } |
6506 | |
6507 | void |
6508 | memorystatus_on_pageout_scan_end(void) |
6509 | { |
6510 | /* No-op */ |
6511 | } |
6512 | |
6513 | /* Return both allocated and actual size, since there's a race between allocation and list compilation */ |
6514 | static int |
6515 | memorystatus_get_priority_list(memorystatus_priority_entry_t **list_ptr, size_t *buffer_size, size_t *list_size, boolean_t size_only) |
6516 | { |
6517 | uint32_t list_count, i = 0; |
6518 | memorystatus_priority_entry_t *list_entry; |
6519 | proc_t p; |
6520 | |
6521 | list_count = memorystatus_list_count; |
6522 | *list_size = sizeof(memorystatus_priority_entry_t) * list_count; |
6523 | |
6524 | /* Just a size check? */ |
6525 | if (size_only) { |
6526 | return 0; |
6527 | } |
6528 | |
6529 | /* Otherwise, validate the size of the buffer */ |
6530 | if (*buffer_size < *list_size) { |
6531 | return EINVAL; |
6532 | } |
6533 | |
6534 | *list_ptr = kalloc_data(*list_size, Z_WAITOK | Z_ZERO); |
6535 | if (!*list_ptr) { |
6536 | return ENOMEM; |
6537 | } |
6538 | |
6539 | *buffer_size = *list_size; |
6540 | *list_size = 0; |
6541 | |
6542 | list_entry = *list_ptr; |
6543 | |
6544 | proc_list_lock(); |
6545 | |
p = memorystatus_get_first_proc_locked(&i, TRUE);
6547 | while (p && (*list_size < *buffer_size)) { |
6548 | list_entry->pid = proc_getpid(p); |
6549 | list_entry->priority = p->p_memstat_effectivepriority; |
6550 | list_entry->user_data = p->p_memstat_userdata; |
6551 | |
6552 | if (p->p_memstat_memlimit <= 0) { |
task_get_phys_footprint_limit(proc_task(p), &list_entry->limit);
6554 | } else { |
6555 | list_entry->limit = p->p_memstat_memlimit; |
6556 | } |
6557 | |
6558 | list_entry->state = memorystatus_build_state(p); |
6559 | list_entry++; |
6560 | |
6561 | *list_size += sizeof(memorystatus_priority_entry_t); |
6562 | |
p = memorystatus_get_next_proc_locked(&i, p, TRUE);
6564 | } |
6565 | |
6566 | proc_list_unlock(); |
6567 | |
6568 | memorystatus_log_debug("memorystatus_get_priority_list: returning %lu for size\n" , (unsigned long)*list_size); |
6569 | |
6570 | return 0; |
6571 | } |
6572 | |
6573 | static int |
6574 | memorystatus_get_priority_pid(pid_t pid, user_addr_t buffer, size_t buffer_size) |
6575 | { |
6576 | int error = 0; |
6577 | memorystatus_priority_entry_t mp_entry; |
6578 | kern_return_t ret; |
6579 | |
6580 | /* Validate inputs */ |
6581 | if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(memorystatus_priority_entry_t))) { |
6582 | return EINVAL; |
6583 | } |
6584 | |
6585 | proc_t p = proc_find(pid); |
6586 | if (!p) { |
6587 | return ESRCH; |
6588 | } |
6589 | |
memset(&mp_entry, 0, sizeof(memorystatus_priority_entry_t));
6591 | |
6592 | mp_entry.pid = proc_getpid(p); |
6593 | mp_entry.priority = p->p_memstat_effectivepriority; |
6594 | mp_entry.user_data = p->p_memstat_userdata; |
6595 | if (p->p_memstat_memlimit <= 0) { |
ret = task_get_phys_footprint_limit(proc_task(p), &mp_entry.limit);
6597 | if (ret != KERN_SUCCESS) { |
6598 | proc_rele(p); |
6599 | return EINVAL; |
6600 | } |
6601 | } else { |
6602 | mp_entry.limit = p->p_memstat_memlimit; |
6603 | } |
6604 | mp_entry.state = memorystatus_build_state(p); |
6605 | |
6606 | proc_rele(p); |
6607 | |
6608 | error = copyout(&mp_entry, buffer, buffer_size); |
6609 | |
6610 | return error; |
6611 | } |
6612 | |
6613 | static int |
6614 | memorystatus_cmd_get_priority_list(pid_t pid, user_addr_t buffer, size_t buffer_size, int32_t *retval) |
6615 | { |
6616 | int error = 0; |
6617 | boolean_t size_only; |
6618 | size_t list_size; |
6619 | |
6620 | /* |
6621 | * When a non-zero pid is provided, the 'list' has only one entry. |
6622 | */ |
6623 | |
size_only = ((buffer == USER_ADDR_NULL) ? TRUE : FALSE);
6625 | |
6626 | if (pid != 0) { |
6627 | list_size = sizeof(memorystatus_priority_entry_t) * 1; |
6628 | if (!size_only) { |
6629 | error = memorystatus_get_priority_pid(pid, buffer, buffer_size); |
6630 | } |
6631 | } else { |
6632 | memorystatus_priority_entry_t *list = NULL; |
error = memorystatus_get_priority_list(&list, &buffer_size, &list_size, size_only);
6634 | |
6635 | if (error == 0) { |
6636 | if (!size_only) { |
6637 | error = copyout(list, buffer, list_size); |
6638 | } |
6639 | |
6640 | kfree_data(list, buffer_size); |
6641 | } |
6642 | } |
6643 | |
6644 | if (error == 0) { |
6645 | assert(list_size <= INT32_MAX); |
6646 | *retval = (int32_t) list_size; |
6647 | } |
6648 | |
6649 | return error; |
6650 | } |
6651 | |
6652 | static void |
6653 | memorystatus_clear_errors(void) |
6654 | { |
6655 | proc_t p; |
6656 | unsigned int i = 0; |
6657 | |
6658 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_CLEAR_ERRORS) | DBG_FUNC_START); |
6659 | |
6660 | proc_list_lock(); |
6661 | |
p = memorystatus_get_first_proc_locked(&i, TRUE);
6663 | while (p) { |
6664 | if (p->p_memstat_state & P_MEMSTAT_ERROR) { |
6665 | p->p_memstat_state &= ~P_MEMSTAT_ERROR; |
6666 | } |
p = memorystatus_get_next_proc_locked(&i, p, TRUE);
6668 | } |
6669 | |
6670 | proc_list_unlock(); |
6671 | |
6672 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_CLEAR_ERRORS) | DBG_FUNC_END); |
6673 | } |
6674 | |
6675 | #if CONFIG_JETSAM |
6676 | static void |
6677 | memorystatus_update_levels_locked(void) |
6678 | { |
6679 | /* |
6680 | * If there's an entry in the first bucket, we have idle processes. |
6681 | */ |
6682 | memstat_bucket_t *first_bucket = &memstat_bucket[JETSAM_PRIORITY_IDLE]; |
6683 | if (first_bucket->count) { |
6684 | memorystatus_available_pages_critical = memorystatus_available_pages_critical_idle; |
6685 | } else { |
6686 | memorystatus_available_pages_critical = memorystatus_available_pages_critical_base; |
6687 | } |
6688 | |
6689 | if (memorystatus_available_pages_critical > memorystatus_available_pages_pressure) { |
6690 | /* |
6691 | * The critical threshold must never exceed the pressure threshold |
6692 | */ |
6693 | memorystatus_available_pages_critical = memorystatus_available_pages_pressure; |
6694 | } |
6695 | |
6696 | if (memorystatus_jetsam_policy & kPolicyMoreFree) { |
6697 | memorystatus_available_pages_critical += memorystatus_policy_more_free_offset_pages; |
6698 | } |
6699 | } |
6700 | |
6701 | void |
6702 | memorystatus_fast_jetsam_override(boolean_t enable_override) |
6703 | { |
6704 | /* If fast jetsam is not enabled, simply return */ |
6705 | if (!fast_jetsam_enabled) { |
6706 | return; |
6707 | } |
6708 | |
6709 | if (enable_override) { |
6710 | if ((memorystatus_jetsam_policy & kPolicyMoreFree) == kPolicyMoreFree) { |
6711 | return; |
6712 | } |
6713 | proc_list_lock(); |
6714 | memorystatus_jetsam_policy |= kPolicyMoreFree; |
6715 | memorystatus_thread_pool_max(); |
6716 | memorystatus_update_levels_locked(); |
6717 | proc_list_unlock(); |
6718 | } else { |
6719 | if ((memorystatus_jetsam_policy & kPolicyMoreFree) == 0) { |
6720 | return; |
6721 | } |
6722 | proc_list_lock(); |
6723 | memorystatus_jetsam_policy &= ~kPolicyMoreFree; |
6724 | memorystatus_thread_pool_default(); |
6725 | memorystatus_update_levels_locked(); |
6726 | proc_list_unlock(); |
6727 | } |
6728 | } |
6729 | |
6730 | |
6731 | static int |
6732 | sysctl_kern_memorystatus_policy_more_free SYSCTL_HANDLER_ARGS |
6733 | { |
6734 | #pragma unused(arg1, arg2, oidp) |
6735 | int error = 0, more_free = 0; |
6736 | |
6737 | /* |
6738 | * TODO: Enable this privilege check? |
6739 | * |
6740 | * error = priv_check_cred(kauth_cred_get(), PRIV_VM_JETSAM, 0); |
6741 | * if (error) |
6742 | * return (error); |
6743 | */ |
6744 | |
6745 | error = sysctl_handle_int(oidp, &more_free, 0, req); |
6746 | if (error || !req->newptr) { |
6747 | return error; |
6748 | } |
6749 | |
6750 | if (more_free) { |
6751 | memorystatus_fast_jetsam_override(true); |
6752 | } else { |
6753 | memorystatus_fast_jetsam_override(false); |
6754 | } |
6755 | |
6756 | return 0; |
6757 | } |
SYSCTL_PROC(_kern, OID_AUTO, memorystatus_policy_more_free, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    0, 0, &sysctl_kern_memorystatus_policy_more_free, "I", "");
6760 | |
6761 | #endif /* CONFIG_JETSAM */ |
6762 | |
6763 | /* |
6764 | * Get the at_boot snapshot |
6765 | */ |
6766 | static int |
6767 | memorystatus_get_at_boot_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) |
6768 | { |
6769 | size_t input_size = *snapshot_size; |
6770 | |
6771 | /* |
6772 | * The at_boot snapshot has no entry list. |
6773 | */ |
6774 | *snapshot_size = sizeof(memorystatus_jetsam_snapshot_t); |
6775 | |
6776 | if (size_only) { |
6777 | return 0; |
6778 | } |
6779 | |
6780 | /* |
6781 | * Validate the size of the snapshot buffer |
6782 | */ |
6783 | if (input_size < *snapshot_size) { |
6784 | return EINVAL; |
6785 | } |
6786 | |
6787 | /* |
6788 | * Update the notification_time only |
6789 | */ |
6790 | memorystatus_at_boot_snapshot.notification_time = mach_absolute_time(); |
6791 | *snapshot = &memorystatus_at_boot_snapshot; |
6792 | |
6793 | memorystatus_log_debug( |
6794 | "memorystatus_get_at_boot_snapshot: returned inputsize (%ld), snapshot_size(%ld), listcount(%d)\n" , |
6795 | (long)input_size, (long)*snapshot_size, 0); |
6796 | return 0; |
6797 | } |
6798 | |
6799 | #if CONFIG_FREEZE |
6800 | static int |
6801 | memorystatus_get_jetsam_snapshot_freezer(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) |
6802 | { |
6803 | size_t input_size = *snapshot_size; |
6804 | |
6805 | if (memorystatus_jetsam_snapshot_freezer->entry_count > 0) { |
6806 | *snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + (sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_freezer->entry_count)); |
6807 | } else { |
6808 | *snapshot_size = 0; |
6809 | } |
6810 | assert(*snapshot_size <= memorystatus_jetsam_snapshot_freezer_size); |
6811 | |
6812 | if (size_only) { |
6813 | return 0; |
6814 | } |
6815 | |
6816 | if (input_size < *snapshot_size) { |
6817 | return EINVAL; |
6818 | } |
6819 | |
6820 | *snapshot = memorystatus_jetsam_snapshot_freezer; |
6821 | |
6822 | memorystatus_log_debug( |
6823 | "memorystatus_get_jetsam_snapshot_freezer: returned inputsize (%ld), snapshot_size(%ld), listcount(%ld)\n" , |
6824 | (long)input_size, (long)*snapshot_size, (long)memorystatus_jetsam_snapshot_freezer->entry_count); |
6825 | |
6826 | return 0; |
6827 | } |
6828 | #endif /* CONFIG_FREEZE */ |
6829 | |
6830 | static int |
6831 | memorystatus_get_on_demand_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) |
6832 | { |
6833 | size_t input_size = *snapshot_size; |
6834 | uint32_t ods_list_count = memorystatus_list_count; |
6835 | memorystatus_jetsam_snapshot_t *ods = NULL; /* The on_demand snapshot buffer */ |
6836 | |
6837 | *snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + (sizeof(memorystatus_jetsam_snapshot_entry_t) * (ods_list_count)); |
6838 | |
6839 | if (size_only) { |
6840 | return 0; |
6841 | } |
6842 | |
6843 | /* |
6844 | * Validate the size of the snapshot buffer. |
6845 | * This is inherently racey. May want to revisit |
6846 | * this error condition and trim the output when |
6847 | * it doesn't fit. |
6848 | */ |
6849 | if (input_size < *snapshot_size) { |
6850 | return EINVAL; |
6851 | } |
6852 | |
6853 | /* |
6854 | * Allocate and initialize a snapshot buffer. |
6855 | */ |
6856 | ods = kalloc_data(*snapshot_size, Z_WAITOK | Z_ZERO); |
6857 | if (!ods) { |
6858 | return ENOMEM; |
6859 | } |
6860 | |
6861 | proc_list_lock(); |
memorystatus_init_jetsam_snapshot_locked(ods, ods_list_count);
6863 | proc_list_unlock(); |
6864 | |
6865 | /* |
6866 | * Return the kernel allocated, on_demand buffer. |
6867 | * The caller of this routine will copy the data out |
6868 | * to user space and then free the kernel allocated |
6869 | * buffer. |
6870 | */ |
6871 | *snapshot = ods; |
6872 | |
6873 | memorystatus_log_debug( |
6874 | "memorystatus_get_on_demand_snapshot: returned inputsize (%ld), snapshot_size(%ld), listcount(%ld)\n" , |
6875 | (long)input_size, (long)*snapshot_size, (long)ods_list_count); |
6876 | |
6877 | return 0; |
6878 | } |
6879 | |
6880 | static int |
6881 | memorystatus_get_jetsam_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) |
6882 | { |
6883 | size_t input_size = *snapshot_size; |
6884 | |
6885 | if (memorystatus_jetsam_snapshot_count > 0) { |
6886 | *snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + (sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_count)); |
6887 | } else { |
6888 | *snapshot_size = 0; |
6889 | } |
6890 | |
6891 | if (size_only) { |
6892 | return 0; |
6893 | } |
6894 | |
6895 | if (input_size < *snapshot_size) { |
6896 | return EINVAL; |
6897 | } |
6898 | |
6899 | *snapshot = memorystatus_jetsam_snapshot; |
6900 | |
6901 | memorystatus_log_debug( |
6902 | "memorystatus_get_jetsam_snapshot: returned inputsize (%ld), snapshot_size(%ld), listcount(%ld)\n" , |
6903 | (long)input_size, (long)*snapshot_size, (long)memorystatus_jetsam_snapshot_count); |
6904 | |
6905 | return 0; |
6906 | } |
6907 | |
6908 | |
6909 | static int |
6910 | memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t buffer_size, int32_t *retval) |
6911 | { |
6912 | int error = EINVAL; |
6913 | boolean_t size_only; |
6914 | boolean_t is_default_snapshot = FALSE; |
6915 | boolean_t is_on_demand_snapshot = FALSE; |
6916 | boolean_t is_at_boot_snapshot = FALSE; |
6917 | #if CONFIG_FREEZE |
6918 | bool is_freezer_snapshot = false; |
6919 | #endif /* CONFIG_FREEZE */ |
6920 | memorystatus_jetsam_snapshot_t *snapshot; |
6921 | |
6922 | size_only = ((buffer == USER_ADDR_NULL) ? TRUE : FALSE); |
6923 | |
6924 | if (flags == 0) { |
6925 | /* Default */ |
6926 | is_default_snapshot = TRUE; |
error = memorystatus_get_jetsam_snapshot(&snapshot, &buffer_size, size_only);
6928 | } else { |
6929 | if (flags & ~(MEMORYSTATUS_SNAPSHOT_ON_DEMAND | MEMORYSTATUS_SNAPSHOT_AT_BOOT | MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER)) { |
6930 | /* |
6931 | * Unsupported bit set in flag. |
6932 | */ |
6933 | return EINVAL; |
6934 | } |
6935 | |
6936 | if (flags & (flags - 0x1)) { |
6937 | /* |
6938 | * Can't have multiple flags set at the same time. |
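* ((flags & (flags - 1)) clears the lowest set bit, so the test is
* nonzero exactly when more than one bit is set, e.g. 0x3 & 0x2 == 0x2.)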
6939 | */ |
6940 | return EINVAL; |
6941 | } |
6942 | |
6943 | if (flags & MEMORYSTATUS_SNAPSHOT_ON_DEMAND) { |
6944 | is_on_demand_snapshot = TRUE; |
6945 | /* |
6946 | * When not requesting the size only, the following call will allocate |
6947 | * an on_demand snapshot buffer, which is freed below. |
6948 | */ |
error = memorystatus_get_on_demand_snapshot(&snapshot, &buffer_size, size_only);
6950 | } else if (flags & MEMORYSTATUS_SNAPSHOT_AT_BOOT) { |
6951 | is_at_boot_snapshot = TRUE; |
error = memorystatus_get_at_boot_snapshot(&snapshot, &buffer_size, size_only);
6953 | #if CONFIG_FREEZE |
6954 | } else if (flags & MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER) { |
6955 | is_freezer_snapshot = true; |
6956 | error = memorystatus_get_jetsam_snapshot_freezer(&snapshot, &buffer_size, size_only); |
6957 | #endif /* CONFIG_FREEZE */ |
6958 | } else { |
6959 | /* |
6960 | * Invalid flag setting. |
6961 | */ |
6962 | return EINVAL; |
6963 | } |
6964 | } |
6965 | |
6966 | if (error) { |
6967 | goto out; |
6968 | } |
6969 | |
6970 | /* |
6971 | * Copy the data out to user space and clear the snapshot buffer. |
* If working with the jetsam snapshot,
* clearing the buffer means resetting the count.
* If working with an on_demand snapshot,
* clearing the buffer means freeing it.
* If working with the at_boot snapshot,
* there is nothing to clear or update.
* If working with a copy of the snapshot,
* there is nothing to clear or update.
* If working with the freezer snapshot,
* clearing the buffer means resetting the count.
6982 | */ |
6983 | if (!size_only) { |
6984 | if ((error = copyout(snapshot, buffer, buffer_size)) == 0) { |
6985 | #if CONFIG_FREEZE |
6986 | if (is_default_snapshot || is_freezer_snapshot) { |
6987 | #else |
6988 | if (is_default_snapshot) { |
6989 | #endif /* CONFIG_FREEZE */ |
6990 | /* |
6991 | * The jetsam snapshot is never freed, its count is simply reset. |
6992 | * However, we make a copy for any parties that might be interested |
6993 | * in the previous fully populated snapshot. |
6994 | */ |
6995 | proc_list_lock(); |
6996 | #if DEVELOPMENT || DEBUG |
6997 | if (memorystatus_testing_pid != 0 && memorystatus_testing_pid != proc_getpid(current_proc())) { |
6998 | /* Snapshot is currently owned by someone else. Don't consume it. */ |
6999 | proc_list_unlock(); |
7000 | goto out; |
7001 | } |
7002 | #endif /* (DEVELOPMENT || DEBUG)*/ |
7003 | if (is_default_snapshot) { |
7004 | snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; |
7005 | memorystatus_jetsam_snapshot_last_timestamp = 0; |
7006 | } |
7007 | #if CONFIG_FREEZE |
7008 | else if (is_freezer_snapshot) { |
7009 | memorystatus_jetsam_snapshot_freezer->entry_count = 0; |
7010 | } |
7011 | #endif /* CONFIG_FREEZE */ |
7012 | proc_list_unlock(); |
7013 | } |
7014 | } |
7015 | |
7016 | if (is_on_demand_snapshot) { |
7017 | /* |
7018 | * The on_demand snapshot is always freed, |
7019 | * even if the copyout failed. |
7020 | */ |
7021 | kfree_data(snapshot, buffer_size); |
7022 | } |
7023 | } |
7024 | |
7025 | out: |
7026 | if (error == 0) { |
7027 | assert(buffer_size <= INT32_MAX); |
7028 | *retval = (int32_t) buffer_size; |
7029 | } |
7030 | return error; |
7031 | } |
7032 | |
7033 | #if DEVELOPMENT || DEBUG |
7034 | static int |
7035 | memorystatus_cmd_set_testing_pid(int32_t flags) |
7036 | { |
7037 | int error = EINVAL; |
7038 | proc_t caller = current_proc(); |
7039 | assert(caller != kernproc); |
7040 | proc_list_lock(); |
7041 | if (flags & MEMORYSTATUS_FLAGS_SET_TESTING_PID) { |
7042 | if (memorystatus_testing_pid == 0) { |
7043 | memorystatus_testing_pid = proc_getpid(caller); |
7044 | error = 0; |
7045 | } else if (memorystatus_testing_pid == proc_getpid(caller)) { |
7046 | error = 0; |
7047 | } else { |
7048 | /* We don't allow ownership to be taken from another proc. */ |
7049 | error = EBUSY; |
7050 | } |
7051 | } else if (flags & MEMORYSTATUS_FLAGS_UNSET_TESTING_PID) { |
7052 | if (memorystatus_testing_pid == proc_getpid(caller)) { |
7053 | memorystatus_testing_pid = 0; |
7054 | error = 0; |
7055 | } else if (memorystatus_testing_pid != 0) { |
7056 | /* We don't allow ownership to be taken from another proc. */ |
7057 | error = EPERM; |
7058 | } |
7059 | } |
7060 | proc_list_unlock(); |
7061 | |
7062 | return error; |
7063 | } |
7064 | #endif /* DEVELOPMENT || DEBUG */ |
7065 | |
7066 | /* |
7067 | * Routine: memorystatus_cmd_grp_set_priorities |
7068 | * Purpose: Update priorities for a group of processes. |
7069 | * |
7070 | * [priority] |
7071 | * Move each process out of its effective priority |
7072 | * band and into a new priority band. |
* Maintains relative order from lowest to highest priority.
* Within a single band, maintains relative order from head to tail.
7075 | * |
7076 | * eg: before [effectivepriority | pid] |
7077 | * [18 | p101 ] |
7078 | * [17 | p55, p67, p19 ] |
7079 | * [12 | p103 p10 ] |
7080 | * [ 7 | p25 ] |
7081 | * [ 0 | p71, p82, ] |
7082 | * |
7083 | * after [ new band | pid] |
7084 | * [ xxx | p71, p82, p25, p103, p10, p55, p67, p19, p101] |
7085 | * |
7086 | * Returns: 0 on success, else non-zero. |
7087 | * |
7088 | * Caveat: We know there is a race window regarding recycled pids. |
7089 | * A process could be killed before the kernel can act on it here. |
7090 | * If a pid cannot be found in any of the jetsam priority bands, |
7091 | * then we simply ignore it. No harm. |
7092 | * But, if the pid has been recycled then it could be an issue. |
7093 | * In that scenario, we might move an unsuspecting process to the new |
7094 | * priority band. It's not clear how the kernel can safeguard |
7095 | * against this, but it would be an extremely rare case anyway. |
7096 | * The caller of this api might avoid such race conditions by |
7097 | * ensuring that the processes passed in the pid list are suspended. |
7098 | */ |
7099 | |
7100 | |
7101 | static int |
7102 | memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) |
7103 | { |
7104 | /* |
7105 | * We only handle setting priority |
7106 | * per process |
7107 | */ |
7108 | |
7109 | int error = 0; |
7110 | memorystatus_properties_entry_v1_t *entries = NULL; |
7111 | size_t entry_count = 0; |
7112 | |
7113 | /* This will be the ordered proc list */ |
7114 | typedef struct memorystatus_internal_properties { |
7115 | proc_t proc; |
7116 | int32_t priority; |
7117 | } memorystatus_internal_properties_t; |
7118 | |
7119 | memorystatus_internal_properties_t *table = NULL; |
7120 | uint32_t table_count = 0; |
7121 | |
7122 | size_t i = 0; |
7123 | uint32_t bucket_index = 0; |
7124 | boolean_t head_insert; |
7125 | int32_t new_priority; |
7126 | |
7127 | proc_t p; |
7128 | |
7129 | /* Verify inputs */ |
7130 | if ((buffer == USER_ADDR_NULL) || (buffer_size == 0)) { |
7131 | error = EINVAL; |
7132 | goto out; |
7133 | } |
7134 | |
7135 | entry_count = (buffer_size / sizeof(memorystatus_properties_entry_v1_t)); |
7136 | if (entry_count == 0) { |
7137 | /* buffer size was not large enough for a single entry */ |
7138 | error = EINVAL; |
7139 | goto out; |
7140 | } |
7141 | |
7142 | if ((entries = kalloc_data(buffer_size, Z_WAITOK)) == NULL) { |
7143 | error = ENOMEM; |
7144 | goto out; |
7145 | } |
7146 | |
7147 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_START, MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY, entry_count); |
7148 | |
7149 | if ((error = copyin(buffer, entries, buffer_size)) != 0) { |
7150 | goto out; |
7151 | } |
7152 | |
7153 | /* Verify sanity of input priorities */ |
7154 | if (entries[0].version == MEMORYSTATUS_MPE_VERSION_1) { |
7155 | if ((buffer_size % MEMORYSTATUS_MPE_VERSION_1_SIZE) != 0) { |
7156 | error = EINVAL; |
7157 | goto out; |
7158 | } |
7159 | } else { |
7160 | error = EINVAL; |
7161 | goto out; |
7162 | } |
7163 | |
7164 | for (i = 0; i < entry_count; i++) { |
7165 | if (entries[i].priority == -1) { |
7166 | /* Use as shorthand for default priority */ |
7167 | entries[i].priority = JETSAM_PRIORITY_DEFAULT; |
7168 | } else if (entries[i].priority > JETSAM_PRIORITY_IDLE && entries[i].priority <= applications_aging_band) { |
7169 | /* |
			 * Everything between idle and the aging bands is reserved for internal use.
			 * If requested, adjust to JETSAM_PRIORITY_IDLE.
			 * Entitled processes (e.g. munch) can use a subset of this range for testing.
7173 | */ |
7174 | if (entries[i].priority > JETSAM_PRIORITY_ENTITLED_MAX || |
7175 | !current_task_can_use_entitled_range()) { |
7176 | entries[i].priority = JETSAM_PRIORITY_IDLE; |
7177 | } |
7178 | } else if (entries[i].priority == JETSAM_PRIORITY_IDLE_HEAD) { |
			/*
			 * JETSAM_PRIORITY_IDLE_HEAD inserts at the head of the
			 * idle queue; deal with this later.
			 */
7182 | } else if ((entries[i].priority < 0) || (entries[i].priority >= MEMSTAT_BUCKET_COUNT)) { |
7183 | /* Sanity check */ |
7184 | error = EINVAL; |
7185 | goto out; |
7186 | } |
7187 | } |
7188 | |
7189 | table = kalloc_type(memorystatus_internal_properties_t, entry_count, |
7190 | Z_WAITOK | Z_ZERO); |
7191 | if (table == NULL) { |
7192 | error = ENOMEM; |
7193 | goto out; |
7194 | } |
7195 | |
7196 | |
7197 | /* |
7198 | * For each jetsam bucket entry, spin through the input property list. |
7199 | * When a matching pid is found, populate an adjacent table with the |
7200 | * appropriate proc pointer and new property values. |
7201 | * This traversal automatically preserves order from lowest |
7202 | * to highest priority. |
7203 | */ |
7204 | |
7205 | bucket_index = 0; |
7206 | |
7207 | proc_list_lock(); |
7208 | |
7209 | /* Create the ordered table */ |
	p = memorystatus_get_first_proc_locked(&bucket_index, TRUE);
7211 | while (p && (table_count < entry_count)) { |
7212 | for (i = 0; i < entry_count; i++) { |
7213 | if (proc_getpid(p) == entries[i].pid) { |
7214 | /* Build the table data */ |
7215 | table[table_count].proc = p; |
7216 | table[table_count].priority = entries[i].priority; |
7217 | table_count++; |
7218 | break; |
7219 | } |
7220 | } |
		p = memorystatus_get_next_proc_locked(&bucket_index, p, TRUE);
7222 | } |
7223 | |
7224 | /* We now have ordered list of procs ready to move */ |
7225 | for (i = 0; i < table_count; i++) { |
7226 | p = table[i].proc; |
7227 | assert(p != NULL); |
7228 | |
		/* Allow head inserts -- but relative order is not preserved for head-inserted pids */
7230 | if (table[i].priority == JETSAM_PRIORITY_IDLE_HEAD) { |
7231 | new_priority = JETSAM_PRIORITY_IDLE; |
7232 | head_insert = true; |
7233 | } else { |
7234 | new_priority = table[i].priority; |
7235 | head_insert = false; |
7236 | } |
7237 | |
7238 | /* Not allowed */ |
7239 | if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { |
7240 | continue; |
7241 | } |
7242 | |
7243 | /* |
7244 | * Take appropriate steps if moving proc out of |
7245 | * either of the aging bands. |
7246 | */ |
7247 | if ((p->p_memstat_effectivepriority == system_procs_aging_band) || (p->p_memstat_effectivepriority == applications_aging_band)) { |
7248 | memorystatus_invalidate_idle_demotion_locked(p, TRUE); |
7249 | } |
7250 | |
		memorystatus_update_priority_locked(p, new_priority, head_insert, false);
7252 | } |
7253 | |
7254 | proc_list_unlock(); |
7255 | |
7256 | /* |
7257 | * if (table_count != entry_count) |
7258 | * then some pids were not found in a jetsam band. |
7259 | * harmless but interesting... |
7260 | */ |
7261 | out: |
7262 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_END, MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY, entry_count, table_count); |
7263 | |
7264 | kfree_data(entries, buffer_size); |
7265 | kfree_type(memorystatus_internal_properties_t, entry_count, table); |
7266 | |
7267 | return error; |
7268 | } |
7269 | |
7270 | memorystatus_internal_probabilities_t *memorystatus_global_probabilities_table = NULL; |
7271 | size_t memorystatus_global_probabilities_size = 0; |
7272 | |
7273 | static int |
7274 | memorystatus_cmd_grp_set_probabilities(user_addr_t buffer, size_t buffer_size) |
7275 | { |
7276 | int error = 0; |
7277 | memorystatus_properties_entry_v1_t *entries = NULL; |
7278 | size_t entry_count = 0, i = 0; |
7279 | memorystatus_internal_probabilities_t *tmp_table_new = NULL, *tmp_table_old = NULL; |
7280 | size_t tmp_table_new_size = 0, tmp_table_old_size = 0; |
7281 | #if DEVELOPMENT || DEBUG |
7282 | if (memorystatus_testing_pid != 0 && memorystatus_testing_pid != proc_getpid(current_proc())) { |
		/* Probabilities are currently owned by someone else. Don't change them. */
7284 | error = EPERM; |
7285 | goto out; |
7286 | } |
7287 | #endif /* (DEVELOPMENT || DEBUG)*/ |
7288 | |
7289 | /* Verify inputs */ |
7290 | if ((buffer == USER_ADDR_NULL) || (buffer_size == 0)) { |
7291 | error = EINVAL; |
7292 | goto out; |
7293 | } |
7294 | |
7295 | entry_count = (buffer_size / sizeof(memorystatus_properties_entry_v1_t)); |
7296 | if (entry_count == 0) { |
7297 | error = EINVAL; |
7298 | goto out; |
7299 | } |
7300 | |
7301 | if ((entries = kalloc_data(buffer_size, Z_WAITOK)) == NULL) { |
7302 | error = ENOMEM; |
7303 | goto out; |
7304 | } |
7305 | |
7306 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_START, MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY, entry_count); |
7307 | |
7308 | if ((error = copyin(buffer, entries, buffer_size)) != 0) { |
7309 | goto out; |
7310 | } |
7311 | |
7312 | if (entries[0].version == MEMORYSTATUS_MPE_VERSION_1) { |
7313 | if ((buffer_size % MEMORYSTATUS_MPE_VERSION_1_SIZE) != 0) { |
7314 | error = EINVAL; |
7315 | goto out; |
7316 | } |
7317 | } else { |
7318 | error = EINVAL; |
7319 | goto out; |
7320 | } |
7321 | |
	/* Verify sanity of input probabilities */
7323 | for (i = 0; i < entry_count; i++) { |
7324 | /* |
7325 | * 0 - low probability of use. |
7326 | * 1 - high probability of use. |
7327 | * |
7328 | * Keeping this field an int (& not a bool) to allow |
7329 | * us to experiment with different values/approaches |
7330 | * later on. |
7331 | */ |
7332 | if (entries[i].use_probability > 1) { |
7333 | error = EINVAL; |
7334 | goto out; |
7335 | } |
7336 | } |
7337 | |
7338 | tmp_table_new_size = sizeof(memorystatus_internal_probabilities_t) * entry_count; |
7339 | |
7340 | if ((tmp_table_new = kalloc_data(tmp_table_new_size, Z_WAITOK | Z_ZERO)) == NULL) { |
7341 | error = ENOMEM; |
7342 | goto out; |
7343 | } |
7344 | |
7345 | proc_list_lock(); |
7346 | |
7347 | if (memorystatus_global_probabilities_table) { |
7348 | tmp_table_old = memorystatus_global_probabilities_table; |
7349 | tmp_table_old_size = memorystatus_global_probabilities_size; |
7350 | } |
7351 | |
7352 | memorystatus_global_probabilities_table = tmp_table_new; |
7353 | memorystatus_global_probabilities_size = tmp_table_new_size; |
7354 | tmp_table_new = NULL; |
7355 | |
7356 | for (i = 0; i < entry_count; i++) { |
7357 | /* Build the table data */ |
		strlcpy(memorystatus_global_probabilities_table[i].proc_name, entries[i].proc_name, MAXCOMLEN + 1);
7359 | memorystatus_global_probabilities_table[i].use_probability = entries[i].use_probability; |
7360 | } |
7361 | |
7362 | proc_list_unlock(); |
7363 | |
7364 | out: |
7365 | KDBG(MEMSTAT_CODE(BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_END, MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY, entry_count, tmp_table_new_size); |
7366 | |
7367 | kfree_data(entries, buffer_size); |
7368 | kfree_data(tmp_table_old, tmp_table_old_size); |
7369 | |
7370 | return error; |
7371 | } |
7372 | |
7373 | static int |
7374 | memorystatus_cmd_grp_set_properties(int32_t flags, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) |
7375 | { |
7376 | int error = 0; |
7377 | |
7378 | if ((flags & MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY) == MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY) { |
7379 | error = memorystatus_cmd_grp_set_priorities(buffer, buffer_size); |
7380 | } else if ((flags & MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY) == MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY) { |
7381 | error = memorystatus_cmd_grp_set_probabilities(buffer, buffer_size); |
7382 | #if CONFIG_FREEZE |
7383 | } else if ((flags & MEMORYSTATUS_FLAGS_GRP_SET_FREEZE_PRIORITY) == MEMORYSTATUS_FLAGS_GRP_SET_FREEZE_PRIORITY) { |
7384 | error = memorystatus_cmd_grp_set_freeze_list(buffer, buffer_size); |
7385 | } else if ((flags & MEMORYSTATUS_FLAGS_GRP_SET_DEMOTE_PRIORITY) == MEMORYSTATUS_FLAGS_GRP_SET_DEMOTE_PRIORITY) { |
7386 | error = memorystatus_cmd_grp_set_demote_list(buffer, buffer_size); |
7387 | #endif /* CONFIG_FREEZE */ |
7388 | } else { |
7389 | error = EINVAL; |
7390 | } |
7391 | |
7392 | return error; |
7393 | } |
7394 | |
7395 | /* |
7396 | * This routine is used to update a process's jetsam priority position and stored user_data. |
7397 | * It is not used for the setting of memory limits, which is why the last 6 args to the |
7398 | * memorystatus_update() call are 0 or FALSE. |
7399 | * |
7400 | * Flags passed into this call are used to distinguish the motivation behind a jetsam priority |
7401 | * transition. By default, the kernel updates the process's original requested priority when |
7402 | * no flag is passed. But when the MEMORYSTATUS_SET_PRIORITY_ASSERTION flag is used, the kernel |
7403 | * updates the process's assertion driven priority. |
7404 | * |
 * The assertion flag was introduced for use by the device's assertion mediator (e.g. runningboardd).
7406 | * When an assertion is controlling a process's jetsam priority, it may conflict with that process's |
7407 | * dirty/clean (active/inactive) jetsam state. The kernel attempts to resolve a priority transition |
7408 | * conflict by reviewing the process state and then choosing the maximum jetsam band at play, |
7409 | * eg: requested priority versus assertion priority. |
7410 | */ |
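/*
 * Illustrative sketch (the field names match the mpp_entry usage below; the
 * pid is hypothetical and the band is just an example): an assertion
 * mediator would drive an assertion priority from userspace roughly as
 * follows. With flags == 0 the same call updates the requested priority
 * instead.
 *
 *	memorystatus_priority_properties_t props = {
 *		.priority  = JETSAM_PRIORITY_FOREGROUND,
 *		.user_data = 0,
 *	};
 *	int rc = memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES,
 *	    pid, MEMORYSTATUS_SET_PRIORITY_ASSERTION, &props, sizeof(props));
 */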
7411 | |
7412 | static int |
7413 | memorystatus_cmd_set_priority_properties(pid_t pid, uint32_t flags, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) |
7414 | { |
7415 | int error = 0; |
7416 | boolean_t is_assertion = FALSE; /* priority is driven by an assertion */ |
7417 | memorystatus_priority_properties_t mpp_entry; |
7418 | |
7419 | /* Validate inputs */ |
7420 | if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(memorystatus_priority_properties_t))) { |
7421 | return EINVAL; |
7422 | } |
7423 | |
7424 | /* Validate flags */ |
7425 | if (flags == 0) { |
7426 | /* |
7427 | * Default. This path updates requestedpriority. |
7428 | */ |
7429 | } else { |
7430 | if (flags & ~(MEMORYSTATUS_SET_PRIORITY_ASSERTION)) { |
7431 | /* |
7432 | * Unsupported bit set in flag. |
7433 | */ |
7434 | return EINVAL; |
7435 | } else if (flags & MEMORYSTATUS_SET_PRIORITY_ASSERTION) { |
7436 | is_assertion = TRUE; |
7437 | } |
7438 | } |
7439 | |
7440 | error = copyin(buffer, &mpp_entry, buffer_size); |
7441 | |
7442 | if (error == 0) { |
7443 | proc_t p; |
7444 | |
7445 | p = proc_find(pid); |
7446 | if (!p) { |
7447 | return ESRCH; |
7448 | } |
7449 | |
7450 | if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { |
7451 | proc_rele(p); |
7452 | return EPERM; |
7453 | } |
7454 | |
7455 | if (is_assertion) { |
			memorystatus_log_debug("memorystatus: set assertion priority(%d) target %s:%d\n",
			    mpp_entry.priority, (*p->p_name ? p->p_name : "unknown"), proc_getpid(p));
7458 | } |
7459 | |
		error = memorystatus_update(p, mpp_entry.priority, mpp_entry.user_data, is_assertion, FALSE, FALSE, 0, 0, FALSE, FALSE);
7461 | proc_rele(p); |
7462 | } |
7463 | |
7464 | return error; |
7465 | } |
7466 | |
7467 | static int |
7468 | memorystatus_cmd_set_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) |
7469 | { |
7470 | int error = 0; |
7471 | memorystatus_memlimit_properties_t mmp_entry; |
7472 | |
7473 | /* Validate inputs */ |
7474 | if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(memorystatus_memlimit_properties_t))) { |
7475 | return EINVAL; |
7476 | } |
7477 | |
7478 | error = copyin(buffer, &mmp_entry, buffer_size); |
7479 | |
7480 | if (error == 0) { |
		error = memorystatus_set_memlimit_properties(pid, &mmp_entry);
7482 | } |
7483 | |
7484 | return error; |
7485 | } |
7486 | |
7487 | #if DEBUG || DEVELOPMENT |
7488 | static int |
7489 | memorystatus_cmd_set_diag_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) |
7490 | { |
7491 | int error = 0; |
7492 | memorystatus_diag_memlimit_properties_t mmp_entry; |
7493 | proc_t p = proc_find(pid); |
7494 | if (!p) { |
7495 | return ESRCH; |
7496 | } |
7497 | |
7498 | /* Validate inputs */ |
7499 | if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(memorystatus_diag_memlimit_properties_t))) { |
7500 | proc_rele(p); |
7501 | return EINVAL; |
7502 | } |
7503 | |
7504 | error = copyin(buffer, &mmp_entry, buffer_size); |
7505 | |
7506 | if (error == 0) { |
7507 | proc_list_lock(); |
7508 | error = memorystatus_set_diag_memlimit_properties_internal(p, &mmp_entry); |
7509 | proc_list_unlock(); |
7510 | } |
7511 | proc_rele(p); |
7512 | return error; |
7513 | } |
7514 | |
7515 | static int |
7516 | memorystatus_cmd_get_diag_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) |
7517 | { |
7518 | int error = 0; |
7519 | memorystatus_diag_memlimit_properties_t mmp_entry; |
7520 | proc_t p = proc_find(pid); |
7521 | if (!p) { |
7522 | return ESRCH; |
7523 | } |
7524 | |
7525 | /* Validate inputs */ |
7526 | if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(memorystatus_diag_memlimit_properties_t))) { |
7527 | proc_rele(p); |
7528 | return EINVAL; |
7529 | } |
7530 | proc_list_lock(); |
7531 | error = memorystatus_get_diag_memlimit_properties_internal(p, &mmp_entry); |
7532 | proc_list_unlock(); |
7533 | proc_rele(p); |
7534 | if (error == 0) { |
7535 | error = copyout(&mmp_entry, buffer, buffer_size); |
7536 | } |
7537 | |
7538 | |
7539 | return error; |
7540 | } |
7541 | #endif //DEBUG || DEVELOPMENT |
7542 | |
7543 | static void |
7544 | memorystatus_get_memlimit_properties_internal(proc_t p, memorystatus_memlimit_properties_t* p_entry) |
7545 | { |
	memset(p_entry, 0, sizeof(memorystatus_memlimit_properties_t));
7547 | |
7548 | if (p->p_memstat_memlimit_active > 0) { |
7549 | p_entry->memlimit_active = p->p_memstat_memlimit_active; |
7550 | } else { |
7551 | task_convert_phys_footprint_limit(-1, &p_entry->memlimit_active); |
7552 | } |
7553 | |
7554 | if (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL) { |
7555 | p_entry->memlimit_active_attr |= MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; |
7556 | } |
7557 | |
7558 | /* |
7559 | * Get the inactive limit and attributes |
7560 | */ |
7561 | if (p->p_memstat_memlimit_inactive <= 0) { |
7562 | task_convert_phys_footprint_limit(-1, &p_entry->memlimit_inactive); |
7563 | } else { |
7564 | p_entry->memlimit_inactive = p->p_memstat_memlimit_inactive; |
7565 | } |
7566 | if (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL) { |
7567 | p_entry->memlimit_inactive_attr |= MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; |
7568 | } |
7569 | } |
7570 | |
7571 | /* |
7572 | * When getting the memlimit settings, we can't simply call task_get_phys_footprint_limit(). |
7573 | * That gets the proc's cached memlimit and there is no guarantee that the active/inactive |
7574 | * limits will be the same in the no-limit case. Instead we convert limits <= 0 using |
7575 | * task_convert_phys_footprint_limit(). It computes the same limit value that would be written |
7576 | * to the task's ledgers via task_set_phys_footprint_limit(). |
7577 | */ |
7578 | static int |
7579 | memorystatus_cmd_get_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) |
7580 | { |
7581 | memorystatus_memlimit_properties2_t mmp_entry; |
7582 | |
7583 | /* Validate inputs */ |
7584 | if ((pid == 0) || (buffer == USER_ADDR_NULL) || |
7585 | ((buffer_size != sizeof(memorystatus_memlimit_properties_t)) && |
7586 | (buffer_size != sizeof(memorystatus_memlimit_properties2_t)))) { |
7587 | return EINVAL; |
7588 | } |
7589 | |
	memset(&mmp_entry, 0, sizeof(memorystatus_memlimit_properties2_t));
7591 | |
7592 | proc_t p = proc_find(pid); |
7593 | if (!p) { |
7594 | return ESRCH; |
7595 | } |
7596 | |
7597 | /* |
7598 | * Get the active limit and attributes. |
7599 | * No locks taken since we hold a reference to the proc. |
7600 | */ |
7601 | |
	memorystatus_get_memlimit_properties_internal(p, &mmp_entry.v1);
7603 | |
7604 | #if CONFIG_JETSAM |
7605 | #if DEVELOPMENT || DEBUG |
7606 | /* |
7607 | * Get the limit increased via SPI |
7608 | */ |
7609 | mmp_entry.memlimit_increase = roundToNearestMB(p->p_memlimit_increase); |
7610 | mmp_entry.memlimit_increase_bytes = p->p_memlimit_increase; |
7611 | #endif /* DEVELOPMENT || DEBUG */ |
7612 | #endif /* CONFIG_JETSAM */ |
7613 | |
7614 | proc_rele(p); |
7615 | |
7616 | int error = copyout(&mmp_entry, buffer, buffer_size); |
7617 | |
7618 | return error; |
7619 | } |
7620 | |
7621 | |
7622 | /* |
7623 | * SPI for kbd - pr24956468 |
7624 | * This is a very simple snapshot that calculates how much a |
7625 | * process's phys_footprint exceeds a specific memory limit. |
7626 | * Only the inactive memory limit is supported for now. |
7627 | * The delta is returned as bytes in excess or zero. |
7628 | */ |
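/*
 * Illustrative calling pattern (a sketch; the usual userspace
 * memorystatus_control() wrapper prototype is assumed): flags must be 0 and
 * the buffer exactly sizeof(uint64_t), per the validation below, or the
 * call fails with EINVAL.
 *
 *	uint64_t excess = 0;
 *	int rc = memorystatus_control(MEMORYSTATUS_CMD_GET_MEMLIMIT_EXCESS,
 *	    pid, 0, &excess, sizeof(excess));
 */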
7629 | static int |
7630 | memorystatus_cmd_get_memlimit_excess_np(pid_t pid, uint32_t flags, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) |
7631 | { |
7632 | int error = 0; |
	uint64_t footprint_in_bytes = 0;
7634 | uint64_t delta_in_bytes = 0; |
7635 | int32_t memlimit_mb = 0; |
7636 | uint64_t memlimit_bytes = 0; |
7637 | |
7638 | /* Validate inputs */ |
7639 | if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(uint64_t)) || (flags != 0)) { |
7640 | return EINVAL; |
7641 | } |
7642 | |
7643 | proc_t p = proc_find(pid); |
7644 | if (!p) { |
7645 | return ESRCH; |
7646 | } |
7647 | |
7648 | /* |
7649 | * Get the inactive limit. |
7650 | * No locks taken since we hold a reference to the proc. |
7651 | */ |
7652 | |
7653 | if (p->p_memstat_memlimit_inactive <= 0) { |
7654 | task_convert_phys_footprint_limit(-1, &memlimit_mb); |
7655 | } else { |
7656 | memlimit_mb = p->p_memstat_memlimit_inactive; |
7657 | } |
7658 | |
7659 | footprint_in_bytes = get_task_phys_footprint(proc_task(p)); |
7660 | |
7661 | proc_rele(p); |
7662 | |
7663 | memlimit_bytes = memlimit_mb * 1024 * 1024; /* MB to bytes */ |
7664 | |
7665 | /* |
7666 | * Computed delta always returns >= 0 bytes |
7667 | */ |
7668 | if (footprint_in_bytes > memlimit_bytes) { |
7669 | delta_in_bytes = footprint_in_bytes - memlimit_bytes; |
7670 | } |
7671 | |
7672 | error = copyout(&delta_in_bytes, buffer, sizeof(delta_in_bytes)); |
7673 | |
7674 | return error; |
7675 | } |
7676 | |
7677 | |
7678 | static int |
7679 | memorystatus_cmd_get_pressure_status(int32_t *retval) |
7680 | { |
7681 | int error; |
7682 | |
7683 | /* Need privilege for check */ |
	error = priv_check_cred(kauth_cred_get(), PRIV_VM_PRESSURE, 0);
7685 | if (error) { |
7686 | return error; |
7687 | } |
7688 | |
7689 | /* Inherently racy, so it's not worth taking a lock here */ |
7690 | *retval = (kVMPressureNormal != memorystatus_vm_pressure_level) ? 1 : 0; |
7691 | |
7692 | return error; |
7693 | } |
7694 | |
7695 | int |
memorystatus_get_pressure_status_kdp(void)
7697 | { |
7698 | return (kVMPressureNormal != memorystatus_vm_pressure_level) ? 1 : 0; |
7699 | } |
7700 | |
7701 | /* |
7702 | * Every process, including a P_MEMSTAT_INTERNAL process (currently only pid 1), is allowed to set a HWM. |
7703 | * |
7704 | * This call is inflexible -- it does not distinguish between active/inactive, fatal/non-fatal |
 * So, with 2-level HWM, preserving previous behavior maps as follows.
7706 | * - treat the limit passed in as both an active and inactive limit. |
7707 | * - treat the is_fatal_limit flag as though it applies to both active and inactive limits. |
7708 | * |
7709 | * When invoked via MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK |
7710 | * - the is_fatal_limit is FALSE, meaning the active and inactive limits are non-fatal/soft |
7711 | * - so mapping is (active/non-fatal, inactive/non-fatal) |
7712 | * |
7713 | * When invoked via MEMORYSTATUS_CMD_SET_JETSAM_TASK_LIMIT |
7714 | * - the is_fatal_limit is TRUE, meaning the process's active and inactive limits are fatal/hard |
7715 | * - so mapping is (active/fatal, inactive/fatal) |
7716 | */ |
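/*
 * Equivalence sketch: MEMORYSTATUS_CMD_SET_JETSAM_TASK_LIMIT with a high
 * water mark of N MB reduces to setting both limit variants fatal, i.e.
 * roughly:
 *
 *	memorystatus_memlimit_properties_t entry = {
 *		.memlimit_active        = N,
 *		.memlimit_active_attr   = MEMORYSTATUS_MEMLIMIT_ATTR_FATAL,
 *		.memlimit_inactive      = N,
 *		.memlimit_inactive_attr = MEMORYSTATUS_MEMLIMIT_ATTR_FATAL,
 *	};
 *	(void)memorystatus_set_memlimit_properties(pid, &entry);
 *
 * MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK is identical except both
 * attr fields stay 0 (non-fatal).
 */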
7717 | |
7718 | #if CONFIG_JETSAM |
7719 | static int |
7720 | memorystatus_cmd_set_jetsam_memory_limit(pid_t pid, int32_t high_water_mark, __unused int32_t *retval, boolean_t is_fatal_limit) |
7721 | { |
7722 | int error = 0; |
7723 | memorystatus_memlimit_properties_t entry; |
7724 | |
7725 | entry.memlimit_active = high_water_mark; |
7726 | entry.memlimit_active_attr = 0; |
7727 | entry.memlimit_inactive = high_water_mark; |
7728 | entry.memlimit_inactive_attr = 0; |
7729 | |
7730 | if (is_fatal_limit == TRUE) { |
7731 | entry.memlimit_active_attr |= MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; |
7732 | entry.memlimit_inactive_attr |= MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; |
7733 | } |
7734 | |
7735 | error = memorystatus_set_memlimit_properties(pid, &entry); |
7736 | return error; |
7737 | } |
7738 | |
7739 | static int |
7740 | memorystatus_cmd_mark_process_coalition_swappable(pid_t pid, __unused int32_t *retval) |
7741 | { |
7742 | int error = 0; |
7743 | proc_t p = PROC_NULL; |
7744 | coalition_t coal = COALITION_NULL; |
7745 | |
7746 | if (!memorystatus_swap_all_apps) { |
7747 | /* Swap is not supported on this device. */ |
7748 | return ENOTSUP; |
7749 | } |
7750 | p = proc_find(pid); |
7751 | if (!p) { |
7752 | return ESRCH; |
7753 | } |
7754 | coal = task_get_coalition((task_t) proc_task(p), COALITION_TYPE_JETSAM); |
7755 | if (coal && coalition_is_leader((task_t) proc_task(p), coal)) { |
7756 | coalition_mark_swappable(coal); |
7757 | } else { |
7758 | /* This SPI is only supported on coalition leaders. */ |
7759 | error = EINVAL; |
7760 | } |
7761 | |
7762 | proc_rele(p); |
7763 | return error; |
7764 | } |
7765 | |
7766 | static int |
7767 | memorystatus_cmd_get_process_coalition_is_swappable(pid_t pid, int32_t *retval) |
7768 | { |
7769 | int error = 0; |
7770 | proc_t p = PROC_NULL; |
7771 | coalition_t coal = COALITION_NULL; |
7772 | |
7773 | if (!memorystatus_swap_all_apps) { |
7774 | /* Swap is not supported on this device. */ |
7775 | return ENOTSUP; |
7776 | } |
7777 | p = proc_find(pid); |
7778 | if (!p) { |
7779 | return ESRCH; |
7780 | } |
7781 | coal = task_get_coalition((task_t) proc_task(p), COALITION_TYPE_JETSAM); |
7782 | if (coal) { |
7783 | *retval = coalition_is_swappable(coal); |
7784 | } else { |
7785 | error = EINVAL; |
7786 | } |
7787 | |
7788 | proc_rele(p); |
7789 | return error; |
7790 | } |
7791 | |
7792 | static int |
7793 | memorystatus_cmd_convert_memlimit_mb(pid_t pid, int32_t limit, int32_t *retval) |
7794 | { |
7795 | int error = 0; |
7796 | proc_t p; |
7797 | p = proc_find(pid); |
7798 | if (!p) { |
7799 | return ESRCH; |
7800 | } |
7801 | if (limit <= 0) { |
7802 | /* |
7803 | * A limit of <= 0 implies that the task gets its default limit. |
7804 | */ |
7805 | limit = memorystatus_get_default_task_active_limit(p); |
7806 | if (limit <= 0) { |
7807 | /* Task uses system wide default limit */ |
7808 | limit = max_task_footprint_mb ? max_task_footprint_mb : INT32_MAX; |
7809 | } |
7810 | *retval = limit; |
7811 | } else { |
7812 | #if DEVELOPMENT || DEBUG |
7813 | /* add the current increase to it, for roots */ |
7814 | limit += roundToNearestMB(p->p_memlimit_increase); |
7815 | #endif /* DEVELOPMENT || DEBUG */ |
7816 | *retval = limit; |
7817 | } |
7818 | |
7819 | proc_rele(p); |
7820 | return error; |
7821 | } |
7822 | #endif /* CONFIG_JETSAM */ |
7823 | |
7824 | static int |
7825 | memorystatus_set_memlimit_properties_internal(proc_t p, memorystatus_memlimit_properties_t *p_entry) |
7826 | { |
7827 | int error = 0; |
7828 | |
7829 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
7830 | |
7831 | /* |
7832 | * Store the active limit variants in the proc. |
7833 | */ |
7834 | SET_ACTIVE_LIMITS_LOCKED(p, p_entry->memlimit_active, p_entry->memlimit_active_attr); |
7835 | |
7836 | /* |
7837 | * Store the inactive limit variants in the proc. |
7838 | */ |
7839 | SET_INACTIVE_LIMITS_LOCKED(p, p_entry->memlimit_inactive, p_entry->memlimit_inactive_attr); |
7840 | |
7841 | /* |
7842 | * Enforce appropriate limit variant by updating the cached values |
7843 | * and writing the ledger. |
7844 | * Limit choice is based on process active/inactive state. |
7845 | */ |
7846 | |
7847 | if (memorystatus_highwater_enabled) { |
7848 | boolean_t is_fatal; |
7849 | boolean_t use_active; |
7850 | |
7851 | if (proc_jetsam_state_is_active_locked(p) == TRUE) { |
7852 | CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal); |
7853 | use_active = TRUE; |
7854 | } else { |
7855 | CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal); |
7856 | use_active = FALSE; |
7857 | } |
7858 | |
7859 | /* Enforce the limit by writing to the ledgers */ |
7860 | error = (task_set_phys_footprint_limit_internal(proc_task(p), ((p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit : -1), NULL, use_active, is_fatal) == 0) ? 0 : EINVAL; |
7861 | |
		memorystatus_log_info(
			"memorystatus_set_memlimit_properties: new limit on pid %d (%dMB %s) current priority (%d) dirty_state?=0x%x %s\n",
			proc_getpid(p), (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1),
			(p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), p->p_memstat_effectivepriority, p->p_memstat_dirty,
			(p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : ""));
7867 | DTRACE_MEMORYSTATUS2(memorystatus_set_memlimit, proc_t, p, int32_t, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1)); |
7868 | } |
7869 | |
7870 | return error; |
7871 | } |
7872 | |
7873 | #if DEBUG || DEVELOPMENT |
7874 | static int |
7875 | memorystatus_set_diag_memlimit_properties_internal(proc_t p, memorystatus_diag_memlimit_properties_t *p_entry) |
7876 | { |
7877 | int error = 0; |
7878 | uint64_t old_limit = 0; |
7879 | |
7880 | LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED); |
7881 | /* Enforce the limit by writing to the ledgers */ |
7882 | error = (task_set_diag_footprint_limit_internal(proc_task(p), p_entry->memlimit, &old_limit) == KERN_SUCCESS) ? KERN_SUCCESS : EINVAL; |
7883 | |
	memorystatus_log_debug("memorystatus_set_diag_memlimit_properties: new limit on pid %d (%lluMB old %lluMB)\n",
	    proc_getpid(p), (p_entry->memlimit > 0 ? p_entry->memlimit : -1), old_limit);
7887 | DTRACE_MEMORYSTATUS2(memorystatus_diag_memlimit_properties_t, proc_t, p, int32_t, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1)); |
7888 | return error; |
7889 | } |
7890 | |
7891 | static int |
7892 | memorystatus_get_diag_memlimit_properties_internal(proc_t p, memorystatus_diag_memlimit_properties_t *p_entry) |
7893 | { |
7894 | int error = 0; |
7895 | /* Enforce the limit by writing to the ledgers */ |
7896 | error = (task_get_diag_footprint_limit_internal(proc_task(p), &p_entry->memlimit, &p_entry->threshold_enabled) == KERN_SUCCESS) ? KERN_SUCCESS : EINVAL; |
7897 | |
7898 | DTRACE_MEMORYSTATUS2(memorystatus_diag_memlimit_properties_t, proc_t, p, int32_t, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1)); |
7899 | return error; |
7900 | } |
7901 | #endif // DEBUG || DEVELOPMENT |
7902 | |
7903 | bool |
7904 | memorystatus_task_has_increased_memory_limit_entitlement(task_t task) |
7905 | { |
	static const char kIncreasedMemoryLimitEntitlement[] = "com.apple.developer.kernel.increased-memory-limit";
7907 | if (memorystatus_entitled_max_task_footprint_mb == 0) { |
7908 | // Entitlement is not supported on this device. |
7909 | return false; |
7910 | } |
7911 | |
	return IOTaskHasEntitlement(task, kIncreasedMemoryLimitEntitlement);
7913 | } |
7914 | |
7915 | bool |
memorystatus_task_has_legacy_footprint_entitlement(task_t task)
{
	return IOTaskHasEntitlement(task, "com.apple.private.memory.legacy_footprint");
7919 | } |
7920 | |
7921 | bool |
memorystatus_task_has_ios13extended_footprint_limit(task_t task)
7923 | { |
7924 | if (max_mem < 1500ULL * 1024 * 1024 || |
7925 | max_mem > 2ULL * 1024 * 1024 * 1024) { |
7926 | /* ios13extended_footprint is only for 2GB devices */ |
7927 | return false; |
7928 | } |
	return IOTaskHasEntitlement(task, "com.apple.developer.memory.ios13extended_footprint");
7930 | } |
7931 | |
7932 | static int32_t |
7933 | memorystatus_get_default_task_active_limit(proc_t p) |
7934 | { |
	bool entitled = memorystatus_task_has_increased_memory_limit_entitlement(proc_task(p));
7936 | int32_t limit = -1; |
7937 | |
7938 | /* |
7939 | * Check for the various entitlement footprint hacks |
	 * and try to apply each one. Note that if multiple entitlements are present,
7941 | * whichever results in the largest limit applies. |
7942 | */ |
7943 | if (entitled) { |
7944 | limit = MAX(limit, memorystatus_entitled_max_task_footprint_mb); |
7945 | } |
7946 | #if __arm64__ |
7947 | if (legacy_footprint_entitlement_mode == LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE && |
	    memorystatus_task_has_legacy_footprint_entitlement(proc_task(p))) {
7949 | limit = MAX(limit, max_task_footprint_mb + legacy_footprint_bonus_mb); |
7950 | } |
7951 | #endif /* __arm64__ */ |
	if (memorystatus_task_has_ios13extended_footprint_limit(proc_task(p))) {
7953 | limit = MAX(limit, memorystatus_ios13extended_footprint_limit_mb); |
7954 | } |
7955 | |
7956 | return limit; |
7957 | } |
7958 | |
7959 | static int32_t |
7960 | memorystatus_get_default_task_inactive_limit(proc_t p) |
7961 | { |
7962 | // Currently the default active and inactive limits are always the same. |
7963 | return memorystatus_get_default_task_active_limit(p); |
7964 | } |
7965 | |
7966 | static int |
7967 | memorystatus_set_memlimit_properties(pid_t pid, memorystatus_memlimit_properties_t *entry) |
7968 | { |
7969 | memorystatus_memlimit_properties_t set_entry; |
7970 | |
7971 | proc_t p = proc_find(pid); |
7972 | if (!p) { |
7973 | return ESRCH; |
7974 | } |
7975 | |
7976 | /* |
7977 | * Check for valid attribute flags. |
7978 | */ |
7979 | const uint32_t valid_attrs = MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; |
7980 | if ((entry->memlimit_active_attr & (~valid_attrs)) != 0) { |
7981 | proc_rele(p); |
7982 | return EINVAL; |
7983 | } |
7984 | if ((entry->memlimit_inactive_attr & (~valid_attrs)) != 0) { |
7985 | proc_rele(p); |
7986 | return EINVAL; |
7987 | } |
7988 | |
7989 | /* |
7990 | * Setup the active memlimit properties |
7991 | */ |
7992 | set_entry.memlimit_active = entry->memlimit_active; |
7993 | set_entry.memlimit_active_attr = entry->memlimit_active_attr & MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; |
7994 | |
7995 | /* |
7996 | * Setup the inactive memlimit properties |
7997 | */ |
7998 | set_entry.memlimit_inactive = entry->memlimit_inactive; |
7999 | set_entry.memlimit_inactive_attr = entry->memlimit_inactive_attr & MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; |
8000 | |
8001 | /* |
8002 | * Setting a limit of <= 0 implies that the process has no |
8003 | * high-water-mark and has no per-task-limit. That means |
 * the system_wide task limit is in place, which, by the way,
8005 | * is always fatal. |
8006 | */ |
8007 | |
8008 | if (set_entry.memlimit_active <= 0) { |
8009 | /* |
8010 | * Enforce the fatal system_wide task limit while process is active. |
8011 | */ |
8012 | set_entry.memlimit_active = memorystatus_get_default_task_active_limit(p); |
8013 | set_entry.memlimit_active_attr = MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; |
8014 | } |
8015 | #if CONFIG_JETSAM |
8016 | #if DEVELOPMENT || DEBUG |
8017 | else { |
8018 | /* add the current increase to it, for roots */ |
8019 | set_entry.memlimit_active += roundToNearestMB(p->p_memlimit_increase); |
8020 | } |
8021 | #endif /* DEVELOPMENT || DEBUG */ |
8022 | #endif /* CONFIG_JETSAM */ |
8023 | |
8024 | if (set_entry.memlimit_inactive <= 0) { |
8025 | /* |
8026 | * Enforce the fatal system_wide task limit while process is inactive. |
8027 | */ |
8028 | set_entry.memlimit_inactive = memorystatus_get_default_task_inactive_limit(p); |
8029 | set_entry.memlimit_inactive_attr = MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; |
8030 | } |
8031 | #if CONFIG_JETSAM |
8032 | #if DEVELOPMENT || DEBUG |
8033 | else { |
8034 | /* add the current increase to it, for roots */ |
8035 | set_entry.memlimit_inactive += roundToNearestMB(p->p_memlimit_increase); |
8036 | } |
8037 | #endif /* DEVELOPMENT || DEBUG */ |
8038 | #endif /* CONFIG_JETSAM */ |
8039 | |
8040 | proc_list_lock(); |
8041 | |
	int error = memorystatus_set_memlimit_properties_internal(p, &set_entry);
8043 | |
8044 | proc_list_unlock(); |
8045 | proc_rele(p); |
8046 | |
8047 | return error; |
8048 | } |
8049 | |
8050 | /* |
8051 | * Returns the jetsam priority (effective or requested) of the process |
8052 | * associated with this task. |
8053 | */ |
8054 | int |
8055 | proc_get_memstat_priority(proc_t p, boolean_t effective_priority) |
8056 | { |
8057 | if (p) { |
8058 | if (effective_priority) { |
8059 | return p->p_memstat_effectivepriority; |
8060 | } else { |
8061 | return p->p_memstat_requestedpriority; |
8062 | } |
8063 | } |
8064 | return 0; |
8065 | } |
8066 | |
8067 | static int |
8068 | memorystatus_get_process_is_managed(pid_t pid, int *is_managed) |
8069 | { |
8070 | proc_t p = NULL; |
8071 | |
8072 | /* Validate inputs */ |
8073 | if (pid == 0) { |
8074 | return EINVAL; |
8075 | } |
8076 | |
8077 | p = proc_find(pid); |
8078 | if (!p) { |
8079 | return ESRCH; |
8080 | } |
8081 | |
8082 | proc_list_lock(); |
8083 | *is_managed = ((p->p_memstat_state & P_MEMSTAT_MANAGED) ? 1 : 0); |
8084 | proc_rele(p); |
8085 | proc_list_unlock(); |
8086 | |
8087 | return 0; |
8088 | } |
8089 | |
8090 | static int |
8091 | memorystatus_set_process_is_managed(pid_t pid, boolean_t set_managed) |
8092 | { |
8093 | proc_t p = NULL; |
8094 | |
8095 | /* Validate inputs */ |
8096 | if (pid == 0) { |
8097 | return EINVAL; |
8098 | } |
8099 | |
8100 | p = proc_find(pid); |
8101 | if (!p) { |
8102 | return ESRCH; |
8103 | } |
8104 | |
8105 | proc_list_lock(); |
8106 | if (set_managed == TRUE) { |
8107 | p->p_memstat_state |= P_MEMSTAT_MANAGED; |
8108 | /* |
8109 | * The P_MEMSTAT_MANAGED bit is set by Runningboard for Apps. |
8110 | * Also opt them in to being frozen (they might have started |
8111 | * off with the P_MEMSTAT_FREEZE_DISABLED bit set.) |
8112 | */ |
8113 | p->p_memstat_state &= ~P_MEMSTAT_FREEZE_DISABLED; |
8114 | } else { |
8115 | p->p_memstat_state &= ~P_MEMSTAT_MANAGED; |
8116 | } |
8117 | proc_list_unlock(); |
8118 | |
8119 | proc_rele(p); |
8120 | |
8121 | return 0; |
8122 | } |
8123 | |
8124 | int |
8125 | memorystatus_control(struct proc *p, struct memorystatus_control_args *args, int *ret) |
8126 | { |
8127 | int error = EINVAL; |
8128 | boolean_t skip_auth_check = FALSE; |
8129 | os_reason_t jetsam_reason = OS_REASON_NULL; |
8130 | |
8131 | #if !CONFIG_JETSAM |
8132 | #pragma unused(ret) |
8133 | #pragma unused(jetsam_reason) |
8134 | #endif |
8135 | |
8136 | /* We don't need entitlements if we're setting / querying the freeze preference or frozen status for a process. */ |
8137 | if (args->command == MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE || |
8138 | args->command == MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE || |
8139 | args->command == MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN) { |
8140 | skip_auth_check = TRUE; |
8141 | } |
8142 | |
8143 | /* |
	 * On development kernels, we don't need entitlements if we're adjusting the limit.
	 * This is required for limit adjustment by dyld when roots are detected; see rdar://99669958.
8146 | */ |
8147 | #if DEVELOPMENT || DEBUG |
8148 | if (args->command == MEMORYSTATUS_CMD_INCREASE_JETSAM_TASK_LIMIT && proc_getpid(p) == args->pid) { |
8149 | skip_auth_check = TRUE; |
8150 | } |
8151 | #endif /* DEVELOPMENT || DEBUG */ |
8152 | |
8153 | /* Need to be root or have entitlement. */ |
	if (!kauth_cred_issuser(kauth_cred_get()) && !IOCurrentTaskHasEntitlement(MEMORYSTATUS_ENTITLEMENT) && !skip_auth_check) {
8155 | error = EPERM; |
8156 | goto out; |
8157 | } |
8158 | |
8159 | /* |
8160 | * Sanity check. |
8161 | * Do not enforce it for snapshots. |
8162 | */ |
8163 | if (args->command != MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT) { |
8164 | if (args->buffersize > MEMORYSTATUS_BUFFERSIZE_MAX) { |
8165 | error = EINVAL; |
8166 | goto out; |
8167 | } |
8168 | } |
8169 | |
8170 | #if CONFIG_MACF |
	error = mac_proc_check_memorystatus_control(p, args->command, args->pid);
8172 | if (error) { |
8173 | goto out; |
8174 | } |
8175 | #endif /* MAC */ |
8176 | |
8177 | switch (args->command) { |
8178 | case MEMORYSTATUS_CMD_GET_PRIORITY_LIST: |
		error = memorystatus_cmd_get_priority_list(args->pid, args->buffer, args->buffersize, ret);
8180 | break; |
8181 | case MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES: |
		error = memorystatus_cmd_set_priority_properties(args->pid, args->flags, args->buffer, args->buffersize, ret);
8183 | break; |
8184 | case MEMORYSTATUS_CMD_SET_MEMLIMIT_PROPERTIES: |
		error = memorystatus_cmd_set_memlimit_properties(args->pid, args->buffer, args->buffersize, ret);
8186 | break; |
8187 | case MEMORYSTATUS_CMD_GET_MEMLIMIT_PROPERTIES: |
		error = memorystatus_cmd_get_memlimit_properties(args->pid, args->buffer, args->buffersize, ret);
8189 | break; |
8190 | case MEMORYSTATUS_CMD_GET_MEMLIMIT_EXCESS: |
		error = memorystatus_cmd_get_memlimit_excess_np(args->pid, args->flags, args->buffer, args->buffersize, ret);
8192 | break; |
8193 | case MEMORYSTATUS_CMD_GRP_SET_PROPERTIES: |
		error = memorystatus_cmd_grp_set_properties((int32_t)args->flags, args->buffer, args->buffersize, ret);
8195 | break; |
8196 | case MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT: |
		error = memorystatus_cmd_get_jetsam_snapshot((int32_t)args->flags, args->buffer, args->buffersize, ret);
8198 | break; |
8199 | #if DEVELOPMENT || DEBUG |
8200 | case MEMORYSTATUS_CMD_SET_TESTING_PID: |
8201 | error = memorystatus_cmd_set_testing_pid((int32_t) args->flags); |
8202 | break; |
8203 | #endif |
8204 | case MEMORYSTATUS_CMD_GET_PRESSURE_STATUS: |
		error = memorystatus_cmd_get_pressure_status(ret);
8206 | break; |
8207 | #if CONFIG_JETSAM |
8208 | case MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK: |
8209 | /* |
8210 | * This call does not distinguish between active and inactive limits. |
8211 | * Default behavior in 2-level HWM world is to set both. |
8212 | * Non-fatal limit is also assumed for both. |
8213 | */ |
8214 | error = memorystatus_cmd_set_jetsam_memory_limit(args->pid, (int32_t)args->flags, ret, FALSE); |
8215 | break; |
8216 | case MEMORYSTATUS_CMD_SET_JETSAM_TASK_LIMIT: |
8217 | /* |
8218 | * This call does not distinguish between active and inactive limits. |
8219 | * Default behavior in 2-level HWM world is to set both. |
8220 | * Fatal limit is also assumed for both. |
8221 | */ |
8222 | error = memorystatus_cmd_set_jetsam_memory_limit(args->pid, (int32_t)args->flags, ret, TRUE); |
8223 | break; |
8224 | case MEMORYSTATUS_CMD_MARK_PROCESS_COALITION_SWAPPABLE: |
8225 | error = memorystatus_cmd_mark_process_coalition_swappable(args->pid, ret); |
8226 | break; |
8227 | |
8228 | case MEMORYSTATUS_CMD_GET_PROCESS_COALITION_IS_SWAPPABLE: |
8229 | error = memorystatus_cmd_get_process_coalition_is_swappable(args->pid, ret); |
8230 | break; |
8231 | |
8232 | case MEMORYSTATUS_CMD_CONVERT_MEMLIMIT_MB: |
8233 | error = memorystatus_cmd_convert_memlimit_mb(args->pid, (int32_t) args->flags, ret); |
8234 | break; |
8235 | #endif /* CONFIG_JETSAM */ |
8236 | /* Test commands */ |
8237 | #if DEVELOPMENT || DEBUG |
8238 | case MEMORYSTATUS_CMD_TEST_JETSAM: |
8239 | jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_GENERIC); |
8240 | if (jetsam_reason == OS_REASON_NULL) { |
			memorystatus_log_error("memorystatus_control: failed to allocate jetsam reason\n");
8242 | } |
8243 | |
8244 | error = memorystatus_kill_process_sync(args->pid, kMemorystatusKilled, jetsam_reason) ? 0 : EINVAL; |
8245 | break; |
8246 | case MEMORYSTATUS_CMD_TEST_JETSAM_SORT: |
8247 | error = memorystatus_cmd_test_jetsam_sort(args->pid, (int32_t)args->flags, args->buffer, args->buffersize); |
8248 | break; |
8249 | #else /* DEVELOPMENT || DEBUG */ |
8250 | #pragma unused(jetsam_reason) |
8251 | #endif /* DEVELOPMENT || DEBUG */ |
8252 | case MEMORYSTATUS_CMD_AGGRESSIVE_JETSAM_LENIENT_MODE_ENABLE: |
8253 | if (memorystatus_aggressive_jetsam_lenient_allowed == FALSE) { |
8254 | #if DEVELOPMENT || DEBUG |
			memorystatus_log_info("Enabling Lenient Mode\n");
8256 | #endif /* DEVELOPMENT || DEBUG */ |
8257 | |
8258 | memorystatus_aggressive_jetsam_lenient_allowed = TRUE; |
8259 | memorystatus_aggressive_jetsam_lenient = TRUE; |
8260 | error = 0; |
8261 | } |
8262 | break; |
8263 | case MEMORYSTATUS_CMD_AGGRESSIVE_JETSAM_LENIENT_MODE_DISABLE: |
8264 | #if DEVELOPMENT || DEBUG |
		memorystatus_log_info("Disabling Lenient mode\n");
8266 | #endif /* DEVELOPMENT || DEBUG */ |
8267 | memorystatus_aggressive_jetsam_lenient_allowed = FALSE; |
8268 | memorystatus_aggressive_jetsam_lenient = FALSE; |
8269 | error = 0; |
8270 | break; |
8271 | case MEMORYSTATUS_CMD_GET_AGGRESSIVE_JETSAM_LENIENT_MODE: |
8272 | *ret = (memorystatus_aggressive_jetsam_lenient ? 1 : 0); |
8273 | error = 0; |
8274 | break; |
8275 | case MEMORYSTATUS_CMD_PRIVILEGED_LISTENER_ENABLE: |
8276 | case MEMORYSTATUS_CMD_PRIVILEGED_LISTENER_DISABLE: |
		error = memorystatus_low_mem_privileged_listener(args->command);
8278 | break; |
8279 | |
8280 | case MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE: |
8281 | case MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_DISABLE: |
		error = memorystatus_update_inactive_jetsam_priority_band(args->pid, args->command, JETSAM_PRIORITY_ELEVATED_INACTIVE, args->flags ? TRUE : FALSE);
8283 | break; |
8284 | case MEMORYSTATUS_CMD_SET_PROCESS_IS_MANAGED: |
		error = memorystatus_set_process_is_managed(args->pid, args->flags);
8286 | break; |
8287 | |
8288 | case MEMORYSTATUS_CMD_GET_PROCESS_IS_MANAGED: |
		error = memorystatus_get_process_is_managed(args->pid, ret);
8290 | break; |
8291 | |
8292 | #if CONFIG_FREEZE |
8293 | case MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE: |
8294 | error = memorystatus_set_process_is_freezable(args->pid, args->flags ? TRUE : FALSE); |
8295 | break; |
8296 | |
8297 | case MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE: |
8298 | error = memorystatus_get_process_is_freezable(args->pid, ret); |
8299 | break; |
8300 | case MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN: |
8301 | error = memorystatus_get_process_is_frozen(args->pid, ret); |
8302 | break; |
8303 | |
8304 | case MEMORYSTATUS_CMD_FREEZER_CONTROL: |
8305 | error = memorystatus_freezer_control(args->flags, args->buffer, args->buffersize, ret); |
8306 | break; |
8307 | #endif /* CONFIG_FREEZE */ |
8308 | |
8309 | #if DEVELOPMENT || DEBUG |
8310 | case MEMORYSTATUS_CMD_INCREASE_JETSAM_TASK_LIMIT: |
8311 | error = memorystatus_cmd_increase_jetsam_task_limit(args->pid, args->flags); |
8312 | break; |
8313 | case MEMORYSTATUS_CMD_SET_DIAG_LIMIT: |
8314 | error = memorystatus_cmd_set_diag_memlimit_properties(args->pid, args->buffer, args->buffersize, ret); |
8315 | break; |
8316 | case MEMORYSTATUS_CMD_GET_DIAG_LIMIT: |
8317 | error = memorystatus_cmd_get_diag_memlimit_properties(args->pid, args->buffer, args->buffersize, ret); |
8318 | break; |
8319 | #endif /* DEVELOPMENT || DEBUG */ |
8320 | |
8321 | default: |
8322 | error = EINVAL; |
8323 | break; |
8324 | } |
8325 | |
8326 | out: |
8327 | return error; |
8328 | } |
8329 | |
8330 | /* Coalition support */ |
8331 | |
8332 | /* sorting info for a particular priority bucket */ |
8333 | typedef struct memstat_sort_info { |
8334 | coalition_t msi_coal; |
8335 | uint64_t msi_page_count; |
8336 | pid_t msi_pid; |
8337 | int msi_ntasks; |
8338 | } memstat_sort_info_t; |
8339 | |
8340 | /* |
8341 | * qsort from smallest page count to largest page count |
8342 | * |
8343 | * return < 0 for a < b |
8344 | * 0 for a == b |
8345 | * > 0 for a > b |
8346 | */ |
8347 | static int |
8348 | memstat_asc_cmp(const void *a, const void *b) |
8349 | { |
8350 | const memstat_sort_info_t *msA = (const memstat_sort_info_t *)a; |
8351 | const memstat_sort_info_t *msB = (const memstat_sort_info_t *)b; |
8352 | |
	/* Compare explicitly; truncating a 64-bit difference to int can misorder large counts. */
	if (msA->msi_page_count < msB->msi_page_count) {
		return -1;
	}
	if (msA->msi_page_count > msB->msi_page_count) {
		return 1;
	}
	return 0;
8354 | } |
8355 | |
8356 | /* |
8357 | * Return the number of pids rearranged during this sort. |
8358 | */ |
8359 | static int |
8360 | memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coal_sort_order) |
8361 | { |
8362 | #define MAX_SORT_PIDS 80 |
8363 | #define MAX_COAL_LEADERS 10 |
8364 | |
8365 | unsigned int b = bucket_index; |
8366 | int nleaders = 0; |
8367 | int ntasks = 0; |
8368 | proc_t p = NULL; |
8369 | coalition_t coal = COALITION_NULL; |
8370 | int pids_moved = 0; |
8371 | int total_pids_moved = 0; |
8372 | int i; |
8373 | |
8374 | /* |
8375 | * The system is typically under memory pressure when in this |
8376 | * path, hence, we want to avoid dynamic memory allocation. |
8377 | */ |
8378 | memstat_sort_info_t leaders[MAX_COAL_LEADERS]; |
8379 | pid_t pid_list[MAX_SORT_PIDS]; |
8380 | |
8381 | if (bucket_index >= MEMSTAT_BUCKET_COUNT) { |
8382 | return 0; |
8383 | } |
8384 | |
8385 | /* |
8386 | * Clear the array that holds coalition leader information |
8387 | */ |
8388 | for (i = 0; i < MAX_COAL_LEADERS; i++) { |
8389 | leaders[i].msi_coal = COALITION_NULL; |
8390 | leaders[i].msi_page_count = 0; /* will hold total coalition page count */ |
8391 | leaders[i].msi_pid = 0; /* will hold coalition leader pid */ |
8392 | leaders[i].msi_ntasks = 0; /* will hold the number of tasks in a coalition */ |
8393 | } |
8394 | |
	p = memorystatus_get_first_proc_locked(&b, FALSE);
8396 | while (p) { |
		coal = task_get_coalition(proc_task(p), COALITION_TYPE_JETSAM);
		if (coalition_is_leader(proc_task(p), coal)) {
8399 | if (nleaders < MAX_COAL_LEADERS) { |
8400 | int coal_ntasks = 0; |
				uint64_t coal_page_count = coalition_get_page_count(coal, &coal_ntasks);
8402 | leaders[nleaders].msi_coal = coal; |
8403 | leaders[nleaders].msi_page_count = coal_page_count; |
8404 | leaders[nleaders].msi_pid = proc_getpid(p); /* the coalition leader */ |
8405 | leaders[nleaders].msi_ntasks = coal_ntasks; |
8406 | nleaders++; |
8407 | } else { |
8408 | /* |
8409 | * We've hit MAX_COAL_LEADERS meaning we can handle no more coalitions. |
8410 | * Abandoned coalitions will linger at the tail of the priority band |
8411 | * when this sort session ends. |
8412 | * TODO: should this be an assert? |
8413 | */ |
8414 | memorystatus_log_error( |
				    "%s: WARNING: more than %d leaders in priority band [%d]\n",
8416 | __FUNCTION__, MAX_COAL_LEADERS, bucket_index); |
8417 | break; |
8418 | } |
8419 | } |
		p = memorystatus_get_next_proc_locked(&b, p, FALSE);
8421 | } |
8422 | |
8423 | if (nleaders == 0) { |
8424 | /* Nothing to sort */ |
8425 | return 0; |
8426 | } |
8427 | |
8428 | /* |
8429 | * Sort the coalition leader array, from smallest coalition page count |
8430 | * to largest coalition page count. When inserted in the priority bucket, |
8431 | * smallest coalition is handled first, resulting in the last to be jetsammed. |
8432 | */ |
8433 | if (nleaders > 1) { |
		qsort(leaders, nleaders, sizeof(memstat_sort_info_t), memstat_asc_cmp);
8435 | } |
8436 | |
8437 | #if 0 |
8438 | for (i = 0; i < nleaders; i++) { |
		printf("%s: coal_leader[%d of %d] pid[%d] pages[%llu] ntasks[%d]\n",
8440 | __FUNCTION__, i, nleaders, leaders[i].msi_pid, leaders[i].msi_page_count, |
8441 | leaders[i].msi_ntasks); |
8442 | } |
8443 | #endif |
8444 | |
8445 | /* |
8446 | * During coalition sorting, processes in a priority band are rearranged |
8447 | * by being re-inserted at the head of the queue. So, when handling a |
 * list, the first process that gets moved to the head of the queue
8449 | * ultimately gets pushed toward the queue tail, and hence, jetsams last. |
8450 | * |
8451 | * So, for example, the coalition leader is expected to jetsam last, |
8452 | * after its coalition members. Therefore, the coalition leader is |
8453 | * inserted at the head of the queue first. |
8454 | * |
8455 | * After processing a coalition, the jetsam order is as follows: |
8456 | * undefs(jetsam first), extensions, xpc services, leader(jetsam last) |
8457 | */ |
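/*
 * Concretely, for one leader the loop below issues four head-insert passes:
 * leader first, then xpc services, then extensions, then undefs. Each pass
 * pushes the previous ones toward the tail, so the final head-to-tail
 * (jetsam-first to jetsam-last) order within the band is:
 * undefs, extensions, xpc services, leader.
 */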
8458 | |
8459 | /* |
8460 | * Coalition members are rearranged in the priority bucket here, |
8461 | * based on their coalition role. |
8462 | */ |
8463 | total_pids_moved = 0; |
8464 | for (i = 0; i < nleaders; i++) { |
8465 | /* a bit of bookkeeping */ |
8466 | pids_moved = 0; |
8467 | |
8468 | /* Coalition leaders are jetsammed last, so move into place first */ |
8469 | pid_list[0] = leaders[i].msi_pid; |
		pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, 1);
8471 | |
8472 | /* xpc services should jetsam after extensions */ |
		ntasks = coalition_get_pid_list(leaders[i].msi_coal, COALITION_ROLEMASK_XPC,
		    coal_sort_order, pid_list, MAX_SORT_PIDS);
8475 | |
8476 | if (ntasks > 0) { |
8477 | pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, |
			    (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS));
8479 | } |
8480 | |
8481 | /* extensions should jetsam after unmarked processes */ |
		ntasks = coalition_get_pid_list(leaders[i].msi_coal, COALITION_ROLEMASK_EXT,
		    coal_sort_order, pid_list, MAX_SORT_PIDS);
8484 | |
8485 | if (ntasks > 0) { |
8486 | pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, |
			    (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS));
8488 | } |
8489 | |
8490 | /* undefined coalition members should be the first to jetsam */ |
		ntasks = coalition_get_pid_list(leaders[i].msi_coal, COALITION_ROLEMASK_UNDEF,
		    coal_sort_order, pid_list, MAX_SORT_PIDS);
8493 | |
8494 | if (ntasks > 0) { |
8495 | pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, |
			    (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS));
8497 | } |
8498 | |
8499 | #if 0 |
8500 | if (pids_moved == leaders[i].msi_ntasks) { |
8501 | /* |
8502 | * All the pids in the coalition were found in this band. |
8503 | */ |
			printf("%s: pids_moved[%d] equal total coalition ntasks[%d]\n", __FUNCTION__,
8505 | pids_moved, leaders[i].msi_ntasks); |
8506 | } else if (pids_moved > leaders[i].msi_ntasks) { |
8507 | /* |
8508 | * Apparently new coalition members showed up during the sort? |
8509 | */ |
			printf("%s: pids_moved[%d] were greater than expected coalition ntasks[%d]\n", __FUNCTION__,
8511 | pids_moved, leaders[i].msi_ntasks); |
8512 | } else { |
8513 | /* |
8514 | * Apparently not all the pids in the coalition were found in this band? |
8515 | */ |
			printf("%s: pids_moved[%d] were less than expected coalition ntasks[%d]\n", __FUNCTION__,
8517 | pids_moved, leaders[i].msi_ntasks); |
8518 | } |
8519 | #endif |
8520 | |
8521 | total_pids_moved += pids_moved; |
8522 | } /* end for */ |
8523 | |
8524 | return total_pids_moved; |
8525 | } |
8526 | |
8527 | |
8528 | /* |
8529 | * Traverse a list of pids, searching for each within the priority band provided. |
8530 | * If pid is found, move it to the front of the priority band. |
8531 | * Never searches outside the priority band provided. |
8532 | * |
8533 | * Input: |
8534 | * bucket_index - jetsam priority band. |
8535 | * pid_list - pointer to a list of pids. |
8536 | * list_sz - number of pids in the list. |
8537 | * |
 * Pid list ordering is important in that
8539 | * pid_list[n] is expected to jetsam ahead of pid_list[n+1]. |
8540 | * The sort_order is set by the coalition default. |
8541 | * |
8542 | * Return: |
8543 | * the number of pids found and hence moved within the priority band. |
8544 | */ |
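/*
 * Worked example: with pid_list = {A, B, C} and a band ordered head-to-tail
 * as {X, A, B, C}, the loop below walks the list back-to-front (C, then B,
 * then A) and head-inserts each match, yielding {A, B, C, X}. pid_list[0]
 * therefore ends up at the head and is jetsammed first, as documented above.
 */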
8545 | static int |
8546 | memorystatus_move_list_locked(unsigned int bucket_index, pid_t *pid_list, int list_sz) |
8547 | { |
8548 | memstat_bucket_t *current_bucket; |
8549 | int i; |
8550 | int found_pids = 0; |
8551 | |
8552 | if ((pid_list == NULL) || (list_sz <= 0)) { |
8553 | return 0; |
8554 | } |
8555 | |
8556 | if (bucket_index >= MEMSTAT_BUCKET_COUNT) { |
8557 | return 0; |
8558 | } |
8559 | |
8560 | current_bucket = &memstat_bucket[bucket_index]; |
8561 | for (i = 0; i < list_sz; i++) { |
8562 | unsigned int b = bucket_index; |
8563 | proc_t p = NULL; |
8564 | proc_t aProc = NULL; |
8565 | pid_t aPid; |
8566 | int list_index; |
8567 | |
8568 | list_index = ((list_sz - 1) - i); |
8569 | aPid = pid_list[list_index]; |
8570 | |
8571 | /* never search beyond bucket_index provided */ |
		p = memorystatus_get_first_proc_locked(&b, FALSE);
8573 | while (p) { |
8574 | if (proc_getpid(p) == aPid) { |
8575 | aProc = p; |
8576 | break; |
8577 | } |
			p = memorystatus_get_next_proc_locked(&b, p, FALSE);
8579 | } |
8580 | |
8581 | if (aProc == NULL) { |
8582 | /* pid not found in this band, just skip it */ |
8583 | continue; |
8584 | } else { |
8585 | TAILQ_REMOVE(¤t_bucket->list, aProc, p_memstat_list); |
8586 | TAILQ_INSERT_HEAD(¤t_bucket->list, aProc, p_memstat_list); |
8587 | found_pids++; |
8588 | } |
8589 | } |
8590 | return found_pids; |
8591 | } |
8592 | |
8593 | int |
8594 | memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index) |
8595 | { |
8596 | int32_t i = JETSAM_PRIORITY_IDLE; |
8597 | int count = 0; |
8598 | |
8599 | if (max_bucket_index >= MEMSTAT_BUCKET_COUNT) { |
8600 | return -1; |
8601 | } |
8602 | |
8603 | while (i <= max_bucket_index) { |
8604 | count += memstat_bucket[i++].count; |
8605 | } |
8606 | |
8607 | return count; |
8608 | } |

int
memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap)
{
#if !CONFIG_JETSAM
	if (!p || (!isApp(p)) || (p->p_memstat_state & (P_MEMSTAT_INTERNAL | P_MEMSTAT_MANAGED))) {
		/*
		 * Ineligible processes OR system processes, e.g. launchd.
		 *
		 * We also skip processes that have the P_MEMSTAT_MANAGED bit set, i.e.
		 * they're managed by assertiond. These are iOS apps that have been ported
		 * to macOS. assertiond might be in the process of modifying the app's
		 * priority / memory limit - so it might have the proc_list lock, and then try
		 * to take the task lock. Meanwhile we've entered this function with the task lock
		 * held, and we need the proc_list lock below. So we'll deadlock with assertiond.
		 *
		 * It should be fine to read the P_MEMSTAT_MANAGED bit without the proc_list
		 * lock here, since assertiond only sets this bit on process launch.
		 */
		return -1;
	}

	/*
	 * For macOS only:
	 * We would like to use memorystatus_update() here to move the processes
	 * within the bands. Unfortunately memorystatus_update() calls
	 * memorystatus_update_priority_locked() which uses any band transitions
	 * as an indication to modify ledgers. For that it needs the task lock
	 * and since we came into this function with the task lock held, we'll deadlock.
	 *
	 * Unfortunately we can't completely disable ledger updates because we still
	 * need the ledger updates for a subset of processes, i.e. daemons.
	 * When all processes on all platforms support memory limits, we can simply call
	 * memorystatus_update().
	 *
	 * memorystatus_update_priority_locked() also has some logic to deal with
	 * 'aging' which, currently, is only applicable on CONFIG_JETSAM configs. So,
	 * until every platform has CONFIG_JETSAM we'll need to do this explicit band
	 * transition.
	 */

	memstat_bucket_t *current_bucket, *new_bucket;
	int32_t priority = 0;

	proc_list_lock();

	if (proc_list_exited(p) ||
	    (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED | P_MEMSTAT_SKIP))) {
		/*
		 * If the process is on its way out OR
		 * jetsam has already tried and failed to kill this process,
		 * let's skip the whole jetsam band transition.
		 */
		proc_list_unlock();
		return 0;
	}

	if (is_appnap) {
		current_bucket = &memstat_bucket[p->p_memstat_effectivepriority];
		new_bucket = &memstat_bucket[JETSAM_PRIORITY_IDLE];
		priority = JETSAM_PRIORITY_IDLE;
	} else {
		if (p->p_memstat_effectivepriority != JETSAM_PRIORITY_IDLE) {
			/*
			 * It is possible that someone pulled this process
			 * out of the IDLE band without updating its app-nap
			 * parameters.
			 */
			proc_list_unlock();
			return 0;
		}

		current_bucket = &memstat_bucket[JETSAM_PRIORITY_IDLE];
		new_bucket = &memstat_bucket[p->p_memstat_requestedpriority];
		priority = p->p_memstat_requestedpriority;
	}

	TAILQ_REMOVE(&current_bucket->list, p, p_memstat_list);
	current_bucket->count--;
	if (p->p_memstat_relaunch_flags & (P_MEMSTAT_RELAUNCH_HIGH)) {
		current_bucket->relaunch_high_count--;
	}
	TAILQ_INSERT_TAIL(&new_bucket->list, p, p_memstat_list);
	new_bucket->count++;
	if (p->p_memstat_relaunch_flags & (P_MEMSTAT_RELAUNCH_HIGH)) {
		new_bucket->relaunch_high_count++;
	}

	/*
	 * Record idle start or idle delta.
	 */
	if (p->p_memstat_effectivepriority == priority) {
		/*
		 * This process is not transitioning between
		 * jetsam priority buckets. Do nothing.
		 */
	} else if (p->p_memstat_effectivepriority == JETSAM_PRIORITY_IDLE) {
		uint64_t now;
		/*
		 * Transitioning out of the idle priority bucket.
		 * Record idle delta.
		 */
		assert(p->p_memstat_idle_start != 0);
		now = mach_absolute_time();
		if (now > p->p_memstat_idle_start) {
			p->p_memstat_idle_delta = now - p->p_memstat_idle_start;
		}
	} else if (priority == JETSAM_PRIORITY_IDLE) {
		/*
		 * Transitioning into the idle priority bucket.
		 * Record idle start.
		 */
		p->p_memstat_idle_start = mach_absolute_time();
	}

	KDBG(MEMSTAT_CODE(BSD_MEMSTAT_CHANGE_PRIORITY), proc_getpid(p), priority, p->p_memstat_effectivepriority);

	p->p_memstat_effectivepriority = priority;

	proc_list_unlock();

	return 0;

#else /* !CONFIG_JETSAM */
#pragma unused(p)
#pragma unused(is_appnap)
	return -1;
#endif /* !CONFIG_JETSAM */
}
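
/*
 * Illustrative call pattern (hypothetical caller, not part of the build). The
 * comments above note that we enter with the task lock held, which is why the
 * band transition is done by hand here instead of via memorystatus_update():
 *
 *	(void)memorystatus_update_priority_for_appnap(p, TRUE);   // park in the IDLE band
 *	...
 *	(void)memorystatus_update_priority_for_appnap(p, FALSE);  // restore requested priority
 *
 * The FALSE path is deliberately a no-op unless the process is currently
 * sitting in JETSAM_PRIORITY_IDLE.
 */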

uint64_t
memorystatus_available_memory_internal(struct proc *p)
{
#ifdef XNU_TARGET_OS_OSX
	if (p->p_memstat_memlimit <= 0) {
		return 0;
	}
#endif /* XNU_TARGET_OS_OSX */
	const uint64_t footprint_in_bytes = get_task_phys_footprint(proc_task(p));
	int32_t memlimit_mb;
	int64_t memlimit_bytes;
	int64_t rc;

	if (isApp(p) == FALSE) {
		return 0;
	}

	if (p->p_memstat_memlimit > 0) {
		memlimit_mb = p->p_memstat_memlimit;
	} else if (task_convert_phys_footprint_limit(-1, &memlimit_mb) != KERN_SUCCESS) {
		return 0;
	}

	if (memlimit_mb <= 0) {
		memlimit_bytes = INT_MAX & ~((1 << 20) - 1);
	} else {
		memlimit_bytes = ((int64_t) memlimit_mb) << 20;
	}

	rc = memlimit_bytes - footprint_in_bytes;

	return (rc >= 0) ? rc : 0;
}
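
/*
 * Worked example (hypothetical numbers): with memlimit_mb == 100 the limit is
 * 100 << 20 == 104857600 bytes; a 60 MB footprint (62914560 bytes) leaves
 * 104857600 - 62914560 = 41943040 bytes (40 MB) available. When no limit is
 * in effect (memlimit_mb <= 0), INT_MAX & ~((1 << 20) - 1) rounds INT_MAX
 * down to a whole MB, i.e. 2146435072 bytes (2047 MB).
 */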

int
memorystatus_available_memory(struct proc *p, __unused struct memorystatus_available_memory_args *args, uint64_t *ret)
{
	*ret = memorystatus_available_memory_internal(p);

	return 0;
}
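
/*
 * Sketch of a userspace consumer (assuming the os_proc_available_memory()
 * wrapper from <os/proc.h>, which funnels into this syscall; the 50 MB
 * watermark is a made-up tuning value):
 *
 *	#include <os/proc.h>
 *
 *	size_t avail = os_proc_available_memory();
 *	if (avail != 0 && avail < 50 * 1024 * 1024) {
 *		// within ~50 MB of the jetsam limit: shed caches now
 *	}
 *
 * Per the kernel paths above, 0 is returned both when no limit applies (e.g.
 * non-app processes) and when the footprint already meets or exceeds the
 * limit, so 0 alone does not distinguish "unlimited" from "exhausted".
 */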

void
memorystatus_log_system_health(const memorystatus_system_health_t *status)
{
	static bool healthy = true;
	bool prev_healthy = healthy;

	healthy = memorystatus_is_system_healthy(status);

	/*
	 * Avoid spamming logs by only logging when the health level has changed.
	 */
	if (prev_healthy == healthy) {
		return;
	}

#if CONFIG_JETSAM
	if (healthy && !status->msh_available_pages_below_pressure) {
		memorystatus_log("memorystatus: System is healthy. memorystatus_available_pages: %llu compressor_size:%u\n",
		    (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES, vm_compressor_pool_size());
		return;
	}
	if (healthy && status->msh_available_pages_below_pressure) {
		memorystatus_log(
			"memorystatus: System is below pressure level, but otherwise healthy. memorystatus_available_pages: %llu compressor_size:%u\n",
			(uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES, vm_compressor_pool_size());
		return;
	}
	memorystatus_log("memorystatus: System is unhealthy! memorystatus_available_pages: %llu compressor_size:%u\n",
	    (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES, vm_compressor_pool_size());
	memorystatus_log(
		"memorystatus: available_pages_below_critical=%d, compressor_needs_to_swap=%d, compressor_is_low_on_space=%d compressor_is_thrashing=%d compressed_pages_nearing_limit=%d filecache_is_thrashing=%d zone_map_is_exhausted=%d phantom_cache_pressure=%d swappable_compressor_segments_over_limit=%d swapin_queue_over_limit=%d swap_low=%d swap_full=%d\n",
		status->msh_available_pages_below_critical, status->msh_compressor_needs_to_swap,
		status->msh_compressor_is_low_on_space, status->msh_compressor_is_thrashing,
		status->msh_compressed_pages_nearing_limit, status->msh_filecache_is_thrashing,
		status->msh_zone_map_is_exhausted, status->msh_phantom_cache_pressure,
		status->msh_swappable_compressor_segments_over_limit, status->msh_swapin_queue_over_limit,
		status->msh_swap_low_on_space, status->msh_swap_out_of_space);
#else /* CONFIG_JETSAM */
	memorystatus_log("memorystatus: System is %s. memorystatus_available_pages: %llu compressor_size:%u\n",
	    healthy ? "healthy" : "unhealthy",
	    (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES, vm_compressor_pool_size());
	if (!healthy) {
		memorystatus_log("memorystatus: zone_map_is_exhausted=%d\n",
		    status->msh_zone_map_is_exhausted);
	}
#endif /* CONFIG_JETSAM */
}
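
/*
 * Because the previous health state is cached in a function-local static,
 * each healthy/unhealthy transition produces exactly one burst of output.
 * Sample line on a healthy edge (values are placeholders):
 *
 *	memorystatus: System is healthy. memorystatus_available_pages: 48213 compressor_size:10900
 */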

uint32_t
memorystatus_pick_kill_cause(const memorystatus_system_health_t *status)
{
	assert(!memorystatus_is_system_healthy(status));
#if CONFIG_JETSAM
	if (status->msh_compressor_is_thrashing) {
		return kMemorystatusKilledVMCompressorThrashing;
	} else if (status->msh_compressor_is_low_on_space) {
		return kMemorystatusKilledVMCompressorSpaceShortage;
	} else if (status->msh_filecache_is_thrashing) {
		return kMemorystatusKilledFCThrashing;
	} else if (status->msh_zone_map_is_exhausted) {
		return kMemorystatusKilledZoneMapExhaustion;
	} else if (status->msh_pageout_starved) {
		return kMemorystatusKilledVMPageoutStarvation;
	} else {
		assert(status->msh_available_pages_below_critical);
		return kMemorystatusKilledVMPageShortage;
	}
#else /* CONFIG_JETSAM */
	assert(status->msh_zone_map_is_exhausted);
	(void) status;
	return kMemorystatusKilledZoneMapExhaustion;
#endif /* CONFIG_JETSAM */
}
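
/*
 * The if/else ladder encodes precedence: if, say, both
 * msh_compressor_is_thrashing and msh_available_pages_below_critical are set,
 * the kill is attributed to kMemorystatusKilledVMCompressorThrashing;
 * kMemorystatusKilledVMPageShortage is reported only when no more specific
 * condition holds.
 */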

#if DEVELOPMENT || DEBUG
static int
memorystatus_cmd_increase_jetsam_task_limit(pid_t pid, uint32_t byte_increase)
{
	memorystatus_memlimit_properties_t mmp_entry;

	/* Validate inputs */
	if ((pid == 0) || (byte_increase == 0)) {
		return EINVAL;
	}

	proc_t p = proc_find(pid);

	if (!p) {
		return ESRCH;
	}

	const uint32_t current_memlimit_increase = roundToNearestMB(p->p_memlimit_increase);
	/* round to page */
	const int32_t page_aligned_increase = (int32_t) MIN(round_page(p->p_memlimit_increase + byte_increase), INT32_MAX);

	proc_list_lock();

	memorystatus_get_memlimit_properties_internal(p, &mmp_entry);

	if (mmp_entry.memlimit_active > 0) {
		mmp_entry.memlimit_active -= current_memlimit_increase;
		mmp_entry.memlimit_active += roundToNearestMB(page_aligned_increase);
	}

	if (mmp_entry.memlimit_inactive > 0) {
		mmp_entry.memlimit_inactive -= current_memlimit_increase;
		mmp_entry.memlimit_inactive += roundToNearestMB(page_aligned_increase);
	}

	/*
	 * Store the updated delta limit in the proc.
	 */
	p->p_memlimit_increase = page_aligned_increase;

	int error = memorystatus_set_memlimit_properties_internal(p, &mmp_entry);

	proc_list_unlock();
	proc_rele(p);

	return error;
}
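
/*
 * Worked example (hypothetical values): with a previously applied increase of
 * 2 MB, a call adding byte_increase == 1 MB computes the new cumulative delta
 * round_page(2 MB + 1 MB) == 3 MB, then rebuilds both limits (tracked in MB)
 * by backing out the old rounded increase before adding the new one:
 *
 *	mmp_entry.memlimit_active = mmp_entry.memlimit_active - 2 + 3;
 *
 * This is why repeated calls accumulate into a single delta instead of
 * stacking on top of each other.
 */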
#endif /* DEVELOPMENT || DEBUG */
