1 | /* |
2 | * Copyright (c) 2019-2020 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. Please obtain a copy of the License at |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this |
11 | * file. |
12 | * |
13 | * The Original Code and all software distributed under the License are |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
18 | * Please see the License for the specific language governing rights and |
19 | * limitations under the License. |
20 | * |
21 | * @APPLE_LICENSE_HEADER_END@ |
22 | */ |
23 | |
24 | #include <kern/sched_prim.h> |
25 | #include <kern/ledger.h> |
26 | #include <kern/policy_internal.h> |
27 | |
28 | #include <libkern/OSDebug.h> |
29 | |
30 | #include <mach/mach_types.h> |
31 | |
32 | #include <machine/limits.h> |
33 | |
34 | #include <os/hash.h> |
35 | |
36 | #include <vm/vm_compressor_pager.h> |
37 | #include <vm/vm_kern.h> /* kmem_alloc */ |
38 | #include <vm/vm_page.h> |
39 | #include <vm/vm_pageout.h> |
40 | #include <vm/vm_protos.h> |
41 | #include <vm/vm_purgeable_internal.h> |
42 | |
43 | #include <sys/kdebug.h> |
44 | |
45 | /* |
46 | * LOCK ORDERING for task-owned purgeable objects |
47 | * |
48 | * Whenever we need to hold multiple locks while adding to, removing from, |
49 | * or scanning a task's task_objq list of VM objects it owns, locks should |
50 | * be taken in this order: |
51 | * |
52 | * VM object ==> vm_purgeable_queue_lock ==> owner_task->task_objq_lock |
53 | * |
54 | * If one needs to acquire the VM object lock after any of the other 2 locks, |
55 | * one needs to use vm_object_lock_try() and, if that fails, release the |
56 | * other locks and retake them all in the correct order. |
57 | */ |
58 | |
59 | extern vm_pressure_level_t memorystatus_vm_pressure_level; |
60 | |
61 | struct token { |
62 | token_cnt_t count; |
63 | token_idx_t prev; |
64 | token_idx_t next; |
65 | }; |
66 | |
67 | struct token *tokens; |
68 | token_idx_t token_q_max_cnt = 0; |
69 | vm_size_t token_q_cur_size = 0; |
70 | |
71 | token_idx_t token_free_idx = 0; /* head of free queue */ |
72 | token_idx_t token_init_idx = 1; /* token 0 is reserved!! */ |
73 | int32_t token_new_pagecount = 0; /* count of pages that will |
74 | * be added onto token queue */ |
75 | |
76 | int available_for_purge = 0; /* increase when ripe token |
77 | * added, decrease when ripe |
78 | * token removed. |
79 | * protected by page_queue_lock |
80 | */ |
81 | |
82 | static int token_q_allocating = 0; /* flag for singlethreading |
83 | * allocator */ |
84 | |
85 | struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX]; |
86 | queue_head_t purgeable_nonvolatile_queue; |
87 | int purgeable_nonvolatile_count; |
88 | |
89 | decl_lck_mtx_data(, vm_purgeable_queue_lock); |
90 | |
91 | static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue); |
92 | |
93 | static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task); |
94 | |
95 | |
96 | #if MACH_ASSERT |
97 | static void |
98 | vm_purgeable_token_check_queue(purgeable_q_t queue) |
99 | { |
100 | int token_cnt = 0, page_cnt = 0; |
101 | token_idx_t token = queue->token_q_head; |
102 | token_idx_t unripe = 0; |
103 | int our_inactive_count; |
104 | |
105 | |
106 | #if DEVELOPMENT |
107 | static int lightweight_check = 0; |
108 | |
109 | /* |
110 | * Due to performance impact, perform this check less frequently on DEVELOPMENT kernels. |
	 * Checking the queue scales linearly with its length, so we compensate
	 * by performing this check less frequently as the queue grows.
113 | */ |
114 | if (lightweight_check++ < (100 + queue->debug_count_tokens / 512)) { |
115 | return; |
116 | } |
117 | |
118 | lightweight_check = 0; |
119 | #endif |
120 | |
121 | while (token) { |
122 | if (tokens[token].count != 0) { |
123 | assert(queue->token_q_unripe); |
124 | if (unripe == 0) { |
125 | assert(token == queue->token_q_unripe); |
126 | unripe = token; |
127 | } |
128 | page_cnt += tokens[token].count; |
129 | } |
130 | if (tokens[token].next == 0) { |
131 | assert(queue->token_q_tail == token); |
132 | } |
133 | |
134 | token_cnt++; |
135 | token = tokens[token].next; |
136 | } |
137 | |
138 | if (unripe) { |
139 | assert(queue->token_q_unripe == unripe); |
140 | } |
141 | assert(token_cnt == queue->debug_count_tokens); |
142 | |
143 | /* obsolete queue doesn't maintain token counts */ |
144 | if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) { |
145 | our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount; |
146 | assert(our_inactive_count >= 0); |
147 | assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count); |
148 | } |
149 | } |
150 | #endif |
151 | |
152 | /* |
153 | * Add a token. Allocate token queue memory if necessary. |
154 | * Call with page queue locked. |
155 | */ |
156 | kern_return_t |
157 | vm_purgeable_token_add(purgeable_q_t queue) |
158 | { |
159 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); |
160 | |
161 | /* new token */ |
162 | token_idx_t token; |
163 | enum purgeable_q_type i; |
164 | |
165 | find_available_token: |
166 | |
167 | if (token_free_idx) { /* unused tokens available */ |
168 | token = token_free_idx; |
169 | token_free_idx = tokens[token_free_idx].next; |
170 | } else if (token_init_idx < token_q_max_cnt) { /* lazy token array init */ |
171 | token = token_init_idx; |
172 | token_init_idx++; |
173 | } else { /* allocate more memory */ |
174 | /* Wait if another thread is inside the memory alloc section */ |
175 | while (token_q_allocating) { |
			wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
			    LCK_SLEEP_DEFAULT,
			    (event_t)&token_q_allocating,
			    THREAD_UNINT);
180 | if (res != THREAD_AWAKENED) { |
181 | return KERN_ABORTED; |
182 | } |
183 | } |
184 | |
185 | /* Check whether memory is still maxed out */ |
186 | if (token_init_idx < token_q_max_cnt) { |
187 | goto find_available_token; |
188 | } |
189 | |
190 | /* Still no memory. Allocate some. */ |
191 | token_q_allocating = 1; |
192 | |
193 | /* Drop page queue lock so we can allocate */ |
194 | vm_page_unlock_queues(); |
195 | |
196 | vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE; |
197 | kmem_return_t kmr = { }; |
198 | kmem_guard_t guard = { |
199 | .kmg_atomic = true, |
200 | .kmg_tag = VM_KERN_MEMORY_OSFMK, |
			.kmg_context = os_hash_kernel_pointer(&tokens),
202 | }; |
203 | |
204 | if (alloc_size <= TOKEN_COUNT_MAX * sizeof(struct token)) { |
			kmr = kmem_realloc_guard(kernel_map,
			    (vm_offset_t)tokens, token_q_cur_size, alloc_size,
			    KMR_ZERO | KMR_DATA, guard);
208 | } |
209 | |
210 | vm_page_lock_queues(); |
211 | |
212 | if (kmr.kmr_ptr == NULL) { |
213 | /* Unblock waiting threads */ |
214 | token_q_allocating = 0; |
215 | thread_wakeup((event_t)&token_q_allocating); |
216 | return KERN_RESOURCE_SHORTAGE; |
217 | } |
218 | |
219 | /* If we get here, we allocated new memory. Update pointers and |
220 | * dealloc old range */ |
221 | struct token *old_tokens = tokens; |
222 | vm_size_t old_token_q_cur_size = token_q_cur_size; |
223 | |
224 | tokens = kmr.kmr_ptr; |
225 | token_q_cur_size = alloc_size; |
226 | token_q_max_cnt = (token_idx_t) (token_q_cur_size / |
227 | sizeof(struct token)); |
228 | assert(token_init_idx < token_q_max_cnt); /* We must have a free token now */ |
229 | |
230 | /* kmem_realloc_guard() might leave the old region mapped. */ |
		if (kmem_realloc_should_free((vm_offset_t)old_tokens, kmr)) {
232 | vm_page_unlock_queues(); |
			kmem_free_guard(kernel_map, (vm_offset_t)old_tokens,
			    old_token_q_cur_size, KMF_NONE, guard);
235 | vm_page_lock_queues(); |
236 | } |
237 | |
238 | /* Unblock waiting threads */ |
239 | token_q_allocating = 0; |
240 | thread_wakeup((event_t)&token_q_allocating); |
241 | |
242 | goto find_available_token; |
243 | } |
244 | |
245 | assert(token); |
246 | |
247 | /* |
	 * the new pagecount we got needs to be applied to all queues except
249 | * obsolete |
250 | */ |
251 | for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) { |
252 | int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount; |
253 | assert(pages >= 0); |
254 | assert(pages <= TOKEN_COUNT_MAX); |
255 | purgeable_queues[i].new_pages = (int32_t) pages; |
256 | assert(purgeable_queues[i].new_pages == pages); |
257 | } |
258 | token_new_pagecount = 0; |
259 | |
260 | /* set token counter value */ |
261 | if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) { |
262 | tokens[token].count = queue->new_pages; |
263 | } else { |
264 | tokens[token].count = 0; /* all obsolete items are |
265 | * ripe immediately */ |
266 | } |
267 | queue->new_pages = 0; |
268 | |
269 | /* put token on token counter list */ |
270 | tokens[token].next = 0; |
271 | if (queue->token_q_tail == 0) { |
272 | assert(queue->token_q_head == 0 && queue->token_q_unripe == 0); |
273 | queue->token_q_head = token; |
274 | tokens[token].prev = 0; |
275 | } else { |
276 | tokens[queue->token_q_tail].next = token; |
277 | tokens[token].prev = queue->token_q_tail; |
278 | } |
279 | if (queue->token_q_unripe == 0) { /* only ripe tokens (token |
280 | * count == 0) in queue */ |
281 | if (tokens[token].count > 0) { |
282 | queue->token_q_unripe = token; /* first unripe token */ |
283 | } else { |
284 | available_for_purge++; /* added a ripe token? |
285 | * increase available count */ |
286 | } |
287 | } |
288 | queue->token_q_tail = token; |
289 | |
290 | #if MACH_ASSERT |
291 | queue->debug_count_tokens++; |
292 | /* Check both queues, since we modified the new_pages count on each */ |
293 | vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]); |
294 | vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]); |
295 | |
296 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)), |
297 | queue->type, |
298 | tokens[token].count, /* num pages on token |
299 | * (last token) */ |
300 | queue->debug_count_tokens, |
301 | 0, |
302 | 0); |
303 | #endif |
304 | |
305 | return KERN_SUCCESS; |
306 | } |
307 | |
308 | /* |
309 | * Remove first token from queue and return its index. Add its count to the |
310 | * count of the next token. |
311 | * Call with page queue locked. |
312 | */ |
313 | static token_idx_t |
314 | vm_purgeable_token_remove_first(purgeable_q_t queue) |
315 | { |
316 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); |
317 | |
318 | token_idx_t token; |
319 | token = queue->token_q_head; |
320 | |
321 | assert(token); |
322 | |
323 | if (token) { |
324 | assert(queue->token_q_tail); |
325 | if (queue->token_q_head == queue->token_q_unripe) { |
326 | /* no ripe tokens... must move unripe pointer */ |
327 | queue->token_q_unripe = tokens[token].next; |
328 | } else { |
329 | /* we're removing a ripe token. decrease count */ |
330 | available_for_purge--; |
331 | assert(available_for_purge >= 0); |
332 | } |
333 | |
334 | if (queue->token_q_tail == queue->token_q_head) { |
335 | assert(tokens[token].next == 0); |
336 | } |
337 | |
338 | queue->token_q_head = tokens[token].next; |
339 | if (queue->token_q_head) { |
340 | tokens[queue->token_q_head].count += tokens[token].count; |
341 | tokens[queue->token_q_head].prev = 0; |
342 | } else { |
343 | /* currently no other tokens in the queue */ |
344 | /* |
345 | * the page count must be added to the next newly |
346 | * created token |
347 | */ |
348 | queue->new_pages += tokens[token].count; |
349 | /* if head is zero, tail is too */ |
350 | queue->token_q_tail = 0; |
351 | } |
352 | |
353 | #if MACH_ASSERT |
354 | queue->debug_count_tokens--; |
355 | vm_purgeable_token_check_queue(queue); |
356 | |
357 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)), |
358 | queue->type, |
359 | tokens[queue->token_q_head].count, /* num pages on new |
360 | * first token */ |
361 | token_new_pagecount, /* num pages waiting for |
362 | * next token */ |
363 | available_for_purge, |
364 | 0); |
365 | #endif |
366 | } |
367 | return token; |
368 | } |
369 | |
370 | static token_idx_t |
371 | vm_purgeable_token_remove_last(purgeable_q_t queue) |
372 | { |
373 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); |
374 | |
375 | token_idx_t token; |
376 | token = queue->token_q_tail; |
377 | |
378 | assert(token); |
379 | |
380 | if (token) { |
381 | assert(queue->token_q_head); |
382 | |
383 | if (queue->token_q_tail == queue->token_q_head) { |
384 | assert(tokens[token].next == 0); |
385 | } |
386 | |
387 | if (queue->token_q_unripe == 0) { |
388 | /* we're removing a ripe token. decrease count */ |
389 | available_for_purge--; |
390 | assert(available_for_purge >= 0); |
391 | } else if (queue->token_q_unripe == token) { |
392 | /* we're removing the only unripe token */ |
393 | queue->token_q_unripe = 0; |
394 | } |
395 | |
396 | if (token == queue->token_q_head) { |
397 | /* token is the last one in the queue */ |
398 | queue->token_q_head = 0; |
399 | queue->token_q_tail = 0; |
400 | } else { |
401 | token_idx_t new_tail; |
402 | |
403 | new_tail = tokens[token].prev; |
404 | |
405 | assert(new_tail); |
406 | assert(tokens[new_tail].next == token); |
407 | |
408 | queue->token_q_tail = new_tail; |
409 | tokens[new_tail].next = 0; |
410 | } |
411 | |
412 | queue->new_pages += tokens[token].count; |
413 | |
414 | #if MACH_ASSERT |
415 | queue->debug_count_tokens--; |
416 | vm_purgeable_token_check_queue(queue); |
417 | |
418 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)), |
419 | queue->type, |
420 | tokens[queue->token_q_head].count, /* num pages on new |
421 | * first token */ |
422 | token_new_pagecount, /* num pages waiting for |
423 | * next token */ |
424 | available_for_purge, |
425 | 0); |
426 | #endif |
427 | } |
428 | return token; |
429 | } |
430 | |
431 | /* |
432 | * Delete first token from queue. Return token to token queue. |
433 | * Call with page queue locked. |
434 | */ |
435 | void |
436 | vm_purgeable_token_delete_first(purgeable_q_t queue) |
437 | { |
438 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); |
439 | token_idx_t token = vm_purgeable_token_remove_first(queue); |
440 | |
441 | if (token) { |
442 | /* stick removed token on free queue */ |
443 | tokens[token].next = token_free_idx; |
444 | tokens[token].prev = 0; |
445 | token_free_idx = token; |
446 | } |
447 | } |
448 | |
449 | void |
450 | vm_purgeable_token_delete_last(purgeable_q_t queue) |
451 | { |
452 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); |
453 | token_idx_t token = vm_purgeable_token_remove_last(queue); |
454 | |
455 | if (token) { |
456 | /* stick removed token on free queue */ |
457 | tokens[token].next = token_free_idx; |
458 | tokens[token].prev = 0; |
459 | token_free_idx = token; |
460 | } |
461 | } |
462 | |
463 | |
464 | /* Call with page queue locked. */ |
465 | void |
vm_purgeable_q_advance_all(void)
467 | { |
468 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); |
469 | |
470 | /* check queue counters - if they get really large, scale them back. |
471 | * They tend to get that large when there is no purgeable queue action */ |
472 | int i; |
	if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) { /* a system idling for years might get there */
474 | for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) { |
475 | int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount; |
476 | assert(pages >= 0); |
477 | assert(pages <= TOKEN_COUNT_MAX); |
478 | purgeable_queues[i].new_pages = (int32_t) pages; |
479 | assert(purgeable_queues[i].new_pages == pages); |
480 | } |
481 | token_new_pagecount = 0; |
482 | } |
483 | |
484 | /* |
	 * Decrement token counters. A token counter can be zero; this means the
486 | * object is ripe to be purged. It is not purged immediately, because that |
487 | * could cause several objects to be purged even if purging one would satisfy |
488 | * the memory needs. Instead, the pageout thread purges one after the other |
489 | * by calling vm_purgeable_object_purge_one and then rechecking the memory |
490 | * balance. |
491 | * |
492 | * No need to advance obsolete queue - all items are ripe there, |
493 | * always |
494 | */ |
495 | for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) { |
496 | purgeable_q_t queue = &purgeable_queues[i]; |
497 | uint32_t num_pages = 1; |
498 | |
499 | /* Iterate over tokens as long as there are unripe tokens. */ |
500 | while (queue->token_q_unripe) { |
501 | if (tokens[queue->token_q_unripe].count && num_pages) { |
502 | tokens[queue->token_q_unripe].count -= 1; |
503 | num_pages -= 1; |
504 | } |
505 | |
506 | if (tokens[queue->token_q_unripe].count == 0) { |
507 | queue->token_q_unripe = tokens[queue->token_q_unripe].next; |
508 | available_for_purge++; |
509 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)), |
510 | queue->type, |
511 | tokens[queue->token_q_head].count, /* num pages on new |
512 | * first token */ |
513 | 0, |
514 | available_for_purge, |
515 | 0); |
516 | continue; /* One token ripened. Make sure to |
517 | * check the next. */ |
518 | } |
519 | if (num_pages == 0) { |
520 | break; /* Current token not ripe and no more pages. |
521 | * Work done. */ |
522 | } |
523 | } |
524 | |
525 | /* |
		 * If there are no unripe tokens in the queue, decrement the
		 * new_pages counter instead. new_pages can be negative, but must be
		 * canceled out by token_new_pagecount -- since the inactive queue as a
		 * whole always contains a nonnegative number of pages.
530 | */ |
531 | if (!queue->token_q_unripe) { |
532 | queue->new_pages -= num_pages; |
533 | assert((int32_t) token_new_pagecount + queue->new_pages >= 0); |
534 | } |
535 | #if MACH_ASSERT |
536 | vm_purgeable_token_check_queue(queue); |
537 | #endif |
538 | } |
539 | } |
540 | |
541 | /* |
 * Grab any ripe object and purge it: obsolete queue first; then go through
 * each volatile group and select a queue with a ripe token.
 * Start with the first group (0):
545 | * 1. Look at queue. Is there an object? |
546 | * Yes - purge it. Remove token. |
547 | * No - check other queue. Is there an object? |
548 | * No - increment group, then go to (1) |
549 | * Yes - purge it. Remove token. If there is no ripe token, remove ripe |
550 | * token from other queue and migrate unripe token from this |
551 | * queue to other queue. |
552 | * Call with page queue locked. |
553 | */ |
554 | static void |
555 | vm_purgeable_token_remove_ripe(purgeable_q_t queue) |
556 | { |
557 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); |
558 | assert(queue->token_q_head && tokens[queue->token_q_head].count == 0); |
559 | /* return token to free list. advance token list. */ |
560 | token_idx_t new_head = tokens[queue->token_q_head].next; |
561 | tokens[queue->token_q_head].next = token_free_idx; |
562 | tokens[queue->token_q_head].prev = 0; |
563 | token_free_idx = queue->token_q_head; |
564 | queue->token_q_head = new_head; |
565 | tokens[new_head].prev = 0; |
566 | if (new_head == 0) { |
567 | queue->token_q_tail = 0; |
568 | } |
569 | |
570 | #if MACH_ASSERT |
571 | queue->debug_count_tokens--; |
572 | vm_purgeable_token_check_queue(queue); |
573 | #endif |
574 | |
575 | available_for_purge--; |
576 | assert(available_for_purge >= 0); |
577 | } |
578 | |
579 | /* |
580 | * Delete a ripe token from the given queue. If there are no ripe tokens on |
581 | * that queue, delete a ripe token from queue2, and migrate an unripe token |
582 | * from queue to queue2 |
583 | * Call with page queue locked. |
584 | */ |
585 | static void |
586 | vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2) |
587 | { |
588 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); |
589 | assert(queue->token_q_head); |
590 | |
591 | if (tokens[queue->token_q_head].count == 0) { |
592 | /* This queue has a ripe token. Remove. */ |
593 | vm_purgeable_token_remove_ripe(queue); |
594 | } else { |
595 | assert(queue2); |
596 | /* |
597 | * queue2 must have a ripe token. Remove, and migrate one |
598 | * from queue to queue2. |
599 | */ |
		vm_purgeable_token_remove_ripe(queue2);
601 | /* migrate unripe token */ |
602 | token_idx_t token; |
603 | token_cnt_t count; |
604 | |
605 | /* remove token from queue1 */ |
		assert(queue->token_q_unripe == queue->token_q_head); /* queue1 had no ripe
		                                                       * tokens, remember? */
608 | token = vm_purgeable_token_remove_first(queue); |
609 | assert(token); |
610 | |
611 | count = tokens[token].count; |
612 | |
613 | /* migrate to queue2 */ |
614 | /* go to migration target loc */ |
615 | |
616 | token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after; |
617 | |
618 | while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) { |
619 | count -= tokens[token_to_insert_before].count; |
620 | token_to_insert_before = tokens[token_to_insert_before].next; |
621 | } |
622 | |
623 | /* token_to_insert_before is now set correctly */ |
624 | |
625 | /* should the inserted token become the first unripe token? */ |
626 | if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0)) { |
627 | queue2->token_q_unripe = token; /* if so, must update unripe pointer */ |
628 | } |
629 | /* |
630 | * insert token. |
631 | * if inserting at end, reduce new_pages by that value; |
632 | * otherwise, reduce counter of next token |
633 | */ |
634 | |
635 | tokens[token].count = count; |
636 | |
637 | if (token_to_insert_before != 0) { |
638 | token_to_insert_after = tokens[token_to_insert_before].prev; |
639 | |
640 | tokens[token].next = token_to_insert_before; |
641 | tokens[token_to_insert_before].prev = token; |
642 | |
643 | assert(tokens[token_to_insert_before].count >= count); |
644 | tokens[token_to_insert_before].count -= count; |
645 | } else { |
646 | /* if we ran off the end of the list, the token to insert after is the tail */ |
647 | token_to_insert_after = queue2->token_q_tail; |
648 | |
649 | tokens[token].next = 0; |
650 | queue2->token_q_tail = token; |
651 | |
652 | assert(queue2->new_pages >= (int32_t) count); |
653 | queue2->new_pages -= count; |
654 | } |
655 | |
656 | if (token_to_insert_after != 0) { |
657 | tokens[token].prev = token_to_insert_after; |
658 | tokens[token_to_insert_after].next = token; |
659 | } else { |
660 | /* is this case possible? */ |
661 | tokens[token].prev = 0; |
662 | queue2->token_q_head = token; |
663 | } |
664 | |
665 | #if MACH_ASSERT |
666 | queue2->debug_count_tokens++; |
667 | vm_purgeable_token_check_queue(queue2); |
668 | #endif |
669 | } |
670 | } |
671 | |
672 | /* Find an object that can be locked. Returns locked object. */ |
673 | /* Call with purgeable queue locked. */ |
674 | static vm_object_t |
675 | vm_purgeable_object_find_and_lock( |
676 | purgeable_q_t queue, |
677 | int group, |
678 | boolean_t pick_ripe) |
679 | { |
680 | vm_object_t object, best_object; |
681 | int object_task_importance; |
682 | int best_object_task_importance; |
683 | int best_object_skipped; |
684 | int num_objects_skipped; |
685 | int try_lock_failed = 0; |
686 | int try_lock_succeeded = 0; |
687 | task_t owner; |
688 | |
689 | best_object = VM_OBJECT_NULL; |
690 | best_object_task_importance = INT_MAX; |
691 | |
692 | LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED); |
693 | /* |
694 | * Usually we would pick the first element from a queue. However, we |
695 | * might not be able to get a lock on it, in which case we try the |
696 | * remaining elements in order. |
697 | */ |
698 | |
699 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START), |
700 | pick_ripe, |
701 | group, |
702 | VM_KERNEL_UNSLIDE_OR_PERM(queue), |
703 | 0, |
704 | 0); |
705 | |
706 | num_objects_skipped = 0; |
707 | for (object = (vm_object_t) queue_first(&queue->objq[group]); |
708 | !queue_end(&queue->objq[group], (queue_entry_t) object); |
709 | object = (vm_object_t) queue_next(&object->objq), |
710 | num_objects_skipped++) { |
711 | /* |
712 | * To prevent us looping for an excessively long time, choose |
713 | * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements. |
714 | * If we haven't seen an eligible object after PURGEABLE_LOOP_MAX elements, |
715 | * we keep going until we find the first eligible object. |
716 | */ |
717 | if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) { |
718 | break; |
719 | } |
720 | |
721 | if (pick_ripe && |
722 | !object->purgeable_when_ripe) { |
723 | /* we want an object that has a ripe token */ |
724 | continue; |
725 | } |
726 | |
727 | object_task_importance = 0; |
728 | |
729 | /* |
730 | * We don't want to use VM_OBJECT_OWNER() here: we want to |
731 | * distinguish kernel-owned and disowned objects. |
732 | * Disowned objects have no owner and will have no importance... |
733 | */ |
734 | owner = object->vo_owner; |
735 | if (owner != NULL && owner != VM_OBJECT_OWNER_DISOWNED) { |
736 | #if !XNU_TARGET_OS_OSX |
737 | #if CONFIG_JETSAM |
738 | object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE); |
739 | #endif /* CONFIG_JETSAM */ |
740 | #else /* !XNU_TARGET_OS_OSX */ |
			object_task_importance = task_importance_estimate(owner);
742 | #endif /* !XNU_TARGET_OS_OSX */ |
743 | } |
744 | |
745 | if (object_task_importance < best_object_task_importance) { |
746 | if (vm_object_lock_try(object)) { |
747 | try_lock_succeeded++; |
748 | if (best_object != VM_OBJECT_NULL) { |
749 | /* forget about previous best object */ |
750 | vm_object_unlock(best_object); |
751 | } |
752 | best_object = object; |
753 | best_object_task_importance = object_task_importance; |
754 | best_object_skipped = num_objects_skipped; |
755 | if (best_object_task_importance == 0) { |
756 | /* can't get any better: stop looking */ |
757 | break; |
758 | } |
759 | } else { |
760 | try_lock_failed++; |
761 | } |
762 | } |
763 | } |
764 | |
765 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END), |
766 | num_objects_skipped, /* considered objects */ |
767 | try_lock_failed, |
768 | try_lock_succeeded, |
769 | VM_KERNEL_UNSLIDE_OR_PERM(best_object), |
770 | ((best_object == NULL) ? 0 : best_object->resident_page_count)); |
771 | |
772 | object = best_object; |
773 | |
774 | if (object == VM_OBJECT_NULL) { |
775 | return VM_OBJECT_NULL; |
776 | } |
777 | |
778 | /* Locked. Great. We'll take it. Remove and return. */ |
779 | // printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped); |
780 | |
781 | vm_object_lock_assert_exclusive(object); |
782 | |
783 | queue_remove(&queue->objq[group], object, |
784 | vm_object_t, objq); |
785 | object->objq.next = NULL; |
786 | object->objq.prev = NULL; |
787 | object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX; |
788 | object->purgeable_queue_group = 0; |
789 | /* one less volatile object for this object's owner */ |
	vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);
791 | |
792 | #if DEBUG |
793 | object->vo_purgeable_volatilizer = NULL; |
794 | #endif /* DEBUG */ |
795 | |
796 | /* keep queue of non-volatile objects */ |
797 | queue_enter(&purgeable_nonvolatile_queue, object, |
798 | vm_object_t, objq); |
799 | assert(purgeable_nonvolatile_count >= 0); |
800 | purgeable_nonvolatile_count++; |
801 | assert(purgeable_nonvolatile_count > 0); |
802 | /* one more nonvolatile object for this object's owner */ |
	vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);
804 | |
805 | #if MACH_ASSERT |
806 | queue->debug_count_objects--; |
807 | #endif |
808 | return object; |
809 | } |
810 | |
811 | /* Can be called without holding locks */ |
812 | void |
813 | vm_purgeable_object_purge_all(void) |
814 | { |
815 | enum purgeable_q_type i; |
816 | int group; |
817 | vm_object_t object; |
818 | unsigned int purged_count; |
819 | uint32_t collisions; |
820 | |
821 | purged_count = 0; |
822 | collisions = 0; |
823 | |
824 | restart: |
	lck_mtx_lock(&vm_purgeable_queue_lock);
826 | /* Cycle through all queues */ |
827 | for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) { |
828 | purgeable_q_t queue; |
829 | |
830 | queue = &purgeable_queues[i]; |
831 | |
832 | /* |
833 | * Look through all groups, starting from the lowest. If |
834 | * we find an object in that group, try to lock it (this can |
835 | * fail). If locking is successful, we can drop the queue |
836 | * lock, remove a token and then purge the object. |
837 | */ |
838 | for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { |
839 | while (!queue_empty(&queue->objq[group])) { |
840 | object = vm_purgeable_object_find_and_lock(queue, group, FALSE); |
841 | if (object == VM_OBJECT_NULL) { |
					lck_mtx_unlock(&vm_purgeable_queue_lock);
843 | mutex_pause(collisions++); |
844 | goto restart; |
845 | } |
846 | |
				lck_mtx_unlock(&vm_purgeable_queue_lock);
848 | |
849 | /* Lock the page queue here so we don't hold it |
				 * over the whole, lengthy operation */
851 | if (object->purgeable_when_ripe) { |
852 | vm_page_lock_queues(); |
853 | vm_purgeable_token_remove_first(queue); |
854 | vm_page_unlock_queues(); |
855 | } |
856 | |
				(void) vm_object_purge(object, 0);
858 | assert(object->purgable == VM_PURGABLE_EMPTY); |
859 | /* no change in purgeable accounting */ |
860 | |
861 | vm_object_unlock(object); |
862 | purged_count++; |
863 | goto restart; |
864 | } |
865 | assert(queue->debug_count_objects >= 0); |
866 | } |
867 | } |
868 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)), |
869 | purged_count, /* # of purged objects */ |
870 | 0, |
871 | available_for_purge, |
872 | 0, |
873 | 0); |
	lck_mtx_unlock(&vm_purgeable_queue_lock);
875 | return; |
876 | } |
877 | |
878 | boolean_t |
879 | vm_purgeable_object_purge_one_unlocked( |
880 | int force_purge_below_group) |
881 | { |
882 | boolean_t retval; |
883 | |
884 | vm_page_lock_queues(); |
	retval = vm_purgeable_object_purge_one(force_purge_below_group, 0);
886 | vm_page_unlock_queues(); |
887 | |
888 | return retval; |
889 | } |
890 | |
891 | boolean_t |
892 | vm_purgeable_object_purge_one( |
893 | int force_purge_below_group, |
894 | int flags) |
895 | { |
896 | enum purgeable_q_type i; |
897 | int group; |
898 | vm_object_t object = 0; |
899 | purgeable_q_t queue, queue2; |
900 | boolean_t forced_purge; |
901 | unsigned int resident_page_count; |
902 | |
903 | |
904 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_START, |
905 | force_purge_below_group, flags, 0, 0, 0); |
906 | |
907 | /* Need the page queue lock since we'll be changing the token queue. */ |
908 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); |
	lck_mtx_lock(&vm_purgeable_queue_lock);
910 | |
911 | /* Cycle through all queues */ |
912 | for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) { |
913 | queue = &purgeable_queues[i]; |
914 | |
915 | if (force_purge_below_group == 0) { |
916 | /* |
917 | * Are there any ripe tokens on this queue? If yes, |
918 | * we'll find an object to purge there |
919 | */ |
920 | if (!queue->token_q_head) { |
921 | /* no token: look at next purgeable queue */ |
922 | continue; |
923 | } |
924 | |
925 | if (tokens[queue->token_q_head].count != 0) { |
926 | /* no ripe token: next queue */ |
927 | continue; |
928 | } |
929 | } |
930 | |
931 | /* |
932 | * Now look through all groups, starting from the lowest. If |
933 | * we find an object in that group, try to lock it (this can |
934 | * fail). If locking is successful, we can drop the queue |
935 | * lock, remove a token and then purge the object. |
936 | */ |
937 | for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { |
938 | if (!queue->token_q_head || |
939 | tokens[queue->token_q_head].count != 0) { |
940 | /* no tokens or no ripe tokens */ |
941 | |
942 | if (group >= force_purge_below_group) { |
943 | /* no more groups to force-purge */ |
944 | break; |
945 | } |
946 | |
947 | /* |
948 | * Try and purge an object in this group |
949 | * even though no tokens are ripe. |
950 | */ |
951 | if (!queue_empty(&queue->objq[group]) && |
952 | (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) { |
					lck_mtx_unlock(&vm_purgeable_queue_lock);
954 | if (object->purgeable_when_ripe) { |
955 | vm_purgeable_token_delete_first(queue); |
956 | } |
957 | forced_purge = TRUE; |
958 | goto purge_now; |
959 | } |
960 | |
961 | /* nothing to purge in this group: next group */ |
962 | continue; |
963 | } |
964 | if (!queue_empty(&queue->objq[group]) && |
965 | (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) { |
				lck_mtx_unlock(&vm_purgeable_queue_lock);
967 | if (object->purgeable_when_ripe) { |
					vm_purgeable_token_choose_and_delete_ripe(queue, 0);
969 | } |
970 | forced_purge = FALSE; |
971 | goto purge_now; |
972 | } |
973 | if (i != PURGEABLE_Q_TYPE_OBSOLETE) { |
974 | /* This is the token migration case, and it works between |
975 | * FIFO and LIFO only */ |
976 | queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ? |
977 | PURGEABLE_Q_TYPE_FIFO : |
978 | PURGEABLE_Q_TYPE_LIFO]; |
979 | |
980 | if (!queue_empty(&queue2->objq[group]) && |
				    (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
					lck_mtx_unlock(&vm_purgeable_queue_lock);
					if (object->purgeable_when_ripe) {
						vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
985 | } |
986 | forced_purge = FALSE; |
987 | goto purge_now; |
988 | } |
989 | } |
990 | assert(queue->debug_count_objects >= 0); |
991 | } |
992 | } |
993 | /* |
994 | * because we have to do a try_lock on the objects which could fail, |
995 | * we could end up with no object to purge at this time, even though |
996 | * we have objects in a purgeable state |
997 | */ |
	lck_mtx_unlock(&vm_purgeable_queue_lock);
999 | |
1000 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END, |
1001 | 0, 0, available_for_purge, 0, 0); |
1002 | |
1003 | return FALSE; |
1004 | |
1005 | purge_now: |
1006 | |
1007 | assert(object); |
1008 | vm_page_unlock_queues(); /* Unlock for call to vm_object_purge() */ |
1009 | // printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_owner, task_importance_estimate(object->vo_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level); |
1010 | resident_page_count = object->resident_page_count; |
1011 | (void) vm_object_purge(object, flags); |
1012 | assert(object->purgable == VM_PURGABLE_EMPTY); |
1013 | /* no change in purgeable accounting */ |
1014 | vm_object_unlock(object); |
1015 | vm_page_lock_queues(); |
1016 | |
1017 | vm_pageout_vminfo.vm_pageout_pages_purged += resident_page_count; |
1018 | |
1019 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END, |
1020 | VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */ |
1021 | resident_page_count, |
1022 | available_for_purge, |
1023 | 0, |
1024 | 0); |
1025 | |
1026 | return TRUE; |
1027 | } |
1028 | |
1029 | /* Called with object lock held */ |
1030 | void |
1031 | vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group) |
1032 | { |
1033 | vm_object_lock_assert_exclusive(object); |
	lck_mtx_lock(&vm_purgeable_queue_lock);
1035 | |
1036 | assert(object->objq.next != NULL); |
1037 | assert(object->objq.prev != NULL); |
1038 | queue_remove(&purgeable_nonvolatile_queue, object, |
1039 | vm_object_t, objq); |
1040 | object->objq.next = NULL; |
1041 | object->objq.prev = NULL; |
1042 | assert(purgeable_nonvolatile_count > 0); |
1043 | purgeable_nonvolatile_count--; |
1044 | assert(purgeable_nonvolatile_count >= 0); |
1045 | /* one less nonvolatile object for this object's owner */ |
	vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), -1);
1047 | |
1048 | if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE) { |
1049 | group = 0; |
1050 | } |
1051 | |
1052 | if (queue->type != PURGEABLE_Q_TYPE_LIFO) { /* fifo and obsolete are |
1053 | * fifo-queued */ |
1054 | queue_enter(&queue->objq[group], object, vm_object_t, objq); /* last to die */ |
1055 | } else { |
1056 | queue_enter_first(&queue->objq[group], object, vm_object_t, objq); /* first to die */ |
1057 | } |
1058 | /* one more volatile object for this object's owner */ |
	vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), +1);
1060 | |
1061 | object->purgeable_queue_type = queue->type; |
1062 | object->purgeable_queue_group = group; |
1063 | |
1064 | #if DEBUG |
1065 | assert(object->vo_purgeable_volatilizer == NULL); |
1066 | object->vo_purgeable_volatilizer = current_task(); |
1067 | OSBacktrace(&object->purgeable_volatilizer_bt[0], |
1068 | ARRAY_COUNT(object->purgeable_volatilizer_bt)); |
1069 | #endif /* DEBUG */ |
1070 | |
1071 | #if MACH_ASSERT |
1072 | queue->debug_count_objects++; |
1073 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)), |
1074 | 0, |
1075 | tokens[queue->token_q_head].count, |
1076 | queue->type, |
1077 | group, |
1078 | 0); |
1079 | #endif |
1080 | |
	lck_mtx_unlock(&vm_purgeable_queue_lock);
1082 | } |
1083 | |
1084 | /* Look for object. If found, remove from purgeable queue. */ |
1085 | /* Called with object lock held */ |
1086 | purgeable_q_t |
1087 | vm_purgeable_object_remove(vm_object_t object) |
1088 | { |
1089 | int group; |
1090 | enum purgeable_q_type type; |
1091 | purgeable_q_t queue; |
1092 | |
1093 | vm_object_lock_assert_exclusive(object); |
1094 | |
1095 | type = object->purgeable_queue_type; |
1096 | group = object->purgeable_queue_group; |
1097 | |
1098 | if (type == PURGEABLE_Q_TYPE_MAX) { |
1099 | if (object->objq.prev || object->objq.next) { |
1100 | panic("unmarked object on purgeable q" ); |
1101 | } |
1102 | |
1103 | return NULL; |
1104 | } else if (!(object->objq.prev && object->objq.next)) { |
1105 | panic("marked object not on purgeable q" ); |
1106 | } |
1107 | |
	lck_mtx_lock(&vm_purgeable_queue_lock);
1109 | |
1110 | queue = &purgeable_queues[type]; |
1111 | |
1112 | queue_remove(&queue->objq[group], object, vm_object_t, objq); |
1113 | object->objq.next = NULL; |
1114 | object->objq.prev = NULL; |
1115 | /* one less volatile object for this object's owner */ |
	vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), -1);
1117 | #if DEBUG |
1118 | object->vo_purgeable_volatilizer = NULL; |
1119 | #endif /* DEBUG */ |
1120 | /* keep queue of non-volatile objects */ |
1121 | if (object->alive && !object->terminating) { |
1122 | queue_enter(&purgeable_nonvolatile_queue, object, |
1123 | vm_object_t, objq); |
1124 | assert(purgeable_nonvolatile_count >= 0); |
1125 | purgeable_nonvolatile_count++; |
1126 | assert(purgeable_nonvolatile_count > 0); |
1127 | /* one more nonvolatile object for this object's owner */ |
		vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), +1);
1129 | } |
1130 | |
1131 | #if MACH_ASSERT |
1132 | queue->debug_count_objects--; |
1133 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)), |
1134 | 0, |
1135 | tokens[queue->token_q_head].count, |
1136 | queue->type, |
1137 | group, |
1138 | 0); |
1139 | #endif |
1140 | |
	lck_mtx_unlock(&vm_purgeable_queue_lock);
1142 | |
1143 | object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX; |
1144 | object->purgeable_queue_group = 0; |
1145 | |
1146 | vm_object_lock_assert_exclusive(object); |
1147 | |
1148 | return &purgeable_queues[type]; |
1149 | } |
1150 | |
1151 | void |
1152 | vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task) |
1153 | { |
1154 | LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED); |
1155 | |
1156 | stat->count = stat->size = 0; |
1157 | vm_object_t object; |
1158 | for (object = (vm_object_t) queue_first(&queue->objq[group]); |
1159 | !queue_end(&queue->objq[group], (queue_entry_t) object); |
1160 | object = (vm_object_t) queue_next(&object->objq)) { |
1161 | if (!target_task || VM_OBJECT_OWNER(object) == target_task) { |
1162 | stat->count++; |
1163 | stat->size += (object->resident_page_count * PAGE_SIZE); |
1164 | } |
1165 | } |
1166 | return; |
1167 | } |
1168 | |
1169 | void |
1170 | vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task) |
1171 | { |
1172 | purgeable_q_t queue; |
1173 | int group; |
1174 | |
	lck_mtx_lock(&vm_purgeable_queue_lock);
1176 | |
1177 | /* Populate fifo_data */ |
1178 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; |
1179 | for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { |
1180 | vm_purgeable_stats_helper(stat: &(info->fifo_data[group]), queue, group, target_task); |
1181 | } |
1182 | |
1183 | /* Populate lifo_data */ |
1184 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; |
1185 | for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { |
1186 | vm_purgeable_stats_helper(stat: &(info->lifo_data[group]), queue, group, target_task); |
1187 | } |
1188 | |
1189 | /* Populate obsolete data */ |
1190 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; |
1191 | vm_purgeable_stats_helper(stat: &(info->obsolete_data), queue, group: 0, target_task); |
1192 | |
	lck_mtx_unlock(&vm_purgeable_queue_lock);
1194 | return; |
1195 | } |
1196 | |
1197 | #if DEVELOPMENT || DEBUG |
1198 | static void |
1199 | vm_purgeable_account_volatile_queue( |
1200 | purgeable_q_t queue, |
1201 | int group, |
1202 | task_t task, |
1203 | pvm_account_info_t acnt_info) |
1204 | { |
1205 | vm_object_t object; |
1206 | uint64_t compressed_count; |
1207 | |
1208 | for (object = (vm_object_t) queue_first(&queue->objq[group]); |
1209 | !queue_end(&queue->objq[group], (queue_entry_t) object); |
1210 | object = (vm_object_t) queue_next(&object->objq)) { |
1211 | if (VM_OBJECT_OWNER(object) == task) { |
1212 | compressed_count = vm_compressor_pager_get_count(object->pager); |
1213 | acnt_info->pvm_volatile_compressed_count += compressed_count; |
1214 | acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count); |
1215 | acnt_info->pvm_nonvolatile_count += object->wired_page_count; |
1216 | } |
1217 | } |
1218 | } |
1219 | |
1220 | /* |
1221 | * Walks the purgeable object queues and calculates the usage |
1222 | * associated with the objects for the given task. |
1223 | */ |
1224 | kern_return_t |
1225 | vm_purgeable_account( |
1226 | task_t task, |
1227 | pvm_account_info_t acnt_info) |
1228 | { |
1229 | queue_head_t *nonvolatile_q; |
1230 | vm_object_t object; |
1231 | int group; |
1232 | int state; |
1233 | uint64_t compressed_count; |
1234 | purgeable_q_t volatile_q; |
1235 | |
1236 | |
1237 | if ((task == NULL) || (acnt_info == NULL)) { |
1238 | return KERN_INVALID_ARGUMENT; |
1239 | } |
1240 | |
1241 | acnt_info->pvm_volatile_count = 0; |
1242 | acnt_info->pvm_volatile_compressed_count = 0; |
1243 | acnt_info->pvm_nonvolatile_count = 0; |
1244 | acnt_info->pvm_nonvolatile_compressed_count = 0; |
1245 | |
1246 | lck_mtx_lock(&vm_purgeable_queue_lock); |
1247 | |
1248 | nonvolatile_q = &purgeable_nonvolatile_queue; |
1249 | for (object = (vm_object_t) queue_first(nonvolatile_q); |
1250 | !queue_end(nonvolatile_q, (queue_entry_t) object); |
1251 | object = (vm_object_t) queue_next(&object->objq)) { |
1252 | if (VM_OBJECT_OWNER(object) == task) { |
1253 | state = object->purgable; |
1254 | compressed_count = vm_compressor_pager_get_count(object->pager); |
1255 | if (state == VM_PURGABLE_EMPTY) { |
1256 | acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count); |
1257 | acnt_info->pvm_volatile_compressed_count += compressed_count; |
1258 | } else { |
1259 | acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count); |
1260 | acnt_info->pvm_nonvolatile_compressed_count += compressed_count; |
1261 | } |
1262 | acnt_info->pvm_nonvolatile_count += object->wired_page_count; |
1263 | } |
1264 | } |
1265 | |
1266 | volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; |
1267 | vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info); |
1268 | |
1269 | volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; |
1270 | for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { |
1271 | vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info); |
1272 | } |
1273 | |
1274 | volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; |
1275 | for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { |
1276 | vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info); |
1277 | } |
1278 | lck_mtx_unlock(&vm_purgeable_queue_lock); |
1279 | |
1280 | acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE); |
1281 | acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE); |
1282 | acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE); |
1283 | acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE); |
1284 | |
1285 | return KERN_SUCCESS; |
1286 | } |
1287 | #endif /* DEVELOPMENT || DEBUG */ |
1288 | |
1289 | static uint64_t |
1290 | vm_purgeable_queue_purge_task_owned( |
1291 | purgeable_q_t queue, |
1292 | int group, |
1293 | task_t task) |
1294 | { |
1295 | vm_object_t object = VM_OBJECT_NULL; |
1296 | int collisions = 0; |
1297 | uint64_t num_pages_purged = 0; |
1298 | |
1299 | num_pages_purged = 0; |
1300 | collisions = 0; |
1301 | |
1302 | look_again: |
	lck_mtx_lock(&vm_purgeable_queue_lock);
1304 | |
1305 | for (object = (vm_object_t) queue_first(&queue->objq[group]); |
1306 | !queue_end(&queue->objq[group], (queue_entry_t) object); |
1307 | object = (vm_object_t) queue_next(&object->objq)) { |
1308 | if (object->vo_owner != task) { |
1309 | continue; |
1310 | } |
1311 | |
1312 | /* found an object: try and grab it */ |
1313 | if (!vm_object_lock_try(object)) { |
			lck_mtx_unlock(&vm_purgeable_queue_lock);
1315 | mutex_pause(collisions++); |
1316 | goto look_again; |
1317 | } |
1318 | /* got it ! */ |
1319 | |
1320 | collisions = 0; |
1321 | |
1322 | /* remove object from purgeable queue */ |
1323 | queue_remove(&queue->objq[group], object, |
1324 | vm_object_t, objq); |
1325 | object->objq.next = NULL; |
1326 | object->objq.prev = NULL; |
1327 | object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX; |
1328 | object->purgeable_queue_group = 0; |
1329 | /* one less volatile object for this object's owner */ |
1330 | assert(object->vo_owner == task); |
		vm_purgeable_volatile_owner_update(task, -1);
1332 | |
1333 | #if DEBUG |
1334 | object->vo_purgeable_volatilizer = NULL; |
1335 | #endif /* DEBUG */ |
1336 | queue_enter(&purgeable_nonvolatile_queue, object, |
1337 | vm_object_t, objq); |
1338 | assert(purgeable_nonvolatile_count >= 0); |
1339 | purgeable_nonvolatile_count++; |
1340 | assert(purgeable_nonvolatile_count > 0); |
1341 | /* one more nonvolatile object for this object's owner */ |
1342 | assert(object->vo_owner == task); |
		vm_purgeable_nonvolatile_owner_update(task, +1);
1344 | |
1345 | /* unlock purgeable queues */ |
		lck_mtx_unlock(&vm_purgeable_queue_lock);
1347 | |
1348 | if (object->purgeable_when_ripe) { |
1349 | /* remove a token */ |
1350 | vm_page_lock_queues(); |
1351 | vm_purgeable_token_remove_first(queue); |
1352 | vm_page_unlock_queues(); |
1353 | } |
1354 | |
1355 | /* purge the object */ |
		num_pages_purged += vm_object_purge(object, 0);
1357 | |
1358 | assert(object->purgable == VM_PURGABLE_EMPTY); |
1359 | /* no change for purgeable accounting */ |
1360 | vm_object_unlock(object); |
1361 | |
1362 | /* we unlocked the purgeable queues, so start over */ |
1363 | goto look_again; |
1364 | } |
1365 | |
	lck_mtx_unlock(&vm_purgeable_queue_lock);
1367 | |
1368 | return num_pages_purged; |
1369 | } |
1370 | |
1371 | uint64_t |
1372 | vm_purgeable_purge_task_owned( |
1373 | task_t task) |
1374 | { |
1375 | purgeable_q_t queue = NULL; |
1376 | int group = 0; |
1377 | uint64_t num_pages_purged = 0; |
1378 | |
1379 | num_pages_purged = 0; |
1380 | |
1381 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; |
	num_pages_purged += vm_purgeable_queue_purge_task_owned(queue,
	    0,
	    task);
1385 | |
1386 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; |
1387 | for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { |
1388 | num_pages_purged += vm_purgeable_queue_purge_task_owned(queue, |
1389 | group, |
1390 | task); |
1391 | } |
1392 | |
1393 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; |
1394 | for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { |
1395 | num_pages_purged += vm_purgeable_queue_purge_task_owned(queue, |
1396 | group, |
1397 | task); |
1398 | } |
1399 | |
1400 | return num_pages_purged; |
1401 | } |
1402 | |
1403 | void |
1404 | vm_purgeable_nonvolatile_enqueue( |
1405 | vm_object_t object, |
1406 | task_t owner) |
1407 | { |
1408 | int ledger_flags; |
1409 | kern_return_t kr; |
1410 | |
1411 | vm_object_lock_assert_exclusive(object); |
1412 | |
1413 | assert(object->purgable == VM_PURGABLE_NONVOLATILE); |
1414 | assert(object->vo_owner == NULL); |
1415 | |
	lck_mtx_lock(&vm_purgeable_queue_lock);
1417 | |
1418 | if (owner != NULL && |
1419 | owner->task_objects_disowning) { |
1420 | /* task is exiting and no longer tracking purgeable objects */ |
1421 | owner = VM_OBJECT_OWNER_DISOWNED; |
1422 | } |
1423 | if (owner == NULL) { |
1424 | owner = kernel_task; |
1425 | } |
1426 | #if DEBUG |
1427 | OSBacktrace(&object->purgeable_owner_bt[0], |
1428 | ARRAY_COUNT(object->purgeable_owner_bt)); |
1429 | object->vo_purgeable_volatilizer = NULL; |
1430 | #endif /* DEBUG */ |
1431 | |
1432 | ledger_flags = 0; |
1433 | if (object->vo_no_footprint) { |
1434 | ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT; |
1435 | } |
	kr = vm_object_ownership_change(object,
	    object->vo_ledger_tag, /* tag unchanged */
	    owner,
	    ledger_flags,
	    FALSE); /* task_objq_locked */
1441 | assert(kr == KERN_SUCCESS); |
1442 | |
1443 | assert(object->objq.next == NULL); |
1444 | assert(object->objq.prev == NULL); |
1445 | |
1446 | queue_enter(&purgeable_nonvolatile_queue, object, |
1447 | vm_object_t, objq); |
1448 | assert(purgeable_nonvolatile_count >= 0); |
1449 | purgeable_nonvolatile_count++; |
1450 | assert(purgeable_nonvolatile_count > 0); |
	lck_mtx_unlock(&vm_purgeable_queue_lock);
1452 | |
1453 | vm_object_lock_assert_exclusive(object); |
1454 | } |
1455 | |
1456 | void |
1457 | vm_purgeable_nonvolatile_dequeue( |
1458 | vm_object_t object) |
1459 | { |
1460 | task_t owner; |
1461 | kern_return_t kr; |
1462 | |
1463 | vm_object_lock_assert_exclusive(object); |
1464 | |
1465 | owner = VM_OBJECT_OWNER(object); |
1466 | #if DEBUG |
1467 | assert(object->vo_purgeable_volatilizer == NULL); |
1468 | #endif /* DEBUG */ |
1469 | if (owner != NULL) { |
1470 | /* |
1471 | * Update the owner's ledger to stop accounting |
1472 | * for this object. |
1473 | */ |
1474 | /* transfer ownership to the kernel */ |
1475 | assert(VM_OBJECT_OWNER(object) != kernel_task); |
		kr = vm_object_ownership_change(
			object,
			object->vo_ledger_tag, /* unchanged */
			VM_OBJECT_OWNER_DISOWNED, /* new owner */
			0, /* ledger_flags */
			FALSE); /* old_owner->task_objq locked */
1482 | assert(kr == KERN_SUCCESS); |
1483 | assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED); |
1484 | } |
1485 | |
	lck_mtx_lock(&vm_purgeable_queue_lock);
1487 | assert(object->objq.next != NULL); |
1488 | assert(object->objq.prev != NULL); |
1489 | queue_remove(&purgeable_nonvolatile_queue, object, |
1490 | vm_object_t, objq); |
1491 | object->objq.next = NULL; |
1492 | object->objq.prev = NULL; |
1493 | assert(purgeable_nonvolatile_count > 0); |
1494 | purgeable_nonvolatile_count--; |
1495 | assert(purgeable_nonvolatile_count >= 0); |
	lck_mtx_unlock(&vm_purgeable_queue_lock);
1497 | |
1498 | vm_object_lock_assert_exclusive(object); |
1499 | } |
1500 | |
1501 | void |
1502 | vm_purgeable_accounting( |
1503 | vm_object_t object, |
1504 | vm_purgable_t old_state) |
1505 | { |
1506 | task_t owner; |
1507 | int resident_page_count; |
1508 | int wired_page_count; |
1509 | int compressed_page_count; |
1510 | int ledger_idx_volatile; |
1511 | int ledger_idx_nonvolatile; |
1512 | int ledger_idx_volatile_compressed; |
1513 | int ledger_idx_nonvolatile_compressed; |
	boolean_t do_footprint;
1515 | |
1516 | vm_object_lock_assert_exclusive(object); |
1517 | assert(object->purgable != VM_PURGABLE_DENY); |
1518 | |
1519 | owner = VM_OBJECT_OWNER(object); |
1520 | if (owner == NULL || |
1521 | object->purgable == VM_PURGABLE_DENY) { |
1522 | return; |
1523 | } |
1524 | |
	vm_object_ledger_tag_ledgers(object,
	    &ledger_idx_volatile,
	    &ledger_idx_nonvolatile,
	    &ledger_idx_volatile_compressed,
	    &ledger_idx_nonvolatile_compressed,
	    &do_footprint);
1531 | |
1532 | resident_page_count = object->resident_page_count; |
1533 | wired_page_count = object->wired_page_count; |
1534 | if (VM_CONFIG_COMPRESSOR_IS_PRESENT && |
1535 | object->pager != NULL) { |
1536 | compressed_page_count = |
		    vm_compressor_pager_get_count(object->pager);
1538 | } else { |
1539 | compressed_page_count = 0; |
1540 | } |
1541 | |
1542 | if (old_state == VM_PURGABLE_VOLATILE || |
1543 | old_state == VM_PURGABLE_EMPTY) { |
1544 | /* less volatile bytes in ledger */ |
		ledger_debit(owner->ledger,
		    ledger_idx_volatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* less compressed volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_volatile_compressed,
		    ptoa_64(compressed_page_count));

		/* more non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_nonvolatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* more compressed non-volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_nonvolatile_compressed,
		    ptoa_64(compressed_page_count));
		if (do_footprint) {
			/* more footprint */
			ledger_credit(owner->ledger,
			    task_ledgers.phys_footprint,
			    ptoa_64(resident_page_count
			    + compressed_page_count
			    - wired_page_count));
1568 | } |
1569 | } else if (old_state == VM_PURGABLE_NONVOLATILE) { |
1570 | /* less non-volatile bytes in ledger */ |
		ledger_debit(owner->ledger,
		    ledger_idx_nonvolatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* less compressed non-volatile bytes in ledger */
		ledger_debit(owner->ledger,
		    ledger_idx_nonvolatile_compressed,
		    ptoa_64(compressed_page_count));
		if (do_footprint) {
			/* less footprint */
			ledger_debit(owner->ledger,
			    task_ledgers.phys_footprint,
			    ptoa_64(resident_page_count
			    + compressed_page_count
			    - wired_page_count));
		}

		/* more volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_volatile,
		    ptoa_64(resident_page_count - wired_page_count));
		/* more compressed volatile bytes in ledger */
		ledger_credit(owner->ledger,
		    ledger_idx_volatile_compressed,
		    ptoa_64(compressed_page_count));
1595 | } else { |
1596 | panic("vm_purgeable_accounting(%p): " |
1597 | "unexpected old_state=%d\n" , |
1598 | object, old_state); |
1599 | } |
1600 | |
1601 | vm_object_lock_assert_exclusive(object); |
1602 | } |
1603 | |
1604 | void |
1605 | vm_purgeable_nonvolatile_owner_update( |
1606 | task_t owner, |
1607 | int delta) |
1608 | { |
1609 | if (owner == NULL || delta == 0) { |
1610 | return; |
1611 | } |
1612 | |
1613 | if (delta > 0) { |
1614 | assert(owner->task_nonvolatile_objects >= 0); |
1615 | OSAddAtomic(delta, &owner->task_nonvolatile_objects); |
1616 | assert(owner->task_nonvolatile_objects > 0); |
1617 | } else { |
1618 | assert(owner->task_nonvolatile_objects > delta); |
1619 | OSAddAtomic(delta, &owner->task_nonvolatile_objects); |
1620 | assert(owner->task_nonvolatile_objects >= 0); |
1621 | } |
1622 | } |
1623 | |
1624 | void |
1625 | vm_purgeable_volatile_owner_update( |
1626 | task_t owner, |
1627 | int delta) |
1628 | { |
1629 | if (owner == NULL || delta == 0) { |
1630 | return; |
1631 | } |
1632 | |
1633 | if (delta > 0) { |
1634 | assert(owner->task_volatile_objects >= 0); |
1635 | OSAddAtomic(delta, &owner->task_volatile_objects); |
1636 | assert(owner->task_volatile_objects > 0); |
1637 | } else { |
1638 | assert(owner->task_volatile_objects > delta); |
1639 | OSAddAtomic(delta, &owner->task_volatile_objects); |
1640 | assert(owner->task_volatile_objects >= 0); |
1641 | } |
1642 | } |
1643 | |
1644 | void |
1645 | vm_object_owner_compressed_update( |
1646 | vm_object_t object, |
1647 | int delta) |
1648 | { |
1649 | task_t owner; |
1650 | int ledger_idx_volatile; |
1651 | int ledger_idx_nonvolatile; |
1652 | int ledger_idx_volatile_compressed; |
1653 | int ledger_idx_nonvolatile_compressed; |
	boolean_t do_footprint;
1655 | |
1656 | vm_object_lock_assert_exclusive(object); |
1657 | |
1658 | owner = VM_OBJECT_OWNER(object); |
1659 | |
1660 | if (delta == 0 || |
1661 | !object->internal || |
1662 | (object->purgable == VM_PURGABLE_DENY && |
1663 | !object->vo_ledger_tag) || |
1664 | owner == NULL) { |
1665 | /* not an owned purgeable (or tagged) VM object: nothing to update */ |
1666 | return; |
1667 | } |
1668 | |
	vm_object_ledger_tag_ledgers(object,
	    &ledger_idx_volatile,
	    &ledger_idx_nonvolatile,
	    &ledger_idx_volatile_compressed,
	    &ledger_idx_nonvolatile_compressed,
	    &do_footprint);
1675 | switch (object->purgable) { |
1676 | case VM_PURGABLE_DENY: |
1677 | /* not purgeable: must be ledger-tagged */ |
1678 | assert(object->vo_ledger_tag != VM_LEDGER_TAG_NONE); |
1679 | OS_FALLTHROUGH; |
1680 | case VM_PURGABLE_NONVOLATILE: |
1681 | if (delta > 0) { |
			ledger_credit(owner->ledger,
			    ledger_idx_nonvolatile_compressed,
			    ptoa_64(delta));
			if (do_footprint) {
				ledger_credit(owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(delta));
			}
		} else {
			ledger_debit(owner->ledger,
			    ledger_idx_nonvolatile_compressed,
			    ptoa_64(-delta));
			if (do_footprint) {
				ledger_debit(owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(-delta));
1698 | } |
1699 | } |
1700 | break; |
1701 | case VM_PURGABLE_VOLATILE: |
1702 | case VM_PURGABLE_EMPTY: |
1703 | if (delta > 0) { |
			ledger_credit(owner->ledger,
			    ledger_idx_volatile_compressed,
			    ptoa_64(delta));
		} else {
			ledger_debit(owner->ledger,
			    ledger_idx_volatile_compressed,
			    ptoa_64(-delta));
1711 | } |
1712 | break; |
1713 | default: |
1714 | panic("vm_purgeable_compressed_update(): " |
1715 | "unexpected purgable %d for object %p\n" , |
1716 | object->purgable, object); |
1717 | } |
1718 | } |
1719 | |