1 | /* |
2 | * Copyright (c) 2000-2020 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | /* |
57 | */ |
58 | /* |
59 | * File: vm/vm_object.c |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
61 | * |
62 | * Virtual memory object module. |
63 | */ |
64 | |
65 | #include <debug.h> |
66 | |
67 | #include <mach/mach_types.h> |
68 | #include <mach/memory_object.h> |
69 | #include <mach/vm_param.h> |
70 | |
71 | #include <mach/sdt.h> |
72 | |
73 | #include <ipc/ipc_types.h> |
74 | #include <ipc/ipc_port.h> |
75 | |
76 | #include <kern/kern_types.h> |
77 | #include <kern/assert.h> |
78 | #include <kern/queue.h> |
79 | #include <kern/kalloc.h> |
80 | #include <kern/zalloc.h> |
81 | #include <kern/host.h> |
82 | #include <kern/host_statistics.h> |
83 | #include <kern/processor.h> |
84 | #include <kern/misc_protos.h> |
85 | #include <kern/policy_internal.h> |
86 | |
87 | #include <sys/kdebug_triage.h> |
88 | |
89 | #include <vm/memory_object.h> |
90 | #include <vm/vm_compressor_pager.h> |
91 | #include <vm/vm_fault.h> |
92 | #include <vm/vm_map.h> |
93 | #include <vm/vm_object.h> |
94 | #include <vm/vm_page.h> |
95 | #include <vm/vm_pageout.h> |
96 | #include <vm/vm_protos.h> |
97 | #include <vm/vm_purgeable_internal.h> |
98 | |
99 | #include <vm/vm_compressor.h> |
100 | |
101 | #if CONFIG_PHANTOM_CACHE |
102 | #include <vm/vm_phantom_cache.h> |
103 | #endif |
104 | |
105 | #if VM_OBJECT_ACCESS_TRACKING |
106 | uint64_t vm_object_access_tracking_reads = 0; |
107 | uint64_t vm_object_access_tracking_writes = 0; |
108 | #endif /* VM_OBJECT_ACCESS_TRACKING */ |
109 | |
110 | boolean_t vm_object_collapse_compressor_allowed = TRUE; |
111 | |
112 | struct vm_counters vm_counters; |
113 | |
114 | #if DEVELOPMENT || DEBUG |
115 | extern struct memory_object_pager_ops shared_region_pager_ops; |
116 | extern unsigned int shared_region_pagers_resident_count; |
117 | extern unsigned int shared_region_pagers_resident_peak; |
118 | #endif /* DEVELOPMENT || DEBUG */ |
119 | |
120 | #if VM_OBJECT_TRACKING |
121 | btlog_t vm_object_tracking_btlog; |
122 | |
123 | void |
124 | vm_object_tracking_init(void) |
125 | { |
126 | int vm_object_tracking; |
127 | |
128 | vm_object_tracking = 1; |
	PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
130 | sizeof(vm_object_tracking)); |
131 | |
132 | if (vm_object_tracking) { |
133 | vm_object_tracking_btlog = btlog_create(BTLOG_HASH, |
134 | VM_OBJECT_TRACKING_NUM_RECORDS); |
135 | assert(vm_object_tracking_btlog); |
136 | } |
137 | } |
138 | #endif /* VM_OBJECT_TRACKING */ |
139 | |
140 | /* |
141 | * Virtual memory objects maintain the actual data |
142 | * associated with allocated virtual memory. A given |
143 | * page of memory exists within exactly one object. |
144 | * |
145 | * An object is only deallocated when all "references" |
146 | * are given up. |
147 | * |
148 | * Associated with each object is a list of all resident |
149 | * memory pages belonging to that object; this list is |
150 | * maintained by the "vm_page" module, but locked by the object's |
151 | * lock. |
152 | * |
153 | * Each object also records the memory object reference |
154 | * that is used by the kernel to request and write |
155 | * back data (the memory object, field "pager"), etc... |
156 | * |
157 | * Virtual memory objects are allocated to provide |
158 | * zero-filled memory (vm_allocate) or map a user-defined |
159 | * memory object into a virtual address space (vm_map). |
160 | * |
161 | * Virtual memory objects that refer to a user-defined |
162 | * memory object are called "permanent", because all changes |
163 | * made in virtual memory are reflected back to the |
164 | * memory manager, which may then store it permanently. |
165 | * Other virtual memory objects are called "temporary", |
166 | * meaning that changes need be written back only when |
167 | * necessary to reclaim pages, and that storage associated |
168 | * with the object can be discarded once it is no longer |
169 | * mapped. |
170 | * |
171 | * A permanent memory object may be mapped into more |
172 | * than one virtual address space. Moreover, two threads |
173 | * may attempt to make the first mapping of a memory |
174 | * object concurrently. Only one thread is allowed to |
175 | * complete this mapping; all others wait for the |
 *	"pager_initialized" field to be asserted, indicating
177 | * that the first thread has initialized all of the |
178 | * necessary fields in the virtual memory object structure. |
179 | * |
180 | * The kernel relies on a *default memory manager* to |
181 | * provide backing storage for the zero-filled virtual |
182 | * memory objects. The pager memory objects associated |
183 | * with these temporary virtual memory objects are only |
184 | * requested from the default memory manager when it |
185 | * becomes necessary. Virtual memory objects |
186 | * that depend on the default memory manager are called |
187 | * "internal". The "pager_created" field is provided to |
188 | * indicate whether these ports have ever been allocated. |
189 | * |
190 | * The kernel may also create virtual memory objects to |
191 | * hold changed pages after a copy-on-write operation. |
192 | * In this case, the virtual memory object (and its |
193 | * backing storage -- its memory object) only contain |
194 | * those pages that have been changed. The "shadow" |
195 | * field refers to the virtual memory object that contains |
196 | * the remainder of the contents. The "shadow_offset" |
197 | * field indicates where in the "shadow" these contents begin. |
198 | * The "copy" field refers to a virtual memory object |
199 | * to which changed pages must be copied before changing |
200 | * this object, in order to implement another form |
201 | * of copy-on-write optimization. |
202 | * |
203 | * The virtual memory object structure also records |
204 | * the attributes associated with its memory object. |
205 | * The "pager_ready", "can_persist" and "copy_strategy" |
206 | * fields represent those attributes. The "cached_list" |
207 | * field is used in the implementation of the persistence |
208 | * attribute. |
209 | * |
210 | * ZZZ Continue this comment. |
211 | */ |
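
/*
 * As a quick illustration of the shadow chain described above, a lookup
 * conceptually walks from an object down through its "shadow" pointers,
 * biasing the offset by "vo_shadow_offset" at each hop, until a resident
 * page is found.  The sketch below is illustrative only and is compiled
 * out; the helper name is hypothetical, and the real walk is performed
 * under the object locks by the fault path.
 */
#if 0 /* illustrative sketch, not built */
static vm_page_t
example_shadow_chain_lookup(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_page_t m;

	while (object != VM_OBJECT_NULL) {
		/* does this object hold its own (possibly modified) copy? */
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			return m;
		}
		/* otherwise, keep looking in the backing object */
		offset += object->vo_shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;
}
#endif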
212 | |
213 | /* Forward declarations for internal functions. */ |
214 | static kern_return_t vm_object_terminate( |
215 | vm_object_t object); |
216 | |
217 | static void vm_object_do_collapse( |
218 | vm_object_t object, |
219 | vm_object_t backing_object); |
220 | |
221 | static void vm_object_do_bypass( |
222 | vm_object_t object, |
223 | vm_object_t backing_object); |
224 | |
225 | static void vm_object_release_pager( |
	memory_object_t pager);
227 | |
228 | SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */ |
229 | |
230 | /* |
 * All wired-down kernel memory belongs to this memory object
 * (kernel_object) by default to avoid wasting data structures.
233 | */ |
234 | static struct vm_object kernel_object_store VM_PAGE_PACKED_ALIGNED; |
235 | const vm_object_t kernel_object_default = &kernel_object_store; |
236 | |
237 | static struct vm_object compressor_object_store VM_PAGE_PACKED_ALIGNED; |
238 | const vm_object_t compressor_object = &compressor_object_store; |
239 | |
240 | /* |
241 | * This object holds all pages that have been retired due to errors like ECC. |
242 | * The system should never use the page or look at its contents. The offset |
243 | * in this object is the same as the page's physical address. |
244 | */ |
245 | static struct vm_object retired_pages_object_store VM_PAGE_PACKED_ALIGNED; |
246 | const vm_object_t retired_pages_object = &retired_pages_object_store; |
247 | |
248 | static struct vm_object exclaves_object_store VM_PAGE_PACKED_ALIGNED; |
249 | const vm_object_t exclaves_object = &exclaves_object_store; |
250 | |
251 | |
252 | /* |
253 | * Virtual memory objects are initialized from |
254 | * a template (see vm_object_allocate). |
255 | * |
256 | * When adding a new field to the virtual memory |
257 | * object structure, be sure to add initialization |
258 | * (see _vm_object_allocate()). |
259 | */ |
260 | static const struct vm_object vm_object_template = { |
261 | .memq.prev = 0, |
262 | .memq.next = 0, |
263 | /* |
264 | * The lock will be initialized for each allocated object in |
265 | * _vm_object_allocate(), so we don't need to initialize it in |
266 | * the vm_object_template. |
267 | */ |
268 | .vo_size = 0, |
269 | .memq_hint = VM_PAGE_NULL, |
270 | .ref_count = 1, |
271 | .resident_page_count = 0, |
272 | .wired_page_count = 0, |
273 | .reusable_page_count = 0, |
274 | .vo_copy = VM_OBJECT_NULL, |
275 | .vo_copy_version = 0, |
276 | .shadow = VM_OBJECT_NULL, |
277 | .vo_shadow_offset = (vm_object_offset_t) 0, |
278 | .pager = MEMORY_OBJECT_NULL, |
279 | .paging_offset = 0, |
280 | .pager_control = MEMORY_OBJECT_CONTROL_NULL, |
281 | .copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC, |
282 | .paging_in_progress = 0, |
283 | .vo_size_delta = 0, |
284 | .activity_in_progress = 0, |
285 | |
286 | /* Begin bitfields */ |
287 | .all_wanted = 0, /* all bits FALSE */ |
288 | .pager_created = FALSE, |
289 | .pager_initialized = FALSE, |
290 | .pager_ready = FALSE, |
291 | .pager_trusted = FALSE, |
292 | .can_persist = FALSE, |
293 | .internal = TRUE, |
294 | .private = FALSE, |
295 | .pageout = FALSE, |
296 | .alive = TRUE, |
297 | .purgable = VM_PURGABLE_DENY, |
298 | .purgeable_when_ripe = FALSE, |
299 | .purgeable_only_by_kernel = FALSE, |
300 | .shadowed = FALSE, |
301 | .true_share = FALSE, |
302 | .terminating = FALSE, |
303 | .named = FALSE, |
304 | .shadow_severed = FALSE, |
305 | .phys_contiguous = FALSE, |
306 | .nophyscache = FALSE, |
307 | /* End bitfields */ |
308 | |
309 | .cached_list.prev = NULL, |
310 | .cached_list.next = NULL, |
311 | |
312 | .last_alloc = (vm_object_offset_t) 0, |
313 | .sequential = (vm_object_offset_t) 0, |
314 | .pages_created = 0, |
315 | .pages_used = 0, |
316 | .scan_collisions = 0, |
317 | #if CONFIG_PHANTOM_CACHE |
318 | .phantom_object_id = 0, |
319 | #endif |
320 | .cow_hint = ~(vm_offset_t)0, |
321 | |
322 | /* cache bitfields */ |
323 | .wimg_bits = VM_WIMG_USE_DEFAULT, |
324 | .set_cache_attr = FALSE, |
325 | .object_is_shared_cache = FALSE, |
326 | .code_signed = FALSE, |
327 | .transposed = FALSE, |
328 | .mapping_in_progress = FALSE, |
329 | .phantom_isssd = FALSE, |
330 | .volatile_empty = FALSE, |
331 | .volatile_fault = FALSE, |
332 | .all_reusable = FALSE, |
333 | .blocked_access = FALSE, |
334 | .vo_ledger_tag = VM_LEDGER_TAG_NONE, |
335 | .vo_no_footprint = FALSE, |
336 | #if CONFIG_IOSCHED || UPL_DEBUG |
337 | .uplq.prev = NULL, |
338 | .uplq.next = NULL, |
339 | #endif /* UPL_DEBUG */ |
340 | #ifdef VM_PIP_DEBUG |
341 | .pip_holders = {0}, |
342 | #endif /* VM_PIP_DEBUG */ |
343 | |
344 | .objq.next = NULL, |
345 | .objq.prev = NULL, |
346 | .task_objq.next = NULL, |
347 | .task_objq.prev = NULL, |
348 | |
349 | .purgeable_queue_type = PURGEABLE_Q_TYPE_MAX, |
350 | .purgeable_queue_group = 0, |
351 | |
352 | .wire_tag = VM_KERN_MEMORY_NONE, |
353 | #if !VM_TAG_ACTIVE_UPDATE |
354 | .wired_objq.next = NULL, |
355 | .wired_objq.prev = NULL, |
356 | #endif /* ! VM_TAG_ACTIVE_UPDATE */ |
357 | |
358 | .io_tracking = FALSE, |
359 | |
360 | #if CONFIG_SECLUDED_MEMORY |
361 | .eligible_for_secluded = FALSE, |
362 | .can_grab_secluded = FALSE, |
363 | #else /* CONFIG_SECLUDED_MEMORY */ |
364 | .__object3_unused_bits = 0, |
365 | #endif /* CONFIG_SECLUDED_MEMORY */ |
366 | |
367 | .for_realtime = false, |
368 | .no_pager_reason = VM_OBJECT_DESTROY_UNKNOWN_REASON, |
369 | |
370 | #if VM_OBJECT_ACCESS_TRACKING |
371 | .access_tracking = FALSE, |
372 | .access_tracking_reads = 0, |
373 | .access_tracking_writes = 0, |
374 | #endif /* VM_OBJECT_ACCESS_TRACKING */ |
375 | |
376 | #if DEBUG |
377 | .purgeable_owner_bt = {0}, |
378 | .vo_purgeable_volatilizer = NULL, |
379 | .purgeable_volatilizer_bt = {0}, |
380 | #endif /* DEBUG */ |
381 | }; |
382 | |
LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object");
LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache");
385 | LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0); |
386 | LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG); |
387 | LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG); |
388 | |
389 | unsigned int vm_page_purged_wired = 0; |
390 | unsigned int vm_page_purged_busy = 0; |
391 | unsigned int vm_page_purged_others = 0; |
392 | |
393 | static queue_head_t vm_object_cached_list; |
394 | static uint32_t vm_object_cache_pages_freed = 0; |
395 | static uint32_t vm_object_cache_pages_moved = 0; |
396 | static uint32_t vm_object_cache_pages_skipped = 0; |
397 | static uint32_t vm_object_cache_adds = 0; |
398 | static uint32_t vm_object_cached_count = 0; |
399 | static LCK_MTX_DECLARE_ATTR(vm_object_cached_lock_data, |
400 | &vm_object_cache_lck_grp, &vm_object_lck_attr); |
401 | |
402 | static uint32_t vm_object_page_grab_failed = 0; |
403 | static uint32_t vm_object_page_grab_skipped = 0; |
404 | static uint32_t vm_object_page_grab_returned = 0; |
405 | static uint32_t vm_object_page_grab_pmapped = 0; |
406 | static uint32_t vm_object_page_grab_reactivations = 0; |
407 | |
408 | #define vm_object_cache_lock_spin() \ |
409 | lck_mtx_lock_spin(&vm_object_cached_lock_data) |
410 | #define vm_object_cache_unlock() \ |
411 | lck_mtx_unlock(&vm_object_cached_lock_data) |
412 | |
413 | static void vm_object_cache_remove_locked(vm_object_t); |
414 | |
415 | |
416 | static void vm_object_reap(vm_object_t object); |
417 | static void vm_object_reap_async(vm_object_t object); |
418 | static void vm_object_reaper_thread(void); |
419 | |
420 | static LCK_MTX_DECLARE_ATTR(vm_object_reaper_lock_data, |
421 | &vm_object_lck_grp, &vm_object_lck_attr); |
422 | |
423 | static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */ |
424 | unsigned int vm_object_reap_count = 0; |
425 | unsigned int vm_object_reap_count_async = 0; |
426 | |
427 | #define vm_object_reaper_lock() \ |
428 | lck_mtx_lock(&vm_object_reaper_lock_data) |
429 | #define vm_object_reaper_lock_spin() \ |
430 | lck_mtx_lock_spin(&vm_object_reaper_lock_data) |
431 | #define vm_object_reaper_unlock() \ |
432 | lck_mtx_unlock(&vm_object_reaper_lock_data) |
433 | |
434 | #if CONFIG_IOSCHED |
435 | /* I/O Re-prioritization request list */ |
436 | queue_head_t io_reprioritize_list = QUEUE_HEAD_INITIALIZER(io_reprioritize_list); |
437 | |
438 | LCK_SPIN_DECLARE_ATTR(io_reprioritize_list_lock, |
439 | &vm_object_lck_grp, &vm_object_lck_attr); |
440 | |
441 | #define IO_REPRIORITIZE_LIST_LOCK() \ |
442 | lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp) |
443 | #define IO_REPRIORITIZE_LIST_UNLOCK() \ |
444 | lck_spin_unlock(&io_reprioritize_list_lock) |
445 | |
ZONE_DEFINE_TYPE(io_reprioritize_req_zone, "io_reprioritize_req",
447 | struct io_reprioritize_req, ZC_NONE); |
448 | |
449 | /* I/O Re-prioritization thread */ |
450 | int io_reprioritize_wakeup = 0; |
451 | static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused); |
452 | |
453 | #define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup) |
454 | #define IO_REPRIO_THREAD_CONTINUATION() \ |
455 | { \ |
456 | assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \ |
457 | thread_block(io_reprioritize_thread); \ |
458 | } |
459 | |
460 | void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int); |
461 | void vm_page_handle_prio_inversion(vm_object_t, vm_page_t); |
462 | void vm_decmp_upl_reprioritize(upl_t, int); |
463 | #endif |
464 | |
465 | #if 0 |
466 | #undef KERNEL_DEBUG |
467 | #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT |
468 | #endif |
469 | |
470 | |
471 | void |
472 | vm_object_set_size( |
473 | vm_object_t object, |
474 | vm_object_size_t outer_size, |
475 | vm_object_size_t inner_size) |
476 | { |
477 | object->vo_size = vm_object_round_page(outer_size); |
478 | #if KASAN |
479 | assert(object->vo_size - inner_size <= USHRT_MAX); |
480 | object->vo_size_delta = (unsigned short)(object->vo_size - inner_size); |
481 | #else |
482 | (void)inner_size; |
483 | #endif |
484 | } |
485 | |
486 | |
487 | /* |
488 | * vm_object_allocate: |
489 | * |
490 | * Returns a new object with the given size. |
491 | */ |
492 | |
493 | __private_extern__ void |
494 | _vm_object_allocate( |
495 | vm_object_size_t size, |
496 | vm_object_t object) |
497 | { |
498 | *object = vm_object_template; |
499 | vm_page_queue_init(&object->memq); |
500 | #if UPL_DEBUG || CONFIG_IOSCHED |
501 | queue_init(&object->uplq); |
502 | #endif |
503 | vm_object_lock_init(object); |
	vm_object_set_size(object, size, size);
505 | |
506 | #if VM_OBJECT_TRACKING_OP_CREATED |
507 | if (vm_object_tracking_btlog) { |
508 | btlog_record(vm_object_tracking_btlog, object, |
509 | VM_OBJECT_TRACKING_OP_CREATED, |
510 | btref_get(__builtin_frame_address(0), 0)); |
511 | } |
512 | #endif /* VM_OBJECT_TRACKING_OP_CREATED */ |
513 | } |
514 | |
515 | __private_extern__ vm_object_t |
516 | vm_object_allocate( |
517 | vm_object_size_t size) |
518 | { |
519 | vm_object_t object; |
520 | |
521 | object = zalloc_flags(vm_object_zone, Z_WAITOK | Z_NOFAIL); |
522 | _vm_object_allocate(size, object); |
523 | |
524 | return object; |
525 | } |
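
/*
 * Minimal usage sketch for the allocation path above: the object comes
 * back holding a single reference, which is eventually given up through
 * vm_object_deallocate() (see below).  Compiled out; the function name
 * and the 4-page size are arbitrary examples, not part of this module.
 */
#if 0 /* illustrative sketch, not built */
static void
example_vm_object_lifetime(void)
{
	vm_object_t object;

	/* new internal, temporary object with one reference */
	object = vm_object_allocate((vm_object_size_t)(4 * PAGE_SIZE));

	/* ... enter pages, map it into an address space, etc ... */

	/* release the allocation reference; this may terminate the object */
	vm_object_deallocate(object);
}
#endif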
526 | |
TUNABLE(bool, workaround_41447923, "workaround_41447923", false);
528 | |
529 | /* |
530 | * vm_object_bootstrap: |
531 | * |
532 | * Initialize the VM objects module. |
533 | */ |
534 | __startup_func |
535 | void |
536 | vm_object_bootstrap(void) |
537 | { |
538 | vm_size_t vm_object_size; |
539 | |
540 | assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t)); |
541 | |
542 | vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) & |
543 | ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1); |
544 | |
	vm_object_zone = zone_create("vm objects", vm_object_size,
	    ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED | ZC_VM | ZC_NOTBITAG);
547 | |
548 | queue_init(&vm_object_cached_list); |
549 | |
550 | queue_init(&vm_object_reaper_queue); |
551 | |
552 | /* |
553 | * Initialize the "kernel object" |
554 | */ |
555 | |
556 | /* |
557 | * Note that in the following size specifications, we need to add 1 because |
558 | * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size. |
559 | */ |
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object_default);
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object);
562 | kernel_object_default->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
563 | compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
564 | kernel_object_default->no_tag_update = TRUE; |
565 | |
566 | /* |
567 | * The object to hold retired VM pages. |
568 | */ |
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object);
570 | retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
571 | |
572 | /** |
573 | * The object to hold pages owned by exclaves. |
574 | */ |
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, exclaves_object);
576 | exclaves_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
577 | } |
578 | |
579 | #if CONFIG_IOSCHED |
580 | void |
581 | vm_io_reprioritize_init(void) |
582 | { |
583 | kern_return_t result; |
584 | thread_t thread = THREAD_NULL; |
585 | |
	result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
587 | if (result == KERN_SUCCESS) { |
		thread_set_thread_name(thread, "VM_io_reprioritize_thread");
589 | thread_deallocate(thread); |
590 | } else { |
		panic("Could not create io_reprioritize_thread");
592 | } |
593 | } |
594 | #endif |
595 | |
596 | void |
597 | vm_object_reaper_init(void) |
598 | { |
599 | kern_return_t kr; |
600 | thread_t thread; |
601 | |
602 | kr = kernel_thread_start_priority( |
		(thread_continue_t) vm_object_reaper_thread,
604 | NULL, |
605 | BASEPRI_VM, |
		&thread);
607 | if (kr != KERN_SUCCESS) { |
		panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
609 | } |
	thread_set_thread_name(thread, "VM_object_reaper_thread");
611 | thread_deallocate(thread); |
612 | } |
613 | |
614 | |
615 | /* |
616 | * vm_object_deallocate: |
617 | * |
618 | * Release a reference to the specified object, |
619 | * gained either through a vm_object_allocate |
620 | * or a vm_object_reference call. When all references |
621 | * are gone, storage associated with this object |
622 | * may be relinquished. |
623 | * |
624 | * No object may be locked. |
625 | */ |
626 | unsigned long vm_object_deallocate_shared_successes = 0; |
627 | unsigned long vm_object_deallocate_shared_failures = 0; |
628 | unsigned long vm_object_deallocate_shared_swap_failures = 0; |
629 | |
630 | __private_extern__ void |
631 | vm_object_deallocate( |
632 | vm_object_t object) |
633 | { |
634 | vm_object_t shadow = VM_OBJECT_NULL; |
635 | |
636 | // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */ |
637 | // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */ |
638 | |
639 | if (object == VM_OBJECT_NULL) { |
640 | return; |
641 | } |
642 | |
643 | if (is_kernel_object(object) || object == compressor_object || object == retired_pages_object) { |
644 | vm_object_lock_shared(object); |
645 | |
646 | OSAddAtomic(-1, &object->ref_count); |
647 | |
648 | if (object->ref_count == 0) { |
649 | if (is_kernel_object(object)) { |
				panic("vm_object_deallocate: losing a kernel_object");
651 | } else if (object == retired_pages_object) { |
				panic("vm_object_deallocate: losing retired_pages_object");
653 | } else { |
				panic("vm_object_deallocate: losing compressor_object");
655 | } |
656 | } |
657 | vm_object_unlock(object); |
658 | return; |
659 | } |
660 | |
661 | if (object->ref_count == 2 && |
662 | object->named) { |
663 | /* |
664 | * This "named" object's reference count is about to |
665 | * drop from 2 to 1: |
666 | * we'll need to call memory_object_last_unmap(). |
667 | */ |
668 | } else if (object->ref_count == 2 && |
669 | object->internal && |
670 | object->shadow != VM_OBJECT_NULL) { |
671 | /* |
672 | * This internal object's reference count is about to |
673 | * drop from 2 to 1 and it has a shadow object: |
674 | * we'll want to try and collapse this object with its |
675 | * shadow. |
676 | */ |
677 | } else if (object->ref_count >= 2) { |
678 | UInt32 original_ref_count; |
679 | volatile UInt32 *ref_count_p; |
680 | Boolean atomic_swap; |
681 | |
682 | /* |
683 | * The object currently looks like it is not being |
684 | * kept alive solely by the reference we're about to release. |
685 | * Let's try and release our reference without taking |
686 | * all the locks we would need if we had to terminate the |
687 | * object (cache lock + exclusive object lock). |
688 | * Lock the object "shared" to make sure we don't race with |
689 | * anyone holding it "exclusive". |
690 | */ |
691 | vm_object_lock_shared(object); |
692 | ref_count_p = (volatile UInt32 *) &object->ref_count; |
693 | original_ref_count = object->ref_count; |
694 | /* |
695 | * Test again as "ref_count" could have changed. |
696 | * "named" shouldn't change. |
697 | */ |
698 | if (original_ref_count == 2 && |
699 | object->named) { |
700 | /* need to take slow path for m_o_last_unmap() */ |
701 | atomic_swap = FALSE; |
702 | } else if (original_ref_count == 2 && |
703 | object->internal && |
704 | object->shadow != VM_OBJECT_NULL) { |
705 | /* need to take slow path for vm_object_collapse() */ |
706 | atomic_swap = FALSE; |
707 | } else if (original_ref_count < 2) { |
708 | /* need to take slow path for vm_object_terminate() */ |
709 | atomic_swap = FALSE; |
710 | } else { |
711 | /* try an atomic update with the shared lock */ |
712 | atomic_swap = OSCompareAndSwap( |
713 | original_ref_count, |
714 | original_ref_count - 1, |
715 | (UInt32 *) &object->ref_count); |
716 | if (atomic_swap == FALSE) { |
717 | vm_object_deallocate_shared_swap_failures++; |
718 | /* fall back to the slow path... */ |
719 | } |
720 | } |
721 | |
722 | vm_object_unlock(object); |
723 | |
724 | if (atomic_swap) { |
725 | /* |
726 | * ref_count was updated atomically ! |
727 | */ |
728 | vm_object_deallocate_shared_successes++; |
729 | return; |
730 | } |
731 | |
732 | /* |
733 | * Someone else updated the ref_count at the same |
734 | * time and we lost the race. Fall back to the usual |
735 | * slow but safe path... |
736 | */ |
737 | vm_object_deallocate_shared_failures++; |
738 | } |
739 | |
740 | while (object != VM_OBJECT_NULL) { |
741 | vm_object_lock(object); |
742 | |
743 | assert(object->ref_count > 0); |
744 | |
745 | /* |
746 | * If the object has a named reference, and only |
747 | * that reference would remain, inform the pager |
748 | * about the last "mapping" reference going away. |
749 | */ |
750 | if ((object->ref_count == 2) && (object->named)) { |
			memory_object_t pager = object->pager;
752 | |
753 | /* Notify the Pager that there are no */ |
754 | /* more mappers for this object */ |
755 | |
756 | if (pager != MEMORY_OBJECT_NULL) { |
757 | vm_object_mapping_wait(object, THREAD_UNINT); |
758 | vm_object_mapping_begin(object); |
759 | vm_object_unlock(object); |
760 | |
				memory_object_last_unmap(pager);
762 | |
763 | vm_object_lock(object); |
764 | vm_object_mapping_end(object); |
765 | } |
766 | assert(object->ref_count > 0); |
767 | } |
768 | |
769 | /* |
770 | * Lose the reference. If other references |
771 | * remain, then we are done, unless we need |
772 | * to retry a cache trim. |
773 | * If it is the last reference, then keep it |
774 | * until any pending initialization is completed. |
775 | */ |
776 | |
777 | /* if the object is terminating, it cannot go into */ |
778 | /* the cache and we obviously should not call */ |
779 | /* terminate again. */ |
780 | |
781 | if ((object->ref_count > 1) || object->terminating) { |
782 | vm_object_lock_assert_exclusive(object); |
783 | object->ref_count--; |
784 | |
785 | if (object->ref_count == 1 && |
786 | object->shadow != VM_OBJECT_NULL) { |
787 | /* |
788 | * There's only one reference left on this |
789 | * VM object. We can't tell if it's a valid |
790 | * one (from a mapping for example) or if this |
791 | * object is just part of a possibly stale and |
792 | * useless shadow chain. |
793 | * We would like to try and collapse it into |
794 | * its parent, but we don't have any pointers |
795 | * back to this parent object. |
796 | * But we can try and collapse this object with |
797 | * its own shadows, in case these are useless |
798 | * too... |
799 | * We can't bypass this object though, since we |
800 | * don't know if this last reference on it is |
801 | * meaningful or not. |
802 | */ |
				vm_object_collapse(object, 0, FALSE);
804 | } |
805 | vm_object_unlock(object); |
806 | return; |
807 | } |
808 | |
809 | /* |
810 | * We have to wait for initialization |
811 | * before destroying or caching the object. |
812 | */ |
813 | |
814 | if (object->pager_created && !object->pager_initialized) { |
815 | assert(!object->can_persist); |
816 | vm_object_assert_wait(object, |
817 | VM_OBJECT_EVENT_INITIALIZED, |
818 | THREAD_UNINT); |
819 | vm_object_unlock(object); |
820 | |
821 | thread_block(THREAD_CONTINUE_NULL); |
822 | continue; |
823 | } |
824 | |
825 | /* |
826 | * Terminate this object. If it had a shadow, |
827 | * then deallocate it; otherwise, if we need |
828 | * to retry a cache trim, do so now; otherwise, |
829 | * we are done. "pageout" objects have a shadow, |
830 | * but maintain a "paging reference" rather than |
831 | * a normal reference. |
832 | */ |
833 | shadow = object->pageout?VM_OBJECT_NULL:object->shadow; |
834 | |
835 | if (vm_object_terminate(object) != KERN_SUCCESS) { |
836 | return; |
837 | } |
838 | if (shadow != VM_OBJECT_NULL) { |
839 | object = shadow; |
840 | continue; |
841 | } |
842 | return; |
843 | } |
844 | } |
845 | |
846 | |
847 | |
848 | vm_page_t |
849 | vm_object_page_grab( |
850 | vm_object_t object) |
851 | { |
852 | vm_page_t p, next_p; |
853 | int p_limit = 0; |
854 | int p_skipped = 0; |
855 | |
856 | vm_object_lock_assert_exclusive(object); |
857 | |
858 | next_p = (vm_page_t)vm_page_queue_first(&object->memq); |
859 | p_limit = MIN(50, object->resident_page_count); |
860 | |
861 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) { |
862 | p = next_p; |
863 | next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq); |
864 | |
865 | if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) { |
866 | goto move_page_in_obj; |
867 | } |
868 | |
869 | if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) { |
870 | vm_page_lockspin_queues(); |
871 | |
872 | if (p->vmp_pmapped) { |
873 | int refmod_state; |
874 | |
875 | vm_object_page_grab_pmapped++; |
876 | |
877 | if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) { |
					refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));
879 | |
880 | if (refmod_state & VM_MEM_REFERENCED) { |
881 | p->vmp_reference = TRUE; |
882 | } |
883 | if (refmod_state & VM_MEM_MODIFIED) { |
884 | SET_PAGE_DIRTY(p, FALSE); |
885 | } |
886 | } |
887 | if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) { |
888 | vm_page_lockconvert_queues(); |
					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
890 | |
891 | if (refmod_state & VM_MEM_REFERENCED) { |
892 | p->vmp_reference = TRUE; |
893 | } |
894 | if (refmod_state & VM_MEM_MODIFIED) { |
895 | SET_PAGE_DIRTY(p, FALSE); |
896 | } |
897 | |
898 | if (p->vmp_dirty == FALSE) { |
899 | goto take_page; |
900 | } |
901 | } |
902 | } |
903 | if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) { |
				vm_page_activate(p);
905 | |
906 | counter_inc(&vm_statistics_reactivations); |
907 | vm_object_page_grab_reactivations++; |
908 | } |
909 | vm_page_unlock_queues(); |
910 | move_page_in_obj: |
911 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
912 | vm_page_queue_enter(&object->memq, p, vmp_listq); |
913 | |
914 | p_skipped++; |
915 | continue; |
916 | } |
917 | vm_page_lockspin_queues(); |
918 | take_page: |
		vm_page_free_prepare_queues(p);
920 | vm_object_page_grab_returned++; |
921 | vm_object_page_grab_skipped += p_skipped; |
922 | |
923 | vm_page_unlock_queues(); |
924 | |
		vm_page_free_prepare_object(p, TRUE);
926 | |
927 | return p; |
928 | } |
929 | vm_object_page_grab_skipped += p_skipped; |
930 | vm_object_page_grab_failed++; |
931 | |
932 | return NULL; |
933 | } |
934 | |
935 | |
936 | |
937 | #define EVICT_PREPARE_LIMIT 64 |
938 | #define EVICT_AGE 10 |
939 | |
940 | static clock_sec_t vm_object_cache_aging_ts = 0; |
941 | |
942 | static void |
943 | vm_object_cache_remove_locked( |
944 | vm_object_t object) |
945 | { |
946 | assert(object->purgable == VM_PURGABLE_DENY); |
947 | |
948 | queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list); |
949 | object->cached_list.next = NULL; |
950 | object->cached_list.prev = NULL; |
951 | |
952 | vm_object_cached_count--; |
953 | } |
954 | |
955 | void |
956 | vm_object_cache_remove( |
957 | vm_object_t object) |
958 | { |
959 | vm_object_cache_lock_spin(); |
960 | |
961 | if (object->cached_list.next && |
962 | object->cached_list.prev) { |
963 | vm_object_cache_remove_locked(object); |
964 | } |
965 | |
966 | vm_object_cache_unlock(); |
967 | } |
968 | |
969 | void |
970 | vm_object_cache_add( |
971 | vm_object_t object) |
972 | { |
973 | clock_sec_t sec; |
974 | clock_nsec_t nsec; |
975 | |
976 | assert(object->purgable == VM_PURGABLE_DENY); |
977 | |
978 | if (object->resident_page_count == 0) { |
979 | return; |
980 | } |
	clock_get_system_nanotime(&sec, &nsec);
982 | |
983 | vm_object_cache_lock_spin(); |
984 | |
985 | if (object->cached_list.next == NULL && |
986 | object->cached_list.prev == NULL) { |
987 | queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list); |
988 | object->vo_cache_ts = sec + EVICT_AGE; |
989 | object->vo_cache_pages_to_scan = object->resident_page_count; |
990 | |
991 | vm_object_cached_count++; |
992 | vm_object_cache_adds++; |
993 | } |
994 | vm_object_cache_unlock(); |
995 | } |
996 | |
997 | int |
998 | vm_object_cache_evict( |
999 | int num_to_evict, |
1000 | int max_objects_to_examine) |
1001 | { |
1002 | vm_object_t object = VM_OBJECT_NULL; |
1003 | vm_object_t next_obj = VM_OBJECT_NULL; |
1004 | vm_page_t local_free_q = VM_PAGE_NULL; |
1005 | vm_page_t p; |
1006 | vm_page_t next_p; |
1007 | int object_cnt = 0; |
1008 | vm_page_t ep_array[EVICT_PREPARE_LIMIT]; |
1009 | int ep_count; |
1010 | int ep_limit; |
1011 | int ep_index; |
1012 | int ep_freed = 0; |
1013 | int ep_moved = 0; |
1014 | uint32_t ep_skipped = 0; |
1015 | clock_sec_t sec; |
1016 | clock_nsec_t nsec; |
1017 | |
1018 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0); |
1019 | /* |
1020 | * do a couple of quick checks to see if it's |
1021 | * worthwhile grabbing the lock |
1022 | */ |
1023 | if (queue_empty(&vm_object_cached_list)) { |
1024 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0); |
1025 | return 0; |
1026 | } |
	clock_get_system_nanotime(&sec, &nsec);
1028 | |
1029 | /* |
1030 | * the object on the head of the queue has not |
1031 | * yet sufficiently aged |
1032 | */ |
1033 | if (sec < vm_object_cache_aging_ts) { |
1034 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0); |
1035 | return 0; |
1036 | } |
1037 | /* |
1038 | * don't need the queue lock to find |
1039 | * and lock an object on the cached list |
1040 | */ |
1041 | vm_page_unlock_queues(); |
1042 | |
1043 | vm_object_cache_lock_spin(); |
1044 | |
1045 | for (;;) { |
1046 | next_obj = (vm_object_t)queue_first(&vm_object_cached_list); |
1047 | |
1048 | while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) { |
1049 | object = next_obj; |
1050 | next_obj = (vm_object_t)queue_next(&next_obj->cached_list); |
1051 | |
1052 | assert(object->purgable == VM_PURGABLE_DENY); |
1053 | |
1054 | if (sec < object->vo_cache_ts) { |
1055 | KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0); |
1056 | |
1057 | vm_object_cache_aging_ts = object->vo_cache_ts; |
1058 | object = VM_OBJECT_NULL; |
1059 | break; |
1060 | } |
1061 | if (!vm_object_lock_try_scan(object)) { |
1062 | /* |
1063 | * just skip over this guy for now... if we find |
				 * an object to steal pages from, we'll revisit in a bit...
1065 | * hopefully, the lock will have cleared |
1066 | */ |
1067 | KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0); |
1068 | |
1069 | object = VM_OBJECT_NULL; |
1070 | continue; |
1071 | } |
1072 | if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) { |
1073 | /* |
1074 | * this case really shouldn't happen, but it's not fatal |
1075 | * so deal with it... if we don't remove the object from |
1076 | * the list, we'll never move past it. |
1077 | */ |
1078 | KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1079 | |
1080 | vm_object_cache_remove_locked(object); |
1081 | vm_object_unlock(object); |
1082 | object = VM_OBJECT_NULL; |
1083 | continue; |
1084 | } |
1085 | /* |
1086 | * we have a locked object with pages... |
1087 | * time to start harvesting |
1088 | */ |
1089 | break; |
1090 | } |
1091 | vm_object_cache_unlock(); |
1092 | |
1093 | if (object == VM_OBJECT_NULL) { |
1094 | break; |
1095 | } |
1096 | |
1097 | /* |
1098 | * object is locked at this point and |
1099 | * has resident pages |
1100 | */ |
1101 | next_p = (vm_page_t)vm_page_queue_first(&object->memq); |
1102 | |
1103 | /* |
1104 | * break the page scan into 2 pieces to minimize the time spent |
1105 | * behind the page queue lock... |
1106 | * the list of pages on these unused objects is likely to be cold |
1107 | * w/r to the cpu cache which increases the time to scan the list |
1108 | * tenfold... and we may have a 'run' of pages we can't utilize that |
1109 | * needs to be skipped over... |
1110 | */ |
1111 | if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) { |
1112 | ep_limit = EVICT_PREPARE_LIMIT; |
1113 | } |
1114 | ep_count = 0; |
1115 | |
1116 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) { |
1117 | p = next_p; |
1118 | next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq); |
1119 | |
1120 | object->vo_cache_pages_to_scan--; |
1121 | |
1122 | if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) { |
1123 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
1124 | vm_page_queue_enter(&object->memq, p, vmp_listq); |
1125 | |
1126 | ep_skipped++; |
1127 | continue; |
1128 | } |
1129 | if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) { |
1130 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
1131 | vm_page_queue_enter(&object->memq, p, vmp_listq); |
1132 | |
				pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
1134 | } |
1135 | ep_array[ep_count++] = p; |
1136 | } |
1137 | KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1138 | |
1139 | vm_page_lockspin_queues(); |
1140 | |
1141 | for (ep_index = 0; ep_index < ep_count; ep_index++) { |
1142 | p = ep_array[ep_index]; |
1143 | |
1144 | if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) { |
1145 | p->vmp_reference = FALSE; |
1146 | p->vmp_no_cache = FALSE; |
1147 | |
1148 | /* |
1149 | * we've already filtered out pages that are in the laundry |
1150 | * so if we get here, this page can't be on the pageout queue |
1151 | */ |
				vm_page_queues_remove(p, FALSE);
				vm_page_enqueue_inactive(p, TRUE);
1154 | |
1155 | ep_moved++; |
1156 | } else { |
1157 | #if CONFIG_PHANTOM_CACHE |
1158 | vm_phantom_cache_add_ghost(p); |
1159 | #endif |
				vm_page_free_prepare_queues(p);
1161 | |
1162 | assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0); |
1163 | /* |
1164 | * Add this page to our list of reclaimed pages, |
1165 | * to be freed later. |
1166 | */ |
1167 | p->vmp_snext = local_free_q; |
1168 | local_free_q = p; |
1169 | |
1170 | ep_freed++; |
1171 | } |
1172 | } |
1173 | vm_page_unlock_queues(); |
1174 | |
1175 | KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1176 | |
1177 | if (local_free_q) { |
			vm_page_free_list(local_free_q, TRUE);
1179 | local_free_q = VM_PAGE_NULL; |
1180 | } |
1181 | if (object->vo_cache_pages_to_scan == 0) { |
1182 | KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1183 | |
1184 | vm_object_cache_remove(object); |
1185 | |
1186 | KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1187 | } |
1188 | /* |
1189 | * done with this object |
1190 | */ |
1191 | vm_object_unlock(object); |
1192 | object = VM_OBJECT_NULL; |
1193 | |
1194 | /* |
1195 | * at this point, we are not holding any locks |
1196 | */ |
1197 | if ((ep_freed + ep_moved) >= num_to_evict) { |
1198 | /* |
1199 | * we've reached our target for the |
1200 | * number of pages to evict |
1201 | */ |
1202 | break; |
1203 | } |
1204 | vm_object_cache_lock_spin(); |
1205 | } |
1206 | /* |
1207 | * put the page queues lock back to the caller's |
1208 | * idea of it |
1209 | */ |
1210 | vm_page_lock_queues(); |
1211 | |
1212 | vm_object_cache_pages_freed += ep_freed; |
1213 | vm_object_cache_pages_moved += ep_moved; |
1214 | vm_object_cache_pages_skipped += ep_skipped; |
1215 | |
1216 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0); |
1217 | return ep_freed; |
1218 | } |
1219 | |
1220 | /* |
1221 | * Routine: vm_object_terminate |
1222 | * Purpose: |
1223 | * Free all resources associated with a vm_object. |
1224 | * In/out conditions: |
1225 | * Upon entry, the object must be locked, |
1226 | * and the object must have exactly one reference. |
1227 | * |
1228 | * The shadow object reference is left alone. |
1229 | * |
1230 | * The object must be unlocked if its found that pages |
1231 | * must be flushed to a backing object. If someone |
1232 | * manages to map the object while it is being flushed |
1233 | * the object is returned unlocked and unchanged. Otherwise, |
1234 | * upon exit, the cache will be unlocked, and the |
1235 | * object will cease to exist. |
1236 | */ |
1237 | static kern_return_t |
1238 | vm_object_terminate( |
1239 | vm_object_t object) |
1240 | { |
1241 | vm_object_t shadow_object; |
1242 | |
1243 | vm_object_lock_assert_exclusive(object); |
1244 | |
1245 | if (!object->pageout && (!object->internal && object->can_persist) && |
1246 | (object->pager != NULL || object->shadow_severed)) { |
1247 | /* |
1248 | * Clear pager_trusted bit so that the pages get yanked |
1249 | * out of the object instead of cleaned in place. This |
1250 | * prevents a deadlock in XMM and makes more sense anyway. |
1251 | */ |
1252 | VM_OBJECT_SET_PAGER_TRUSTED(object, FALSE); |
1253 | |
1254 | vm_object_reap_pages(object, REAP_TERMINATE); |
1255 | } |
1256 | /* |
1257 | * Make sure the object isn't already being terminated |
1258 | */ |
1259 | if (object->terminating) { |
1260 | vm_object_lock_assert_exclusive(object); |
1261 | object->ref_count--; |
1262 | assert(object->ref_count > 0); |
1263 | vm_object_unlock(object); |
1264 | return KERN_FAILURE; |
1265 | } |
1266 | |
1267 | /* |
1268 | * Did somebody get a reference to the object while we were |
1269 | * cleaning it? |
1270 | */ |
1271 | if (object->ref_count != 1) { |
1272 | vm_object_lock_assert_exclusive(object); |
1273 | object->ref_count--; |
1274 | assert(object->ref_count > 0); |
1275 | vm_object_unlock(object); |
1276 | return KERN_FAILURE; |
1277 | } |
1278 | |
1279 | /* |
1280 | * Make sure no one can look us up now. |
1281 | */ |
1282 | |
1283 | VM_OBJECT_SET_TERMINATING(object, TRUE); |
1284 | VM_OBJECT_SET_ALIVE(object, FALSE); |
1285 | |
1286 | if (!object->internal && |
1287 | object->cached_list.next && |
1288 | object->cached_list.prev) { |
1289 | vm_object_cache_remove(object); |
1290 | } |
1291 | |
1292 | /* |
1293 | * Detach the object from its shadow if we are the shadow's |
1294 | * copy. The reference we hold on the shadow must be dropped |
1295 | * by our caller. |
1296 | */ |
1297 | if (((shadow_object = object->shadow) != VM_OBJECT_NULL) && |
1298 | !(object->pageout)) { |
1299 | vm_object_lock(shadow_object); |
1300 | if (shadow_object->vo_copy == object) { |
			VM_OBJECT_COPY_SET(shadow_object, VM_OBJECT_NULL);
1302 | } |
1303 | vm_object_unlock(shadow_object); |
1304 | } |
1305 | |
1306 | if (object->paging_in_progress != 0 || |
1307 | object->activity_in_progress != 0) { |
1308 | /* |
1309 | * There are still some paging_in_progress references |
1310 | * on this object, meaning that there are some paging |
1311 | * or other I/O operations in progress for this VM object. |
1312 | * Such operations take some paging_in_progress references |
1313 | * up front to ensure that the object doesn't go away, but |
1314 | * they may also need to acquire a reference on the VM object, |
1315 | * to map it in kernel space, for example. That means that |
1316 | * they may end up releasing the last reference on the VM |
1317 | * object, triggering its termination, while still holding |
1318 | * paging_in_progress references. Waiting for these |
1319 | * pending paging_in_progress references to go away here would |
1320 | * deadlock. |
1321 | * |
1322 | * To avoid deadlocking, we'll let the vm_object_reaper_thread |
1323 | * complete the VM object termination if it still holds |
1324 | * paging_in_progress references at this point. |
1325 | * |
1326 | * No new paging_in_progress should appear now that the |
1327 | * VM object is "terminating" and not "alive". |
1328 | */ |
1329 | vm_object_reap_async(object); |
1330 | vm_object_unlock(object); |
1331 | /* |
1332 | * Return KERN_FAILURE to let the caller know that we |
1333 | * haven't completed the termination and it can't drop this |
1334 | * object's reference on its shadow object yet. |
1335 | * The reaper thread will take care of that once it has |
1336 | * completed this object's termination. |
1337 | */ |
1338 | return KERN_FAILURE; |
1339 | } |
1340 | /* |
1341 | * complete the VM object termination |
1342 | */ |
1343 | vm_object_reap(object); |
1344 | object = VM_OBJECT_NULL; |
1345 | |
1346 | /* |
1347 | * the object lock was released by vm_object_reap() |
1348 | * |
1349 | * KERN_SUCCESS means that this object has been terminated |
1350 | * and no longer needs its shadow object but still holds a |
1351 | * reference on it. |
1352 | * The caller is responsible for dropping that reference. |
1353 | * We can't call vm_object_deallocate() here because that |
1354 | * would create a recursion. |
1355 | */ |
1356 | return KERN_SUCCESS; |
1357 | } |
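
/*
 * For clarity, the caller-side contract of vm_object_terminate() can be
 * sketched as follows.  This is an illustrative, compiled-out paraphrase
 * of the tail of vm_object_deallocate() above (which loops instead of
 * recursing), with a hypothetical function name: only on KERN_SUCCESS has
 * the object been reaped, leaving the caller responsible for the reference
 * the object held on its shadow.
 */
#if 0 /* illustrative sketch, not built */
static void
example_terminate_caller(vm_object_t object)
{
	vm_object_t shadow;

	/* caller holds the only reference and the exclusive object lock */
	shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;

	if (vm_object_terminate(object) != KERN_SUCCESS) {
		/* termination was refused or handed to the reaper thread;
		 * the shadow reference must not be dropped by the caller */
		return;
	}
	/* object is gone; give up the reference it held on its shadow */
	if (shadow != VM_OBJECT_NULL) {
		vm_object_deallocate(shadow);
	}
}
#endif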
1358 | |
1359 | |
1360 | /* |
1361 | * vm_object_reap(): |
1362 | * |
1363 | * Complete the termination of a VM object after it's been marked |
1364 | * as "terminating" and "!alive" by vm_object_terminate(). |
1365 | * |
1366 | * The VM object must be locked by caller. |
1367 | * The lock will be released on return and the VM object is no longer valid. |
1368 | */ |
1369 | |
1370 | void |
1371 | vm_object_reap( |
1372 | vm_object_t object) |
1373 | { |
	memory_object_t pager;
1375 | |
1376 | vm_object_lock_assert_exclusive(object); |
1377 | assert(object->paging_in_progress == 0); |
1378 | assert(object->activity_in_progress == 0); |
1379 | |
1380 | vm_object_reap_count++; |
1381 | |
1382 | /* |
1383 | * Disown this purgeable object to cleanup its owner's purgeable |
1384 | * ledgers. We need to do this before disconnecting the object |
1385 | * from its pager, to properly account for compressed pages. |
1386 | */ |
1387 | if (object->internal && |
1388 | (object->purgable != VM_PURGABLE_DENY || |
1389 | object->vo_ledger_tag)) { |
1390 | int ledger_flags; |
1391 | kern_return_t kr; |
1392 | |
1393 | ledger_flags = 0; |
1394 | if (object->vo_no_footprint) { |
1395 | ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT; |
1396 | } |
1397 | assert(!object->alive); |
1398 | assert(object->terminating); |
1399 | kr = vm_object_ownership_change(object, |
		    object->vo_ledger_tag, /* unchanged */
1401 | NULL, /* no owner */ |
		    ledger_flags,
1403 | FALSE); /* task_objq not locked */ |
1404 | assert(kr == KERN_SUCCESS); |
1405 | assert(object->vo_owner == NULL); |
1406 | } |
1407 | |
1408 | #if DEVELOPMENT || DEBUG |
1409 | if (object->object_is_shared_cache && |
1410 | object->pager != NULL && |
1411 | object->pager->mo_pager_ops == &shared_region_pager_ops) { |
1412 | OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count); |
1413 | } |
1414 | #endif /* DEVELOPMENT || DEBUG */ |
1415 | |
1416 | pager = object->pager; |
1417 | object->pager = MEMORY_OBJECT_NULL; |
1418 | |
1419 | if (pager != MEMORY_OBJECT_NULL) { |
		memory_object_control_disable(&object->pager_control);
1421 | } |
1422 | |
1423 | object->ref_count--; |
1424 | assert(object->ref_count == 0); |
1425 | |
1426 | /* |
1427 | * remove from purgeable queue if it's on |
1428 | */ |
1429 | if (object->internal) { |
1430 | assert(VM_OBJECT_OWNER(object) == TASK_NULL); |
1431 | |
1432 | VM_OBJECT_UNWIRED(object); |
1433 | |
1434 | if (object->purgable == VM_PURGABLE_DENY) { |
1435 | /* not purgeable: nothing to do */ |
1436 | } else if (object->purgable == VM_PURGABLE_VOLATILE) { |
1437 | purgeable_q_t queue; |
1438 | |
1439 | queue = vm_purgeable_object_remove(object); |
1440 | assert(queue); |
1441 | |
1442 | if (object->purgeable_when_ripe) { |
1443 | /* |
1444 | * Must take page lock for this - |
1445 | * using it to protect token queue |
1446 | */ |
1447 | vm_page_lock_queues(); |
1448 | vm_purgeable_token_delete_first(queue); |
1449 | |
1450 | assert(queue->debug_count_objects >= 0); |
1451 | vm_page_unlock_queues(); |
1452 | } |
1453 | |
1454 | /* |
1455 | * Update "vm_page_purgeable_count" in bulk and mark |
1456 | * object as VM_PURGABLE_EMPTY to avoid updating |
1457 | * "vm_page_purgeable_count" again in vm_page_remove() |
1458 | * when reaping the pages. |
1459 | */ |
1460 | unsigned int delta; |
1461 | assert(object->resident_page_count >= |
1462 | object->wired_page_count); |
1463 | delta = (object->resident_page_count - |
1464 | object->wired_page_count); |
1465 | if (delta != 0) { |
1466 | assert(vm_page_purgeable_count >= delta); |
1467 | OSAddAtomic(-delta, |
1468 | (SInt32 *)&vm_page_purgeable_count); |
1469 | } |
1470 | if (object->wired_page_count != 0) { |
1471 | assert(vm_page_purgeable_wired_count >= |
1472 | object->wired_page_count); |
1473 | OSAddAtomic(-object->wired_page_count, |
1474 | (SInt32 *)&vm_page_purgeable_wired_count); |
1475 | } |
1476 | VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_EMPTY); |
1477 | } else if (object->purgable == VM_PURGABLE_NONVOLATILE || |
1478 | object->purgable == VM_PURGABLE_EMPTY) { |
1479 | /* remove from nonvolatile queue */ |
1480 | vm_purgeable_nonvolatile_dequeue(object); |
1481 | } else { |
			panic("object %p in unexpected purgeable state 0x%x",
1483 | object, object->purgable); |
1484 | } |
1485 | if (object->transposed && |
1486 | object->cached_list.next != NULL && |
1487 | object->cached_list.prev == NULL) { |
1488 | /* |
1489 | * object->cached_list.next "points" to the |
1490 | * object that was transposed with this object. |
1491 | */ |
1492 | } else { |
1493 | assert(object->cached_list.next == NULL); |
1494 | } |
1495 | assert(object->cached_list.prev == NULL); |
1496 | } |
1497 | |
1498 | if (object->pageout) { |
1499 | /* |
1500 | * free all remaining pages tabled on |
1501 | * this object |
		 * clean up its shadow
1503 | */ |
1504 | assert(object->shadow != VM_OBJECT_NULL); |
1505 | |
1506 | vm_pageout_object_terminate(object); |
1507 | } else if (object->resident_page_count) { |
1508 | /* |
1509 | * free all remaining pages tabled on |
1510 | * this object |
1511 | */ |
1512 | vm_object_reap_pages(object, REAP_REAP); |
1513 | } |
1514 | assert(vm_page_queue_empty(&object->memq)); |
1515 | assert(object->paging_in_progress == 0); |
1516 | assert(object->activity_in_progress == 0); |
1517 | assert(object->ref_count == 0); |
1518 | |
1519 | /* |
1520 | * If the pager has not already been released by |
1521 | * vm_object_destroy, we need to terminate it and |
1522 | * release our reference to it here. |
1523 | */ |
1524 | if (pager != MEMORY_OBJECT_NULL) { |
1525 | vm_object_unlock(object); |
1526 | vm_object_release_pager(pager); |
1527 | vm_object_lock(object); |
1528 | } |
1529 | |
1530 | /* kick off anyone waiting on terminating */ |
1531 | VM_OBJECT_SET_TERMINATING(object, FALSE); |
1532 | vm_object_paging_begin(object); |
1533 | vm_object_paging_end(object); |
1534 | vm_object_unlock(object); |
1535 | |
1536 | object->shadow = VM_OBJECT_NULL; |
1537 | |
1538 | #if VM_OBJECT_TRACKING |
1539 | if (vm_object_tracking_btlog) { |
1540 | btlog_erase(vm_object_tracking_btlog, object); |
1541 | } |
1542 | #endif /* VM_OBJECT_TRACKING */ |
1543 | |
1544 | vm_object_lock_destroy(object); |
1545 | /* |
1546 | * Free the space for the object. |
1547 | */ |
1548 | zfree(vm_object_zone, object); |
1549 | object = VM_OBJECT_NULL; |
1550 | } |
1551 | |
1552 | |
1553 | unsigned int vm_max_batch = 256; |
1554 | |
1555 | #define V_O_R_MAX_BATCH 128 |
1556 | |
1557 | #define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch) |
1558 | |
1559 | |
1560 | #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \ |
1561 | MACRO_BEGIN \ |
1562 | if (_local_free_q) { \ |
1563 | if (do_disconnect) { \ |
1564 | vm_page_t m; \ |
1565 | for (m = _local_free_q; \ |
1566 | m != VM_PAGE_NULL; \ |
1567 | m = m->vmp_snext) { \ |
1568 | if (m->vmp_pmapped) { \ |
1569 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \ |
1570 | } \ |
1571 | } \ |
1572 | } \ |
1573 | vm_page_free_list(_local_free_q, TRUE); \ |
1574 | _local_free_q = VM_PAGE_NULL; \ |
1575 | } \ |
1576 | MACRO_END |
1577 | |
1578 | |
1579 | void |
1580 | vm_object_reap_pages( |
1581 | vm_object_t object, |
1582 | int reap_type) |
1583 | { |
1584 | vm_page_t p; |
1585 | vm_page_t next; |
1586 | vm_page_t local_free_q = VM_PAGE_NULL; |
1587 | int loop_count; |
1588 | boolean_t disconnect_on_release; |
1589 | pmap_flush_context pmap_flush_context_storage; |
1590 | |
1591 | if (reap_type == REAP_DATA_FLUSH) { |
1592 | /* |
1593 | * We need to disconnect pages from all pmaps before |
1594 | * releasing them to the free list |
1595 | */ |
1596 | disconnect_on_release = TRUE; |
1597 | } else { |
1598 | /* |
1599 | * Either the caller has already disconnected the pages |
1600 | * from all pmaps, or we disconnect them here as we add |
		 * them to our local list of pages to be released.
1602 | * No need to re-disconnect them when we release the pages |
1603 | * to the free list. |
1604 | */ |
1605 | disconnect_on_release = FALSE; |
1606 | } |
1607 | |
1608 | restart_after_sleep: |
1609 | if (vm_page_queue_empty(&object->memq)) { |
1610 | return; |
1611 | } |
1612 | loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); |
1613 | |
1614 | if (reap_type == REAP_PURGEABLE) { |
1615 | pmap_flush_context_init(&pmap_flush_context_storage); |
1616 | } |
1617 | |
1618 | vm_page_lock_queues(); |
1619 | |
1620 | next = (vm_page_t)vm_page_queue_first(&object->memq); |
1621 | |
1622 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) { |
1623 | p = next; |
1624 | next = (vm_page_t)vm_page_queue_next(&next->vmp_listq); |
1625 | |
1626 | if (--loop_count == 0) { |
1627 | vm_page_unlock_queues(); |
1628 | |
1629 | if (local_free_q) { |
1630 | if (reap_type == REAP_PURGEABLE) { |
1631 | pmap_flush(&pmap_flush_context_storage); |
1632 | pmap_flush_context_init(&pmap_flush_context_storage); |
1633 | } |
1634 | /* |
1635 | * Free the pages we reclaimed so far |
1636 | * and take a little break to avoid |
1637 | * hogging the page queue lock too long |
1638 | */ |
1639 | VM_OBJ_REAP_FREELIST(local_free_q, |
1640 | disconnect_on_release); |
1641 | } else { |
1642 | mutex_pause(0); |
1643 | } |
1644 | |
1645 | loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); |
1646 | |
1647 | vm_page_lock_queues(); |
1648 | } |
1649 | if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) { |
1650 | if (p->vmp_busy || p->vmp_cleaning) { |
1651 | vm_page_unlock_queues(); |
1652 | /* |
1653 | * free the pages reclaimed so far |
1654 | */ |
1655 | VM_OBJ_REAP_FREELIST(local_free_q, |
1656 | disconnect_on_release); |
1657 | |
1658 | PAGE_SLEEP(object, p, THREAD_UNINT); |
1659 | |
1660 | goto restart_after_sleep; |
1661 | } |
1662 | if (p->vmp_laundry) { |
				vm_pageout_steal_laundry(p, TRUE);
1664 | } |
1665 | } |
1666 | switch (reap_type) { |
1667 | case REAP_DATA_FLUSH: |
1668 | if (VM_PAGE_WIRED(p)) { |
1669 | /* |
1670 | * this is an odd case... perhaps we should |
1671 | * zero-fill this page since we're conceptually |
1672 | * tossing its data at this point, but leaving |
1673 | * it on the object to honor the 'wire' contract |
1674 | */ |
1675 | continue; |
1676 | } |
1677 | break; |
1678 | |
1679 | case REAP_PURGEABLE: |
1680 | if (VM_PAGE_WIRED(p)) { |
1681 | /* |
1682 | * can't purge a wired page |
1683 | */ |
1684 | vm_page_purged_wired++; |
1685 | continue; |
1686 | } |
1687 | if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) { |
				vm_pageout_steal_laundry(p, TRUE);
1689 | } |
1690 | |
1691 | if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) { |
1692 | /* |
1693 | * page is being acted upon, |
1694 | * so don't mess with it |
1695 | */ |
1696 | vm_page_purged_others++; |
1697 | continue; |
1698 | } |
1699 | if (p->vmp_busy) { |
1700 | /* |
1701 | * We can't reclaim a busy page but we can |
1702 | * make it more likely to be paged (it's not wired) to make |
1703 | * sure that it gets considered by |
1704 | * vm_pageout_scan() later. |
1705 | */ |
1706 | if (VM_PAGE_PAGEABLE(p)) { |
1707 | vm_page_deactivate(page: p); |
1708 | } |
1709 | vm_page_purged_busy++; |
1710 | continue; |
1711 | } |
1712 | |
1713 | assert(!is_kernel_object(VM_PAGE_OBJECT(p))); |
1714 | |
1715 | /* |
1716 | * we can discard this page... |
1717 | */ |
1718 | if (p->vmp_pmapped == TRUE) { |
1719 | /* |
1720 | * unmap the page |
1721 | */ |
				pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
1723 | } |
1724 | vm_page_purged_count++; |
1725 | |
1726 | break; |
1727 | |
1728 | case REAP_TERMINATE: |
1729 | if (p->vmp_absent || p->vmp_private) { |
1730 | /* |
1731 | * For private pages, VM_PAGE_FREE just |
1732 | * leaves the page structure around for |
1733 | * its owner to clean up. For absent |
1734 | * pages, the structure is returned to |
1735 | * the appropriate pool. |
1736 | */ |
1737 | break; |
1738 | } |
1739 | if (p->vmp_fictitious) { |
1740 | assert(VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr); |
1741 | break; |
1742 | } |
1743 | if (!p->vmp_dirty && p->vmp_wpmapped) { |
				p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
1745 | } |
1746 | |
1747 | if ((p->vmp_dirty || p->vmp_precious) && !VMP_ERROR_GET(p) && object->alive) { |
1748 | assert(!object->internal); |
1749 | |
1750 | p->vmp_free_when_done = TRUE; |
1751 | |
1752 | if (!p->vmp_laundry) { |
					vm_page_queues_remove(p, TRUE);
1754 | /* |
1755 | * flush page... page will be freed |
1756 | * upon completion of I/O |
1757 | */ |
					vm_pageout_cluster(p);
1759 | } |
1760 | vm_page_unlock_queues(); |
1761 | /* |
1762 | * free the pages reclaimed so far |
1763 | */ |
1764 | VM_OBJ_REAP_FREELIST(local_free_q, |
1765 | disconnect_on_release); |
1766 | |
1767 | vm_object_paging_wait(object, THREAD_UNINT); |
1768 | |
1769 | goto restart_after_sleep; |
1770 | } |
1771 | break; |
1772 | |
1773 | case REAP_REAP: |
1774 | break; |
1775 | } |
		vm_page_free_prepare_queues(p);
1777 | assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0); |
1778 | /* |
1779 | * Add this page to our list of reclaimed pages, |
1780 | * to be freed later. |
1781 | */ |
1782 | p->vmp_snext = local_free_q; |
1783 | local_free_q = p; |
1784 | } |
1785 | vm_page_unlock_queues(); |
1786 | |
1787 | /* |
1788 | * Free the remaining reclaimed pages |
1789 | */ |
1790 | if (reap_type == REAP_PURGEABLE) { |
1791 | pmap_flush(&pmap_flush_context_storage); |
1792 | } |
1793 | |
1794 | VM_OBJ_REAP_FREELIST(local_free_q, |
1795 | disconnect_on_release); |
1796 | } |
1797 | |
1798 | |
1799 | void |
1800 | vm_object_reap_async( |
1801 | vm_object_t object) |
1802 | { |
1803 | vm_object_lock_assert_exclusive(object); |
1804 | |
1805 | vm_object_reaper_lock_spin(); |
1806 | |
1807 | vm_object_reap_count_async++; |
1808 | |
1809 | /* enqueue the VM object... */ |
1810 | queue_enter(&vm_object_reaper_queue, object, |
1811 | vm_object_t, cached_list); |
1812 | |
1813 | vm_object_reaper_unlock(); |
1814 | |
1815 | /* ... and wake up the reaper thread */ |
1816 | thread_wakeup((event_t) &vm_object_reaper_queue); |
1817 | } |
1818 | |
1819 | |
1820 | void |
1821 | vm_object_reaper_thread(void) |
1822 | { |
1823 | vm_object_t object, shadow_object; |
1824 | |
1825 | vm_object_reaper_lock_spin(); |
1826 | |
1827 | while (!queue_empty(&vm_object_reaper_queue)) { |
1828 | queue_remove_first(&vm_object_reaper_queue, |
1829 | object, |
1830 | vm_object_t, |
1831 | cached_list); |
1832 | |
1833 | vm_object_reaper_unlock(); |
1834 | vm_object_lock(object); |
1835 | |
1836 | assert(object->terminating); |
1837 | assert(!object->alive); |
1838 | |
1839 | /* |
1840 | * The pageout daemon might be playing with our pages. |
1841 | * Now that the object is dead, it won't touch any more |
1842 | * pages, but some pages might already be on their way out. |
1843 | * Hence, we wait until the active paging activities have |
1844 | * ceased before we break the association with the pager |
1845 | * itself. |
1846 | */ |
1847 | while (object->paging_in_progress != 0 || |
1848 | object->activity_in_progress != 0) { |
1849 | vm_object_wait(object, |
1850 | VM_OBJECT_EVENT_PAGING_IN_PROGRESS, |
1851 | THREAD_UNINT); |
1852 | vm_object_lock(object); |
1853 | } |
1854 | |
1855 | shadow_object = |
1856 | object->pageout ? VM_OBJECT_NULL : object->shadow; |
1857 | |
1858 | vm_object_reap(object); |
1859 | /* cache is unlocked and object is no longer valid */ |
1860 | object = VM_OBJECT_NULL; |
1861 | |
1862 | if (shadow_object != VM_OBJECT_NULL) { |
1863 | /* |
1864 | * Drop the reference "object" was holding on |
1865 | * its shadow object. |
1866 | */ |
			vm_object_deallocate(shadow_object);
1868 | shadow_object = VM_OBJECT_NULL; |
1869 | } |
1870 | vm_object_reaper_lock_spin(); |
1871 | } |
1872 | |
1873 | /* wait for more work... */ |
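	/*
	 * Note: the wait is asserted while the reaper lock is still held, so
	 * a vm_object_reap_async() that enqueues an object and issues its
	 * thread_wakeup() after we drop the lock cannot be missed: the wakeup
	 * finds this thread already waiting on the queue event.
	 */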
	assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
1875 | |
1876 | vm_object_reaper_unlock(); |
1877 | |
	thread_block((thread_continue_t) vm_object_reaper_thread);
1879 | /*NOTREACHED*/ |
1880 | } |
1881 | |
1882 | /* |
1883 | * Routine: vm_object_release_pager |
1884 | * Purpose: Terminate the pager and, upon completion, |
1885 | * release our last reference to it. |
1886 | */ |
1887 | static void |
1888 | ( |
1889 | memory_object_t ) |
1890 | { |
1891 | /* |
1892 | * Terminate the pager. |
1893 | */ |
1894 | |
	(void) memory_object_terminate(pager);
1896 | |
1897 | /* |
1898 | * Release reference to pager. |
1899 | */ |
	memory_object_deallocate(pager);
1901 | } |
1902 | |
1903 | /* |
1904 | * Routine: vm_object_destroy |
1905 | * Purpose: |
1906 | * Shut down a VM object, despite the |
1907 | * presence of address map (or other) references |
1908 | * to the vm_object. |
1909 | */ |
1910 | #if MACH_ASSERT |
1911 | extern uint32_t system_inshutdown; |
1912 | int fbdp_no_panic = 1; |
1913 | #endif /* MACH_ASSERT */ |
1914 | kern_return_t |
1915 | vm_object_destroy( |
1916 | vm_object_t object, |
1917 | vm_object_destroy_reason_t reason) |
1918 | { |
	memory_object_t old_pager;
1920 | |
1921 | if (object == VM_OBJECT_NULL) { |
1922 | return KERN_SUCCESS; |
1923 | } |
1924 | |
1925 | /* |
1926 | * Remove the pager association immediately. |
1927 | * |
1928 | * This will prevent the memory manager from further |
1929 | * meddling. [If it wanted to flush data or make |
1930 | * other changes, it should have done so before performing |
1931 | * the destroy call.] |
1932 | */ |
1933 | |
1934 | vm_object_lock(object); |
1935 | |
1936 | #if FBDP_DEBUG_OBJECT_NO_PAGER |
1937 | static bool fbdp_no_panic_retrieved = false; |
1938 | if (!fbdp_no_panic_retrieved) { |
		PE_parse_boot_argn("fbdp_no_panic4", &fbdp_no_panic, sizeof(fbdp_no_panic));
1940 | fbdp_no_panic_retrieved = true; |
1941 | } |
1942 | |
1943 | bool forced_unmount = false; |
1944 | if (object->named && |
1945 | object->ref_count > 2 && |
1946 | object->pager != NULL && |
1947 | vnode_pager_get_forced_unmount(object->pager, &forced_unmount) == KERN_SUCCESS && |
1948 | forced_unmount == false) { |
1949 | if (!fbdp_no_panic) { |
1950 | panic("FBDP rdar://99829401 object %p refs %d pager %p (no forced unmount)\n" , object, object->ref_count, object->pager); |
1951 | } |
1952 | DTRACE_VM3(vm_object_destroy_no_forced_unmount, |
1953 | vm_object_t, object, |
1954 | int, object->ref_count, |
1955 | memory_object_t, object->pager); |
1956 | } |
1957 | |
1958 | if (object->fbdp_tracked) { |
1959 | if (object->ref_count > 2 && !system_inshutdown) { |
1960 | if (!fbdp_no_panic) { |
1961 | panic("FBDP/4 rdar://99829401 object %p refs %d pager %p (tracked)\n" , object, object->ref_count, object->pager); |
1962 | } |
1963 | } |
1964 | VM_OBJECT_SET_FBDP_TRACKED(object, false); |
1965 | } |
1966 | #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */ |
1967 | |
1968 | if (reason != VM_OBJECT_DESTROY_UNKNOWN_REASON) { |
		VM_OBJECT_SET_NO_PAGER_REASON(object, reason);
1970 | } |
1971 | |
1972 | VM_OBJECT_SET_CAN_PERSIST(object, FALSE); |
1973 | VM_OBJECT_SET_NAMED(object, FALSE); |
1974 | #if 00 |
1975 | VM_OBJECT_SET_ALIVE(object, FALSE); |
1976 | #endif /* 00 */ |
1977 | |
1978 | #if DEVELOPMENT || DEBUG |
1979 | if (object->object_is_shared_cache && |
1980 | object->pager != NULL && |
1981 | object->pager->mo_pager_ops == &shared_region_pager_ops) { |
1982 | OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count); |
1983 | } |
1984 | #endif /* DEVELOPMENT || DEBUG */ |
1985 | |
1986 | old_pager = object->pager; |
1987 | object->pager = MEMORY_OBJECT_NULL; |
1988 | if (old_pager != MEMORY_OBJECT_NULL) { |
		memory_object_control_disable(&object->pager_control);
1990 | } |
1991 | |
1992 | /* |
1993 | * Wait for the existing paging activity (that got |
1994 | * through before we nulled out the pager) to subside. |
1995 | */ |
1996 | |
1997 | vm_object_paging_wait(object, THREAD_UNINT); |
1998 | vm_object_unlock(object); |
1999 | |
2000 | /* |
2001 | * Terminate the object now. |
2002 | */ |
2003 | if (old_pager != MEMORY_OBJECT_NULL) { |
		vm_object_release_pager(old_pager);
2005 | |
2006 | /* |
2007 | * JMM - Release the caller's reference. This assumes the |
2008 | * caller had a reference to release, which is a big (but |
2009 | * currently valid) assumption if this is driven from the |
2010 | * vnode pager (it is holding a named reference when making |
2011 | * this call).. |
2012 | */ |
2013 | vm_object_deallocate(object); |
2014 | } |
2015 | return KERN_SUCCESS; |
2016 | } |
2017 | |
2018 | /* |
2019 | * The "chunk" macros are used by routines below when looking for pages to deactivate. These |
2020 | * exist because of the need to handle shadow chains. When deactivating pages, we only |
 * want to deactivate the ones at the topmost level in the object chain. In order to do
2022 | * this efficiently, the specified address range is divided up into "chunks" and we use |
2023 | * a bit map to keep track of which pages have already been processed as we descend down |
2024 | * the shadow chain. These chunk macros hide the details of the bit map implementation |
2025 | * as much as we can. |
2026 | * |
2027 | * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is |
2028 | * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest |
2029 | * order bit represents page 0 in the current range and highest order bit represents |
2030 | * page 63. |
2031 | * |
2032 | * For further convenience, we also use negative logic for the page state in the bit map. |
2033 | * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has |
2034 | * been processed. This way we can simply test the 64-bit long word to see if it's zero |
2035 | * to easily tell if the whole range has been processed. Therefore, the bit map starts |
2036 | * out with all the bits set. The macros below hide all these details from the caller. |
2037 | */ |
2038 | |
2039 | #define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */ |
2040 | /* be the same as the number of bits in */ |
2041 | /* the chunk_state_t type. We use 64 */ |
2042 | /* just for convenience. */ |
2043 | |
2044 | #define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */ |
2045 | |
2046 | typedef uint64_t chunk_state_t; |
2047 | |
2048 | /* |
2049 | * The bit map uses negative logic, so we start out with all 64 bits set to indicate |
2050 | * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE, |
2051 | * then we mark pages beyond the len as having been "processed" so that we don't waste time |
2052 | * looking at pages in that range. This can save us from unnecessarily chasing down the |
2053 | * shadow chain. |
2054 | */ |
2055 | |
2056 | #define CHUNK_INIT(c, len) \ |
2057 | MACRO_BEGIN \ |
2058 | uint64_t p; \ |
2059 | \ |
2060 | (c) = 0xffffffffffffffffLL; \ |
2061 | \ |
2062 | for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \ |
2063 | MARK_PAGE_HANDLED(c, p); \ |
2064 | MACRO_END |
2065 | |
2066 | |
2067 | /* |
2068 | * Return true if all pages in the chunk have not yet been processed. |
2069 | */ |
2070 | |
2071 | #define CHUNK_NOT_COMPLETE(c) ((c) != 0) |
2072 | |
2073 | /* |
2074 | * Return true if the page at offset 'p' in the bit map has already been handled |
2075 | * while processing a higher level object in the shadow chain. |
2076 | */ |
2077 | |
2078 | #define PAGE_ALREADY_HANDLED(c, p) (((c) & (1ULL << (p))) == 0) |
2079 | |
2080 | /* |
2081 | * Mark the page at offset 'p' in the bit map as having been processed. |
2082 | */ |
2083 | |
2084 | #define MARK_PAGE_HANDLED(c, p) \ |
2085 | MACRO_BEGIN \ |
2086 | (c) = (c) & ~(1ULL << (p)); \ |
2087 | MACRO_END |
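
/*
 * Worked example (illustrative only; the helper below is hypothetical and
 * never compiled): for a 3-page range, CHUNK_INIT() starts with all 64 bits
 * set and then clears bits 3..63, leaving c == 0x7 (pages 0, 1 and 2 still
 * "not seen").  As each page is found, MARK_PAGE_HANDLED() clears its bit;
 * once c reaches zero, CHUNK_NOT_COMPLETE() becomes false and the shadow
 * chain walk can stop early.
 */
#if 0   /* example only, not compiled */
static void
chunk_state_example(void)
{
	chunk_state_t c;

	CHUNK_INIT(c, 3 * PAGE_SIZE_64);        /* c == 0x7 */
	assert(CHUNK_NOT_COMPLETE(c));
	assert(!PAGE_ALREADY_HANDLED(c, 0));
	assert(PAGE_ALREADY_HANDLED(c, 3));     /* beyond "len": pre-marked */

	MARK_PAGE_HANDLED(c, 0);                /* c == 0x6 */
	MARK_PAGE_HANDLED(c, 1);                /* c == 0x4 */
	MARK_PAGE_HANDLED(c, 2);                /* c == 0x0 */
	assert(!CHUNK_NOT_COMPLETE(c));
}
#endif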
2088 | |
2089 | |
2090 | /* |
2091 | * Return true if the page at the given offset has been paged out. Object is |
2092 | * locked upon entry and returned locked. |
2093 | */ |
2094 | |
2095 | static boolean_t |
2096 | page_is_paged_out( |
2097 | vm_object_t object, |
2098 | vm_object_offset_t offset) |
2099 | { |
2100 | if (object->internal && |
2101 | object->alive && |
2102 | !object->terminating && |
2103 | object->pager_ready) { |
2104 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) |
2105 | == VM_EXTERNAL_STATE_EXISTS) { |
2106 | return TRUE; |
2107 | } |
2108 | } |
2109 | return FALSE; |
2110 | } |
2111 | |
2112 | |
2113 | |
2114 | /* |
2115 | * madvise_free_debug |
2116 | * |
2117 | * To help debug madvise(MADV_FREE*) mis-usage, this triggers a |
2118 | * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to |
2119 | * simulate the loss of the page's contents as if the page had been |
2120 | * reclaimed and then re-faulted. |
2121 | */ |
2122 | #if DEVELOPMENT || DEBUG |
2123 | int madvise_free_debug = 0; |
2124 | int madvise_free_debug_sometimes = 1; |
#else /* DEVELOPMENT || DEBUG */
2126 | int madvise_free_debug = 0; |
2127 | int madvise_free_debug_sometimes = 0; |
#endif /* DEVELOPMENT || DEBUG */
2129 | int madvise_free_counter = 0; |
2130 | |
2131 | __options_decl(deactivate_flags_t, uint32_t, { |
2132 | DEACTIVATE_KILL = 0x1, |
2133 | DEACTIVATE_REUSABLE = 0x2, |
2134 | DEACTIVATE_ALL_REUSABLE = 0x4, |
2135 | DEACTIVATE_CLEAR_REFMOD = 0x8, |
2136 | DEACTIVATE_REUSABLE_NO_WRITE = 0x10 |
2137 | }); |
2138 | |
2139 | /* |
2140 | * Deactivate the pages in the specified object and range. If kill_page is set, also discard any |
2141 | * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify |
2142 | * a size that is less than or equal to the CHUNK_SIZE. |
2143 | */ |
2144 | |
2145 | static void |
2146 | deactivate_pages_in_object( |
2147 | vm_object_t object, |
2148 | vm_object_offset_t offset, |
2149 | vm_object_size_t size, |
2150 | deactivate_flags_t flags, |
2151 | chunk_state_t *chunk_state, |
2152 | pmap_flush_context *pfc, |
2153 | struct pmap *pmap, |
2154 | vm_map_offset_t pmap_offset) |
2155 | { |
2156 | vm_page_t m; |
2157 | int p; |
2158 | struct vm_page_delayed_work dw_array; |
2159 | struct vm_page_delayed_work *dwp, *dwp_start; |
2160 | bool dwp_finish_ctx = TRUE; |
2161 | int dw_count; |
2162 | int dw_limit; |
2163 | unsigned int reusable = 0; |
2164 | |
2165 | /* |
2166 | * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the |
2167 | * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may |
2168 | * have pages marked as having been processed already. We stop the loop early if we find we've handled |
2169 | * all the pages in the chunk. |
2170 | */ |
2171 | |
2172 | dwp_start = dwp = NULL; |
2173 | dw_count = 0; |
2174 | dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); |
2175 | dwp_start = vm_page_delayed_work_get_ctx(); |
2176 | if (dwp_start == NULL) { |
2177 | dwp_start = &dw_array; |
2178 | dw_limit = 1; |
2179 | dwp_finish_ctx = FALSE; |
2180 | } |
2181 | |
2182 | dwp = dwp_start; |
2183 | |
2184 | for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) { |
2185 | /* |
2186 | * If this offset has already been found and handled in a higher level object, then don't |
2187 | * do anything with it in the current shadow object. |
2188 | */ |
2189 | |
2190 | if (PAGE_ALREADY_HANDLED(*chunk_state, p)) { |
2191 | continue; |
2192 | } |
2193 | |
2194 | /* |
2195 | * See if the page at this offset is around. First check to see if the page is resident, |
2196 | * then if not, check the existence map or with the pager. |
2197 | */ |
2198 | |
2199 | if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { |
2200 | /* |
2201 | * We found a page we were looking for. Mark it as "handled" now in the chunk_state |
2202 | * so that we won't bother looking for a page at this offset again if there are more |
2203 | * shadow objects. Then deactivate the page. |
2204 | */ |
2205 | |
2206 | MARK_PAGE_HANDLED(*chunk_state, p); |
2207 | |
2208 | if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) && |
2209 | (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) { |
2210 | int clear_refmod_mask; |
2211 | int pmap_options; |
2212 | dwp->dw_mask = 0; |
2213 | |
2214 | pmap_options = 0; |
2215 | clear_refmod_mask = VM_MEM_REFERENCED; |
2216 | dwp->dw_mask |= DW_clear_reference; |
2217 | |
2218 | if ((flags & DEACTIVATE_KILL) && (object->internal)) { |
2219 | if (!(flags & DEACTIVATE_REUSABLE_NO_WRITE) && |
2220 | (madvise_free_debug || |
2221 | (madvise_free_debug_sometimes && |
2222 | madvise_free_counter++ & 0x1))) { |
2223 | /* |
2224 | * zero-fill the page (or every |
2225 | * other page) now to simulate |
2226 | * it being reclaimed and |
2227 | * re-faulted. |
2228 | */ |
2229 | #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES |
2230 | if (!m->vmp_unmodified_ro) { |
2231 | #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */ |
2232 | if (true) { |
2233 | #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */ |
							pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
2235 | } |
2236 | } |
2237 | m->vmp_precious = FALSE; |
2238 | m->vmp_dirty = FALSE; |
2239 | |
2240 | clear_refmod_mask |= VM_MEM_MODIFIED; |
2241 | if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) { |
2242 | /* |
2243 | * This page is now clean and |
2244 | * reclaimable. Move it out |
2245 | * of the throttled queue, so |
2246 | * that vm_pageout_scan() can |
2247 | * find it. |
2248 | */ |
2249 | dwp->dw_mask |= DW_move_page; |
2250 | } |
2251 | |
2252 | #if 0 |
2253 | #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES |
2254 | /* |
2255 | * COMMENT BLOCK ON WHY THIS SHOULDN'T BE DONE. |
2256 | * |
2257 | * Since we are about to do a VM_COMPRESSOR_PAGER_STATE_CLR |
2258 | * below for this page, which drops any existing compressor |
2259 | * storage of this page (eg side-effect of a CoW operation or |
2260 | * a collapse operation), it is tempting to think that we should |
2261 | * treat this page as if it was just decompressed (during which |
2262 | * we also drop existing compressor storage) and so start its life |
2263 | * out with vmp_unmodified_ro set to FALSE. |
2264 | * |
2265 | * However, we can't do that here because we could swing around |
2266 | * and re-access this page in a read-only fault. |
2267 | * Clearing this bit means we'll try to zero it up above |
2268 | * and fail. |
2269 | * |
2270 | * Note that clearing the bit is unnecessary regardless because |
2271 | * dirty state has been cleared. During the next soft fault, the |
2272 | * right state will be restored and things will progress just fine. |
2273 | */ |
2274 | if (m->vmp_unmodified_ro == true) { |
2275 | /* Need object and pageq locks for bit manipulation*/ |
2276 | m->vmp_unmodified_ro = false; |
2277 | os_atomic_dec(&compressor_ro_uncompressed); |
2278 | } |
2279 | #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */ |
2280 | #endif /* 0 */ |
2281 | VM_COMPRESSOR_PAGER_STATE_CLR(object, offset); |
2282 | |
2283 | if ((flags & DEACTIVATE_REUSABLE) && !m->vmp_reusable) { |
2284 | assert(!(flags & DEACTIVATE_ALL_REUSABLE)); |
2285 | assert(!object->all_reusable); |
2286 | m->vmp_reusable = TRUE; |
2287 | object->reusable_page_count++; |
2288 | assert(object->resident_page_count >= object->reusable_page_count); |
2289 | reusable++; |
2290 | /* |
2291 | * Tell pmap this page is now |
2292 | * "reusable" (to update pmap |
2293 | * stats for all mappings). |
2294 | */ |
2295 | pmap_options |= PMAP_OPTIONS_SET_REUSABLE; |
2296 | } |
2297 | } |
2298 | if (flags & DEACTIVATE_CLEAR_REFMOD) { |
2299 | /* |
2300 | * The caller didn't clear the refmod bits in advance. |
2301 | * Clear them for this page now. |
2302 | */ |
2303 | pmap_options |= PMAP_OPTIONS_NOFLUSH; |
					pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
					    clear_refmod_mask,
					    pmap_options,
2307 | (void *)pfc); |
2308 | } |
2309 | |
2310 | if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && |
2311 | !(flags & (DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE))) { |
2312 | dwp->dw_mask |= DW_move_page; |
2313 | } |
2314 | |
2315 | if (dwp->dw_mask) { |
2316 | VM_PAGE_ADD_DELAYED_WORK(dwp, m, |
2317 | dw_count); |
2318 | } |
2319 | |
2320 | if (dw_count >= dw_limit) { |
2321 | if (reusable) { |
2322 | OSAddAtomic(reusable, |
2323 | &vm_page_stats_reusable.reusable_count); |
2324 | vm_page_stats_reusable.reusable += reusable; |
2325 | reusable = 0; |
2326 | } |
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2328 | |
2329 | dwp = dwp_start; |
2330 | dw_count = 0; |
2331 | } |
2332 | } |
2333 | } else { |
2334 | /* |
2335 | * The page at this offset isn't memory resident, check to see if it's |
2336 | * been paged out. If so, mark it as handled so we don't bother looking |
2337 | * for it in the shadow chain. |
2338 | */ |
2339 | |
2340 | if (page_is_paged_out(object, offset)) { |
2341 | MARK_PAGE_HANDLED(*chunk_state, p); |
2342 | |
2343 | /* |
2344 | * If we're killing a non-resident page, then clear the page in the existence |
2345 | * map so we don't bother paging it back in if it's touched again in the future. |
2346 | */ |
2347 | |
2348 | if ((flags & DEACTIVATE_KILL) && (object->internal)) { |
2349 | VM_COMPRESSOR_PAGER_STATE_CLR(object, offset); |
2350 | |
2351 | if (pmap != PMAP_NULL) { |
2352 | /* |
2353 | * Tell pmap that this page |
2354 | * is no longer mapped, to |
2355 | * adjust the footprint ledger |
2356 | * because this page is no |
2357 | * longer compressed. |
2358 | */ |
2359 | pmap_remove_options( |
						    pmap,
						    pmap_offset,
						    (pmap_offset +
2363 | PAGE_SIZE), |
2364 | PMAP_OPTIONS_REMOVE); |
2365 | } |
2366 | } |
2367 | } |
2368 | } |
2369 | } |
2370 | |
2371 | if (reusable) { |
2372 | OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count); |
2373 | vm_page_stats_reusable.reusable += reusable; |
2374 | reusable = 0; |
2375 | } |
2376 | |
2377 | if (dw_count) { |
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2379 | dwp = dwp_start; |
2380 | dw_count = 0; |
2381 | } |
2382 | |
2383 | if (dwp_start && dwp_finish_ctx) { |
		vm_page_delayed_work_finish_ctx(dwp_start);
2385 | dwp_start = dwp = NULL; |
2386 | } |
2387 | } |
2388 | |
2389 | |
2390 | /* |
 * Deactivate a "chunk" of the given range of the object starting at offset. A "chunk"
2392 | * will always be less than or equal to the given size. The total range is divided up |
2393 | * into chunks for efficiency and performance related to the locks and handling the shadow |
2394 | * chain. This routine returns how much of the given "size" it actually processed. It's |
 * up to the caller to loop and keep calling this routine until the entire range they want
2396 | * to process has been done. |
2397 | * Iff clear_refmod is true, pmap_clear_refmod_options is called for each physical page in this range. |
2398 | */ |
2399 | |
2400 | static vm_object_size_t |
2401 | deactivate_a_chunk( |
2402 | vm_object_t orig_object, |
2403 | vm_object_offset_t offset, |
2404 | vm_object_size_t size, |
2405 | deactivate_flags_t flags, |
2406 | pmap_flush_context *pfc, |
2407 | struct pmap *pmap, |
2408 | vm_map_offset_t pmap_offset) |
2409 | { |
2410 | vm_object_t object; |
2411 | vm_object_t tmp_object; |
2412 | vm_object_size_t length; |
2413 | chunk_state_t chunk_state; |
2414 | |
2415 | |
2416 | /* |
2417 | * Get set to do a chunk. We'll do up to CHUNK_SIZE, but no more than the |
2418 | * remaining size the caller asked for. |
2419 | */ |
2420 | |
2421 | length = MIN(size, CHUNK_SIZE); |
2422 | |
2423 | /* |
2424 | * The chunk_state keeps track of which pages we've already processed if there's |
2425 | * a shadow chain on this object. At this point, we haven't done anything with this |
2426 | * range of pages yet, so initialize the state to indicate no pages processed yet. |
2427 | */ |
2428 | |
2429 | CHUNK_INIT(chunk_state, length); |
2430 | object = orig_object; |
2431 | |
2432 | /* |
2433 | * Start at the top level object and iterate around the loop once for each object |
2434 | * in the shadow chain. We stop processing early if we've already found all the pages |
2435 | * in the range. Otherwise we stop when we run out of shadow objects. |
2436 | */ |
2437 | |
2438 | while (object && CHUNK_NOT_COMPLETE(chunk_state)) { |
2439 | vm_object_paging_begin(object); |
2440 | |
		deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset);
2442 | |
2443 | vm_object_paging_end(object); |
2444 | |
2445 | /* |
2446 | * We've finished with this object, see if there's a shadow object. If |
2447 | * there is, update the offset and lock the new object. We also turn off |
2448 | * kill_page at this point since we only kill pages in the top most object. |
2449 | */ |
2450 | |
2451 | tmp_object = object->shadow; |
2452 | |
2453 | if (tmp_object) { |
2454 | assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD)); |
2455 | flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE); |
2456 | offset += object->vo_shadow_offset; |
2457 | vm_object_lock(tmp_object); |
2458 | } |
2459 | |
2460 | if (object != orig_object) { |
2461 | vm_object_unlock(object); |
2462 | } |
2463 | |
2464 | object = tmp_object; |
2465 | } |
2466 | |
2467 | if (object && object != orig_object) { |
2468 | vm_object_unlock(object); |
2469 | } |
2470 | |
2471 | return length; |
2472 | } |
2473 | |
2474 | |
2475 | |
2476 | /* |
2477 | * Move any resident pages in the specified range to the inactive queue. If kill_page is set, |
2478 | * we also clear the modified status of the page and "forget" any changes that have been made |
2479 | * to the page. |
2480 | */ |
2481 | |
2482 | __private_extern__ void |
2483 | vm_object_deactivate_pages( |
2484 | vm_object_t object, |
2485 | vm_object_offset_t offset, |
2486 | vm_object_size_t size, |
2487 | boolean_t kill_page, |
2488 | boolean_t reusable_page, |
2489 | boolean_t reusable_no_write, |
2490 | struct pmap *pmap, |
2491 | vm_map_offset_t pmap_offset) |
2492 | { |
2493 | vm_object_size_t length; |
2494 | boolean_t all_reusable; |
2495 | pmap_flush_context pmap_flush_context_storage; |
2496 | unsigned int pmap_clear_refmod_mask = VM_MEM_REFERENCED; |
2497 | unsigned int pmap_clear_refmod_options = 0; |
2498 | deactivate_flags_t flags = DEACTIVATE_CLEAR_REFMOD; |
2499 | bool refmod_cleared = false; |
2500 | if (kill_page) { |
2501 | flags |= DEACTIVATE_KILL; |
2502 | } |
2503 | if (reusable_page) { |
2504 | flags |= DEACTIVATE_REUSABLE; |
2505 | } |
2506 | if (reusable_no_write) { |
2507 | flags |= DEACTIVATE_REUSABLE_NO_WRITE; |
2508 | } |
2509 | |
2510 | /* |
2511 | * We break the range up into chunks and do one chunk at a time. This is for |
2512 | * efficiency and performance while handling the shadow chains and the locks. |
2513 | * The deactivate_a_chunk() function returns how much of the range it processed. |
2514 | * We keep calling this routine until the given size is exhausted. |
2515 | */ |
2516 | |
2517 | |
2518 | all_reusable = FALSE; |
2519 | #if 11 |
2520 | /* |
2521 | * For the sake of accurate "reusable" pmap stats, we need |
2522 | * to tell pmap about each page that is no longer "reusable", |
2523 | * so we can't do the "all_reusable" optimization. |
2524 | * |
2525 | * If we do go with the all_reusable optimization, we can't |
	 * return if size is 0 since we could have "all_reusable == TRUE".
2527 | * In this case, we save the overhead of doing the pmap_flush_context |
2528 | * work. |
2529 | */ |
2530 | if (size == 0) { |
2531 | return; |
2532 | } |
2533 | #else |
2534 | if (reusable_page && |
2535 | object->internal && |
2536 | object->vo_size != 0 && |
2537 | object->vo_size == size && |
2538 | object->reusable_page_count == 0) { |
2539 | all_reusable = TRUE; |
2540 | reusable_page = FALSE; |
2541 | flags |= DEACTIVATE_ALL_REUSABLE; |
2542 | } |
2543 | #endif |
2544 | |
2545 | if ((reusable_page || all_reusable) && object->all_reusable) { |
2546 | /* This means MADV_FREE_REUSABLE has been called twice, which |
2547 | * is probably illegal. */ |
2548 | return; |
2549 | } |
2550 | |
2551 | |
2552 | pmap_flush_context_init(&pmap_flush_context_storage); |
2553 | |
2554 | /* |
2555 | * If we're deactivating multiple pages, try to perform one bulk pmap operation. |
2556 | * We can't do this if we're killing pages and there's a shadow chain as |
2557 | * we don't yet know which pages are in the top object (pages in shadow copies aren't |
2558 | * safe to kill). |
2559 | * And we can only do this on hardware that supports it. |
2560 | */ |
2561 | if (size > PAGE_SIZE && (!kill_page || !object->shadow)) { |
2562 | if (kill_page && object->internal) { |
2563 | pmap_clear_refmod_mask |= VM_MEM_MODIFIED; |
2564 | } |
2565 | if (reusable_page) { |
2566 | pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE; |
2567 | } |
2568 | |
		refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options);
2570 | if (refmod_cleared) { |
2571 | // We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it. |
2572 | flags &= ~DEACTIVATE_CLEAR_REFMOD; |
2573 | } |
2574 | } |
2575 | |
2576 | while (size) { |
		length = deactivate_a_chunk(object, offset, size, flags,
		    &pmap_flush_context_storage, pmap, pmap_offset);
2579 | |
2580 | size -= length; |
2581 | offset += length; |
2582 | pmap_offset += length; |
2583 | } |
2584 | pmap_flush(&pmap_flush_context_storage); |
2585 | |
2586 | if (all_reusable) { |
2587 | if (!object->all_reusable) { |
2588 | unsigned int reusable; |
2589 | |
2590 | object->all_reusable = TRUE; |
2591 | assert(object->reusable_page_count == 0); |
2592 | /* update global stats */ |
2593 | reusable = object->resident_page_count; |
2594 | OSAddAtomic(reusable, |
2595 | &vm_page_stats_reusable.reusable_count); |
2596 | vm_page_stats_reusable.reusable += reusable; |
2597 | vm_page_stats_reusable.all_reusable_calls++; |
2598 | } |
2599 | } else if (reusable_page) { |
2600 | vm_page_stats_reusable.partial_reusable_calls++; |
2601 | } |
2602 | } |
2603 | |
2604 | void |
2605 | vm_object_reuse_pages( |
2606 | vm_object_t object, |
2607 | vm_object_offset_t start_offset, |
2608 | vm_object_offset_t end_offset, |
2609 | boolean_t allow_partial_reuse) |
2610 | { |
2611 | vm_object_offset_t cur_offset; |
2612 | vm_page_t m; |
2613 | unsigned int reused, reusable; |
2614 | |
2615 | #define VM_OBJECT_REUSE_PAGE(object, m, reused) \ |
2616 | MACRO_BEGIN \ |
2617 | if ((m) != VM_PAGE_NULL && \ |
2618 | (m)->vmp_reusable) { \ |
2619 | assert((object)->reusable_page_count <= \ |
2620 | (object)->resident_page_count); \ |
2621 | assert((object)->reusable_page_count > 0); \ |
2622 | (object)->reusable_page_count--; \ |
2623 | (m)->vmp_reusable = FALSE; \ |
2624 | (reused)++; \ |
2625 | /* \ |
2626 | * Tell pmap that this page is no longer \ |
2627 | * "reusable", to update the "reusable" stats \ |
2628 | * for all the pmaps that have mapped this \ |
2629 | * page. \ |
2630 | */ \ |
2631 | pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \ |
2632 | 0, /* refmod */ \ |
2633 | (PMAP_OPTIONS_CLEAR_REUSABLE \ |
2634 | | PMAP_OPTIONS_NOFLUSH), \ |
2635 | NULL); \ |
2636 | } \ |
2637 | MACRO_END |
2638 | |
2639 | reused = 0; |
2640 | reusable = 0; |
2641 | |
2642 | vm_object_lock_assert_exclusive(object); |
2643 | |
2644 | if (object->all_reusable) { |
2645 | panic("object %p all_reusable: can't update pmap stats" , |
2646 | object); |
2647 | assert(object->reusable_page_count == 0); |
2648 | object->all_reusable = FALSE; |
2649 | if (end_offset - start_offset == object->vo_size || |
2650 | !allow_partial_reuse) { |
2651 | vm_page_stats_reusable.all_reuse_calls++; |
2652 | reused = object->resident_page_count; |
2653 | } else { |
2654 | vm_page_stats_reusable.partial_reuse_calls++; |
2655 | vm_page_queue_iterate(&object->memq, m, vmp_listq) { |
2656 | if (m->vmp_offset < start_offset || |
2657 | m->vmp_offset >= end_offset) { |
2658 | m->vmp_reusable = TRUE; |
2659 | object->reusable_page_count++; |
2660 | assert(object->resident_page_count >= object->reusable_page_count); |
2661 | continue; |
2662 | } else { |
2663 | assert(!m->vmp_reusable); |
2664 | reused++; |
2665 | } |
2666 | } |
2667 | } |
2668 | } else if (object->resident_page_count > |
2669 | ((end_offset - start_offset) >> PAGE_SHIFT)) { |
2670 | vm_page_stats_reusable.partial_reuse_calls++; |
2671 | for (cur_offset = start_offset; |
2672 | cur_offset < end_offset; |
2673 | cur_offset += PAGE_SIZE_64) { |
2674 | if (object->reusable_page_count == 0) { |
2675 | break; |
2676 | } |
			m = vm_page_lookup(object, cur_offset);
2678 | VM_OBJECT_REUSE_PAGE(object, m, reused); |
2679 | } |
2680 | } else { |
2681 | vm_page_stats_reusable.partial_reuse_calls++; |
2682 | vm_page_queue_iterate(&object->memq, m, vmp_listq) { |
2683 | if (object->reusable_page_count == 0) { |
2684 | break; |
2685 | } |
2686 | if (m->vmp_offset < start_offset || |
2687 | m->vmp_offset >= end_offset) { |
2688 | continue; |
2689 | } |
2690 | VM_OBJECT_REUSE_PAGE(object, m, reused); |
2691 | } |
2692 | } |
2693 | |
2694 | /* update global stats */ |
2695 | OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count); |
2696 | vm_page_stats_reusable.reused += reused; |
2697 | vm_page_stats_reusable.reusable += reusable; |
2698 | } |
2699 | |
2700 | /* |
2701 | * This function determines if the zero operation can be run on the |
2702 | * object. The checks on the entry have already been performed by |
2703 | * vm_map_zero_entry_preflight. |
2704 | */ |
2705 | static kern_return_t |
2706 | vm_object_zero_preflight( |
2707 | vm_object_t object, |
2708 | vm_object_offset_t start, |
2709 | vm_object_offset_t end) |
2710 | { |
2711 | /* |
2712 | * Zeroing is further restricted to anonymous memory. |
2713 | */ |
2714 | if (!object->internal) { |
2715 | return KERN_PROTECTION_FAILURE; |
2716 | } |
2717 | |
2718 | /* |
2719 | * Zeroing for copy on write isn't yet supported |
2720 | */ |
2721 | if (object->shadow != NULL || |
2722 | object->vo_copy != NULL) { |
2723 | return KERN_NO_ACCESS; |
2724 | } |
2725 | |
2726 | /* |
	 * Ensure that the bounds make sense with respect to the object
2728 | */ |
2729 | if (end - start > object->vo_size) { |
2730 | return KERN_INVALID_ADDRESS; |
2731 | } |
2732 | |
2733 | return KERN_SUCCESS; |
2734 | } |
2735 | |
2736 | /* |
2737 | * This function looks up a page and waits if it is busy or being cleaned. |
2738 | * It returns false when the page found is busy and it needs to wait. Caller |
2739 | * of this function should restart the operation for the cur_offset when |
2740 | * this function returns false. |
2741 | */ |
2742 | static bool |
2743 | vm_object_lookup_page_wait_busy( |
2744 | vm_object_t object, |
2745 | vm_object_offset_t cur_offset, |
2746 | vm_page_t *page) |
2747 | { |
2748 | vm_page_t m; |
2749 | |
	m = vm_page_lookup(object, cur_offset);
2751 | if ((m == VM_PAGE_NULL) || |
2752 | (!m->vmp_busy && !m->vmp_cleaning)) { |
2753 | *page = m; |
2754 | return true; |
2755 | } |
2756 | |
2757 | PAGE_SLEEP(object, m, THREAD_UNINT); |
2758 | return false; |
2759 | } |
2760 | |
2761 | static void |
2762 | vm_object_zero_page(vm_page_t m) |
2763 | { |
2764 | if (m != VM_PAGE_NULL) { |
2765 | ppnum_t phy_page_num = VM_PAGE_GET_PHYS_PAGE(m); |
2766 | |
2767 | /* |
2768 | * Skip fictitious guard pages |
2769 | */ |
2770 | if (m->vmp_fictitious) { |
2771 | assert(phy_page_num == vm_page_guard_addr); |
2772 | return; |
2773 | } |
		pmap_zero_page(phy_page_num);
2775 | } |
2776 | } |
2777 | |
2778 | /* |
2779 | * This function iterates the range of pages specified in the object and |
2780 | * discards the ones that are compressed and zeroes the ones that are wired. |
2781 | * This function may drop the object lock while waiting for a page that is |
2782 | * busy and will restart the operation for the specific offset. |
2783 | */ |
2784 | kern_return_t |
2785 | vm_object_zero( |
2786 | vm_object_t object, |
2787 | vm_object_offset_t cur_offset, |
2788 | vm_object_offset_t end_offset) |
2789 | { |
2790 | kern_return_t ret; |
2791 | |
	ret = vm_object_zero_preflight(object, cur_offset, end_offset);
2793 | if (ret != KERN_SUCCESS) { |
2794 | return ret; |
2795 | } |
2796 | |
2797 | while (cur_offset < end_offset) { |
2798 | vm_page_t m; |
2799 | |
2800 | /* |
		 * If the compressor has the page, just discard it instead
		 * of faulting it in and zeroing it; otherwise zero the page
		 * if it exists.  If we dropped the object lock during the
		 * lookup, retry the lookup for the cur_offset.
2805 | */ |
		if (page_is_paged_out(object, cur_offset)) {
2807 | VM_COMPRESSOR_PAGER_STATE_CLR(object, cur_offset); |
		} else if (vm_object_lookup_page_wait_busy(object, cur_offset, &m)) {
2809 | vm_object_zero_page(m); |
2810 | } else { |
2811 | /* |
2812 | * If we dropped the lock then relookup the cur_offset in the object |
2813 | */ |
			ret = vm_object_zero_preflight(object, cur_offset, end_offset);
2815 | if (ret != KERN_SUCCESS) { |
2816 | return ret; |
2817 | } |
2818 | continue; |
2819 | } |
2820 | cur_offset += PAGE_SIZE_64; |
2821 | /* |
2822 | * TODO: May need a vm_object_lock_yield_shared in this loop if it takes |
2823 | * too long, as holding the object lock for too long can stall pageout |
2824 | * scan (or other users of the object) |
2825 | */ |
2826 | } |
2827 | |
2828 | return KERN_SUCCESS; |
2829 | } |
2830 | |
2831 | /* |
2832 | * Routine: vm_object_pmap_protect |
2833 | * |
2834 | * Purpose: |
2835 | * Reduces the permission for all physical |
2836 | * pages in the specified object range. |
2837 | * |
2838 | * If removing write permission only, it is |
2839 | * sufficient to protect only the pages in |
2840 | * the top-level object; only those pages may |
2841 | * have write permission. |
2842 | * |
2843 | * If removing all access, we must follow the |
2844 | * shadow chain from the top-level object to |
2845 | * remove access to all pages in shadowed objects. |
2846 | * |
2847 | * The object must *not* be locked. The object must |
2848 | * be internal. |
2849 | * |
2850 | * If pmap is not NULL, this routine assumes that |
2851 | * the only mappings for the pages are in that |
2852 | * pmap. |
2853 | */ |
2854 | |
2855 | __private_extern__ void |
2856 | vm_object_pmap_protect( |
2857 | vm_object_t object, |
2858 | vm_object_offset_t offset, |
2859 | vm_object_size_t size, |
2860 | pmap_t pmap, |
2861 | vm_map_size_t pmap_page_size, |
2862 | vm_map_offset_t pmap_start, |
2863 | vm_prot_t prot) |
2864 | { |
2865 | vm_object_pmap_protect_options(object, offset, size, pmap, |
2866 | pmap_page_size, |
	    pmap_start, prot, 0);
2868 | } |
2869 | |
2870 | __private_extern__ void |
2871 | vm_object_pmap_protect_options( |
2872 | vm_object_t object, |
2873 | vm_object_offset_t offset, |
2874 | vm_object_size_t size, |
2875 | pmap_t pmap, |
2876 | vm_map_size_t pmap_page_size, |
2877 | vm_map_offset_t pmap_start, |
2878 | vm_prot_t prot, |
2879 | int options) |
2880 | { |
2881 | pmap_flush_context pmap_flush_context_storage; |
2882 | boolean_t delayed_pmap_flush = FALSE; |
2883 | vm_object_offset_t offset_in_object; |
2884 | vm_object_size_t size_in_object; |
2885 | |
2886 | if (object == VM_OBJECT_NULL) { |
2887 | return; |
2888 | } |
2889 | if (pmap_page_size > PAGE_SIZE) { |
2890 | /* for 16K map on 4K device... */ |
2891 | pmap_page_size = PAGE_SIZE; |
2892 | } |
2893 | /* |
2894 | * If we decide to work on the object itself, extend the range to |
2895 | * cover a full number of native pages. |
2896 | */ |
2897 | size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset); |
2898 | offset_in_object = vm_object_trunc_page(offset); |
2899 | /* |
2900 | * If we decide to work on the pmap, use the exact range specified, |
2901 | * so no rounding/truncating offset and size. They should already |
2902 | * be aligned to pmap_page_size. |
2903 | */ |
2904 | assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)), |
2905 | "offset 0x%llx size 0x%llx pmap_page_size 0x%llx" , |
2906 | offset, size, (uint64_t)pmap_page_size); |
2907 | |
2908 | vm_object_lock(object); |
2909 | |
2910 | if (object->phys_contiguous) { |
2911 | if (pmap != NULL) { |
2912 | vm_object_unlock(object); |
			pmap_protect_options(pmap,
			    pmap_start,
			    pmap_start + size,
			    prot,
			    options & ~PMAP_OPTIONS_NOFLUSH,
2918 | NULL); |
2919 | } else { |
2920 | vm_object_offset_t phys_start, phys_end, phys_addr; |
2921 | |
2922 | phys_start = object->vo_shadow_offset + offset_in_object; |
2923 | phys_end = phys_start + size_in_object; |
2924 | assert(phys_start <= phys_end); |
2925 | assert(phys_end <= object->vo_shadow_offset + object->vo_size); |
2926 | vm_object_unlock(object); |
2927 | |
2928 | pmap_flush_context_init(&pmap_flush_context_storage); |
2929 | delayed_pmap_flush = FALSE; |
2930 | |
2931 | for (phys_addr = phys_start; |
2932 | phys_addr < phys_end; |
2933 | phys_addr += PAGE_SIZE_64) { |
				pmap_page_protect_options(
				    (ppnum_t) (phys_addr >> PAGE_SHIFT),
				    prot,
				    options | PMAP_OPTIONS_NOFLUSH,
				    (void *)&pmap_flush_context_storage);
2939 | delayed_pmap_flush = TRUE; |
2940 | } |
2941 | if (delayed_pmap_flush == TRUE) { |
2942 | pmap_flush(&pmap_flush_context_storage); |
2943 | } |
2944 | } |
2945 | return; |
2946 | } |
2947 | |
2948 | assert(object->internal); |
2949 | |
2950 | while (TRUE) { |
2951 | if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) { |
2952 | vm_object_unlock(object); |
2953 | if (pmap_page_size < PAGE_SIZE) { |
2954 | DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n" , pmap, (uint64_t)pmap_start, pmap_start + size, prot); |
2955 | } |
			pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
			    options & ~PMAP_OPTIONS_NOFLUSH, NULL);
2958 | return; |
2959 | } |
2960 | |
2961 | if (pmap_page_size < PAGE_SIZE) { |
2962 | DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n" , pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object); |
2963 | } |
2964 | |
2965 | pmap_flush_context_init(&pmap_flush_context_storage); |
2966 | delayed_pmap_flush = FALSE; |
2967 | |
2968 | /* |
2969 | * if we are doing large ranges with respect to resident |
		 * page count then we should iterate over pages otherwise
2971 | * inverse page look-up will be faster |
2972 | */ |
2973 | if (ptoa_64(object->resident_page_count / 4) < size_in_object) { |
2974 | vm_page_t p; |
2975 | vm_object_offset_t end; |
2976 | |
2977 | end = offset_in_object + size_in_object; |
2978 | |
2979 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { |
2980 | if (!p->vmp_fictitious && (offset_in_object <= p->vmp_offset) && (p->vmp_offset < end)) { |
2981 | vm_map_offset_t start; |
2982 | |
2983 | /* |
2984 | * XXX FBDP 4K: intentionally using "offset" here instead |
2985 | * of "offset_in_object", since "start" is a pmap address. |
2986 | */ |
2987 | start = pmap_start + p->vmp_offset - offset; |
2988 | |
2989 | if (pmap != PMAP_NULL) { |
2990 | vm_map_offset_t curr; |
2991 | for (curr = start; |
2992 | curr < start + PAGE_SIZE_64; |
2993 | curr += pmap_page_size) { |
2994 | if (curr < pmap_start) { |
2995 | continue; |
2996 | } |
2997 | if (curr >= pmap_start + size) { |
2998 | break; |
2999 | } |
							pmap_protect_options(
							    pmap,
							    curr,
							    curr + pmap_page_size,
							    prot,
							    options | PMAP_OPTIONS_NOFLUSH,
							    &pmap_flush_context_storage);
3007 | } |
3008 | } else { |
						pmap_page_protect_options(
						    VM_PAGE_GET_PHYS_PAGE(p),
						    prot,
						    options | PMAP_OPTIONS_NOFLUSH,
						    &pmap_flush_context_storage);
3014 | } |
3015 | delayed_pmap_flush = TRUE; |
3016 | } |
3017 | } |
3018 | } else { |
3019 | vm_page_t p; |
3020 | vm_object_offset_t end; |
3021 | vm_object_offset_t target_off; |
3022 | |
3023 | end = offset_in_object + size_in_object; |
3024 | |
3025 | for (target_off = offset_in_object; |
3026 | target_off < end; target_off += PAGE_SIZE) { |
				p = vm_page_lookup(object, target_off);
3028 | |
3029 | if (p != VM_PAGE_NULL) { |
3030 | vm_object_offset_t start; |
3031 | |
3032 | /* |
3033 | * XXX FBDP 4K: intentionally using "offset" here instead |
3034 | * of "offset_in_object", since "start" is a pmap address. |
3035 | */ |
3036 | start = pmap_start + (p->vmp_offset - offset); |
3037 | |
3038 | if (pmap != PMAP_NULL) { |
3039 | vm_map_offset_t curr; |
3040 | for (curr = start; |
3041 | curr < start + PAGE_SIZE; |
3042 | curr += pmap_page_size) { |
3043 | if (curr < pmap_start) { |
3044 | continue; |
3045 | } |
3046 | if (curr >= pmap_start + size) { |
3047 | break; |
3048 | } |
							pmap_protect_options(
							    pmap,
							    curr,
							    curr + pmap_page_size,
							    prot,
							    options | PMAP_OPTIONS_NOFLUSH,
							    &pmap_flush_context_storage);
3056 | } |
3057 | } else { |
						pmap_page_protect_options(
						    VM_PAGE_GET_PHYS_PAGE(p),
						    prot,
						    options | PMAP_OPTIONS_NOFLUSH,
						    &pmap_flush_context_storage);
3063 | } |
3064 | delayed_pmap_flush = TRUE; |
3065 | } |
3066 | } |
3067 | } |
3068 | if (delayed_pmap_flush == TRUE) { |
3069 | pmap_flush(&pmap_flush_context_storage); |
3070 | } |
3071 | |
3072 | if (prot == VM_PROT_NONE) { |
3073 | /* |
3074 | * Must follow shadow chain to remove access |
3075 | * to pages in shadowed objects. |
3076 | */ |
3077 | vm_object_t next_object; |
3078 | |
3079 | next_object = object->shadow; |
3080 | if (next_object != VM_OBJECT_NULL) { |
3081 | offset_in_object += object->vo_shadow_offset; |
3082 | offset += object->vo_shadow_offset; |
3083 | vm_object_lock(next_object); |
3084 | vm_object_unlock(object); |
3085 | object = next_object; |
3086 | } else { |
3087 | /* |
3088 | * End of chain - we are done. |
3089 | */ |
3090 | break; |
3091 | } |
3092 | } else { |
3093 | /* |
3094 | * Pages in shadowed objects may never have |
3095 | * write permission - we may stop here. |
3096 | */ |
3097 | break; |
3098 | } |
3099 | } |
3100 | |
3101 | vm_object_unlock(object); |
3102 | } |
3103 | |
3104 | uint32_t vm_page_busy_absent_skipped = 0; |
3105 | |
3106 | /* |
3107 | * Routine: vm_object_copy_slowly |
3108 | * |
3109 | * Description: |
3110 | * Copy the specified range of the source |
3111 | * virtual memory object without using |
3112 | * protection-based optimizations (such |
3113 | * as copy-on-write). The pages in the |
3114 | * region are actually copied. |
3115 | * |
3116 | * In/out conditions: |
3117 | * The caller must hold a reference and a lock |
3118 | * for the source virtual memory object. The source |
3119 | * object will be returned *unlocked*. |
3120 | * |
3121 | * Results: |
3122 | * If the copy is completed successfully, KERN_SUCCESS is |
3123 | * returned. If the caller asserted the interruptible |
3124 | * argument, and an interruption occurred while waiting |
3125 | * for a user-generated event, MACH_SEND_INTERRUPTED is |
3126 | * returned. Other values may be returned to indicate |
3127 | * hard errors during the copy operation. |
3128 | * |
3129 | * A new virtual memory object is returned in a |
3130 | * parameter (_result_object). The contents of this |
3131 | * new object, starting at a zero offset, are a copy |
3132 | * of the source memory region. In the event of |
3133 | * an error, this parameter will contain the value |
3134 | * VM_OBJECT_NULL. |
3135 | */ |
3136 | __private_extern__ kern_return_t |
3137 | vm_object_copy_slowly( |
3138 | vm_object_t src_object, |
3139 | vm_object_offset_t src_offset, |
3140 | vm_object_size_t size, |
3141 | boolean_t interruptible, |
3142 | vm_object_t *_result_object) /* OUT */ |
3143 | { |
3144 | vm_object_t new_object; |
3145 | vm_object_offset_t new_offset; |
3146 | |
3147 | struct vm_object_fault_info fault_info = {}; |
3148 | |
3149 | if (size == 0) { |
3150 | vm_object_unlock(src_object); |
3151 | *_result_object = VM_OBJECT_NULL; |
3152 | return KERN_INVALID_ARGUMENT; |
3153 | } |
3154 | |
3155 | /* |
3156 | * Prevent destruction of the source object while we copy. |
3157 | */ |
3158 | |
3159 | vm_object_reference_locked(src_object); |
3160 | vm_object_unlock(src_object); |
3161 | |
3162 | /* |
3163 | * Create a new object to hold the copied pages. |
3164 | * A few notes: |
3165 | * We fill the new object starting at offset 0, |
3166 | * regardless of the input offset. |
3167 | * We don't bother to lock the new object within |
3168 | * this routine, since we have the only reference. |
3169 | */ |
3170 | |
3171 | size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset); |
3172 | src_offset = vm_object_trunc_page(src_offset); |
3173 | new_object = vm_object_allocate(size); |
3174 | new_offset = 0; |
3175 | |
3176 | assert(size == trunc_page_64(size)); /* Will the loop terminate? */ |
3177 | |
3178 | fault_info.interruptible = interruptible; |
3179 | fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; |
3180 | fault_info.lo_offset = src_offset; |
3181 | fault_info.hi_offset = src_offset + size; |
3182 | fault_info.stealth = TRUE; |
3183 | |
3184 | for (; |
3185 | size != 0; |
3186 | src_offset += PAGE_SIZE_64, |
3187 | new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64 |
3188 | ) { |
3189 | vm_page_t new_page; |
3190 | vm_fault_return_t result; |
3191 | |
3192 | vm_object_lock(new_object); |
3193 | |
		while ((new_page = vm_page_alloc(new_object, new_offset))
3195 | == VM_PAGE_NULL) { |
3196 | vm_object_unlock(new_object); |
3197 | |
3198 | if (!vm_page_wait(interruptible)) { |
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
3201 | *_result_object = VM_OBJECT_NULL; |
3202 | return MACH_SEND_INTERRUPTED; |
3203 | } |
3204 | vm_object_lock(new_object); |
3205 | } |
3206 | vm_object_unlock(new_object); |
3207 | |
3208 | do { |
3209 | vm_prot_t prot = VM_PROT_READ; |
3210 | vm_page_t _result_page; |
3211 | vm_page_t top_page; |
3212 | vm_page_t result_page; |
3213 | kern_return_t error_code; |
3214 | vm_object_t result_page_object; |
3215 | |
3216 | |
3217 | vm_object_lock(src_object); |
3218 | |
3219 | if (src_object->internal && |
3220 | src_object->shadow == VM_OBJECT_NULL && |
3221 | (src_object->pager == NULL || |
3222 | (VM_COMPRESSOR_PAGER_STATE_GET(src_object, |
3223 | src_offset) == |
3224 | VM_EXTERNAL_STATE_ABSENT))) { |
3225 | boolean_t can_skip_page; |
3226 | |
				_result_page = vm_page_lookup(src_object,
				    src_offset);
3229 | if (_result_page == VM_PAGE_NULL) { |
3230 | /* |
3231 | * This page is neither resident nor |
3232 | * compressed and there's no shadow |
3233 | * object below "src_object", so this |
3234 | * page is really missing. |
3235 | * There's no need to zero-fill it just |
3236 | * to copy it: let's leave it missing |
3237 | * in "new_object" and get zero-filled |
3238 | * on demand. |
3239 | */ |
3240 | can_skip_page = TRUE; |
3241 | } else if (workaround_41447923 && |
3242 | src_object->pager == NULL && |
3243 | _result_page != VM_PAGE_NULL && |
3244 | _result_page->vmp_busy && |
3245 | _result_page->vmp_absent && |
3246 | src_object->purgable == VM_PURGABLE_DENY && |
3247 | !src_object->blocked_access) { |
3248 | /* |
3249 | * This page is "busy" and "absent" |
3250 | * but not because we're waiting for |
3251 | * it to be decompressed. It must |
3252 | * be because it's a "no zero fill" |
3253 | * page that is currently not |
3254 | * accessible until it gets overwritten |
3255 | * by a device driver. |
3256 | * Since its initial state would have |
3257 | * been "zero-filled", let's leave the |
3258 | * copy page missing and get zero-filled |
3259 | * on demand. |
3260 | */ |
3261 | assert(src_object->internal); |
3262 | assert(src_object->shadow == NULL); |
3263 | assert(src_object->pager == NULL); |
3264 | can_skip_page = TRUE; |
3265 | vm_page_busy_absent_skipped++; |
3266 | } else { |
3267 | can_skip_page = FALSE; |
3268 | } |
3269 | if (can_skip_page) { |
3270 | vm_object_unlock(src_object); |
3271 | /* free the unused "new_page"... */ |
3272 | vm_object_lock(new_object); |
3273 | VM_PAGE_FREE(new_page); |
3274 | new_page = VM_PAGE_NULL; |
3275 | vm_object_unlock(new_object); |
3276 | /* ...and go to next page in "src_object" */ |
3277 | result = VM_FAULT_SUCCESS; |
3278 | break; |
3279 | } |
3280 | } |
3281 | |
3282 | vm_object_paging_begin(src_object); |
3283 | |
3284 | /* cap size at maximum UPL size */ |
3285 | upl_size_t cluster_size; |
3286 | if (os_convert_overflow(size, &cluster_size)) { |
3287 | cluster_size = 0 - (upl_size_t)PAGE_SIZE; |
3288 | } |
3289 | fault_info.cluster_size = cluster_size; |
3290 | |
3291 | _result_page = VM_PAGE_NULL; |
			result = vm_fault_page(src_object, src_offset,
			    VM_PROT_READ, FALSE,
			    FALSE, /* page not looked up */
			    &prot, &_result_page, &top_page,
			    (int *)0,
			    &error_code, FALSE, &fault_info);
3298 | |
3299 | switch (result) { |
3300 | case VM_FAULT_SUCCESS: |
3301 | result_page = _result_page; |
3302 | result_page_object = VM_PAGE_OBJECT(result_page); |
3303 | |
3304 | /* |
3305 | * Copy the page to the new object. |
3306 | * |
3307 | * POLICY DECISION: |
3308 | * If result_page is clean, |
3309 | * we could steal it instead |
3310 | * of copying. |
3311 | */ |
3312 | |
vm_page_copy(result_page, new_page);
3314 | vm_object_unlock(result_page_object); |
3315 | |
3316 | /* |
3317 | * Let go of both pages (make them |
3318 | * not busy, perform wakeup, activate). |
3319 | */ |
3320 | vm_object_lock(new_object); |
3321 | SET_PAGE_DIRTY(new_page, FALSE); |
3322 | PAGE_WAKEUP_DONE(new_page); |
3323 | vm_object_unlock(new_object); |
3324 | |
3325 | vm_object_lock(result_page_object); |
3326 | PAGE_WAKEUP_DONE(result_page); |
3327 | |
3328 | vm_page_lockspin_queues(); |
3329 | if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) || |
3330 | (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) { |
vm_page_activate(result_page);
}
vm_page_activate(new_page);
3334 | vm_page_unlock_queues(); |
3335 | |
3336 | /* |
3337 | * Release paging references and |
3338 | * top-level placeholder page, if any. |
3339 | */ |
3340 | |
vm_fault_cleanup(result_page_object,
    top_page);
3343 | |
3344 | break; |
3345 | |
3346 | case VM_FAULT_RETRY: |
3347 | break; |
3348 | |
3349 | case VM_FAULT_MEMORY_SHORTAGE: |
3350 | if (vm_page_wait(interruptible)) { |
3351 | break; |
3352 | } |
ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_OBJCOPYSLOWLY_MEMORY_SHORTAGE), 0 /* arg */);
3354 | OS_FALLTHROUGH; |
3355 | |
3356 | case VM_FAULT_INTERRUPTED: |
3357 | vm_object_lock(new_object); |
3358 | VM_PAGE_FREE(new_page); |
3359 | vm_object_unlock(new_object); |
3360 | |
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
3363 | *_result_object = VM_OBJECT_NULL; |
3364 | return MACH_SEND_INTERRUPTED; |
3365 | |
3366 | case VM_FAULT_SUCCESS_NO_VM_PAGE: |
3367 | /* success but no VM page: fail */ |
3368 | vm_object_paging_end(src_object); |
3369 | vm_object_unlock(src_object); |
3370 | OS_FALLTHROUGH; |
3371 | case VM_FAULT_MEMORY_ERROR: |
3372 | /* |
3373 | * A policy choice: |
3374 | * (a) ignore pages that we can't |
3375 | * copy |
3376 | * (b) return the null object if |
3377 | * any page fails [chosen] |
3378 | */ |
3379 | |
3380 | vm_object_lock(new_object); |
3381 | VM_PAGE_FREE(new_page); |
3382 | vm_object_unlock(new_object); |
3383 | |
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
3386 | *_result_object = VM_OBJECT_NULL; |
3387 | return error_code ? error_code: |
3388 | KERN_MEMORY_ERROR; |
3389 | |
3390 | default: |
panic("vm_object_copy_slowly: unexpected error"
    " 0x%x from vm_fault_page()\n", result);
3393 | } |
3394 | } while (result != VM_FAULT_SUCCESS); |
3395 | } |
3396 | |
3397 | /* |
3398 | * Lose the extra reference, and return our object. |
3399 | */ |
vm_object_deallocate(src_object);
3401 | *_result_object = new_object; |
3402 | return KERN_SUCCESS; |
3403 | } |
3404 | |
3405 | /* |
3406 | * Routine: vm_object_copy_quickly |
3407 | * |
3408 | * Purpose: |
3409 | * Copy the specified range of the source virtual |
3410 | * memory object, if it can be done without waiting |
3411 | * for user-generated events. |
3412 | * |
3413 | * Results: |
3414 | * If the copy is successful, the copy is returned in |
3415 | * the arguments; otherwise, the arguments are not |
3416 | * affected. |
3417 | * |
3418 | * In/out conditions: |
3419 | * The object should be unlocked on entry and exit. |
3420 | */ |
3421 | |
3422 | /*ARGSUSED*/ |
3423 | __private_extern__ boolean_t |
3424 | vm_object_copy_quickly( |
3425 | vm_object_t object, /* IN */ |
3426 | __unused vm_object_offset_t offset, /* IN */ |
3427 | __unused vm_object_size_t size, /* IN */ |
3428 | boolean_t *_src_needs_copy, /* OUT */ |
3429 | boolean_t *_dst_needs_copy) /* OUT */ |
3430 | { |
3431 | memory_object_copy_strategy_t copy_strategy; |
3432 | |
3433 | if (object == VM_OBJECT_NULL) { |
3434 | *_src_needs_copy = FALSE; |
3435 | *_dst_needs_copy = FALSE; |
3436 | return TRUE; |
3437 | } |
3438 | |
3439 | vm_object_lock(object); |
3440 | |
3441 | copy_strategy = object->copy_strategy; |
3442 | |
3443 | switch (copy_strategy) { |
3444 | case MEMORY_OBJECT_COPY_SYMMETRIC: |
3445 | |
3446 | /* |
3447 | * Symmetric copy strategy. |
3448 | * Make another reference to the object. |
3449 | * Leave object/offset unchanged. |
3450 | */ |
3451 | |
3452 | vm_object_reference_locked(object); |
3453 | VM_OBJECT_SET_SHADOWED(object, TRUE); |
3454 | vm_object_unlock(object); |
3455 | |
3456 | /* |
3457 | * Both source and destination must make |
3458 | * shadows, and the source must be made |
3459 | * read-only if not already. |
3460 | */ |
3461 | |
3462 | *_src_needs_copy = TRUE; |
3463 | *_dst_needs_copy = TRUE; |
3464 | |
3465 | break; |
3466 | |
3467 | case MEMORY_OBJECT_COPY_DELAY: |
3468 | vm_object_unlock(object); |
3469 | return FALSE; |
3470 | |
3471 | default: |
3472 | vm_object_unlock(object); |
3473 | return FALSE; |
3474 | } |
3475 | return TRUE; |
3476 | } |
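
/*
 * Illustrative sketch (not compiled): how a caller might consume the two
 * OUT flags reported by vm_object_copy_quickly().  The function name and
 * the "mark the mapping" steps are hypothetical stand-ins for the
 * vm_map-level bookkeeping a real caller would perform; only the flag
 * handling mirrors what the routine above actually reports.
 */
#if 0
static void
vm_object_copy_quickly_caller_sketch(
    vm_object_t        src_object,
    vm_object_offset_t offset,
    vm_object_size_t   size)
{
    boolean_t src_needs_copy, dst_needs_copy;

    if (!vm_object_copy_quickly(src_object, offset, size,
        &src_needs_copy, &dst_needs_copy)) {
        /*
         * MEMORY_OBJECT_COPY_DELAY (or an unrecognized strategy):
         * the caller must fall back to another copy routine,
         * e.g. vm_object_copy_strategically().
         */
        return;
    }
    /*
     * Symmetric copy (or a null source object): the routine already
     * took the extra reference; the caller only has to honor the
     * two OUT flags.
     */
    if (src_needs_copy) {
        /* mark the source mapping copy-on-write (hypothetical step) */
    }
    if (dst_needs_copy) {
        /* mark the destination mapping copy-on-write (hypothetical step) */
    }
}
#endif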
3477 | |
3478 | static uint32_t copy_delayed_lock_collisions; |
3479 | static uint32_t copy_delayed_max_collisions; |
3480 | static uint32_t copy_delayed_lock_contention; |
3481 | static uint32_t copy_delayed_protect_iterate; |
3482 | |
3483 | /* |
3484 | * Routine: vm_object_copy_delayed [internal] |
3485 | * |
3486 | * Description: |
3487 | * Copy the specified virtual memory object, using |
3488 | * the asymmetric copy-on-write algorithm. |
3489 | * |
3490 | * In/out conditions: |
3491 | * The src_object must be locked on entry. It will be unlocked |
3492 | * on exit - so the caller must also hold a reference to it. |
3493 | * |
3494 | * This routine will not block waiting for user-generated |
3495 | * events. It is not interruptible. |
3496 | */ |
3497 | __private_extern__ vm_object_t |
3498 | vm_object_copy_delayed( |
3499 | vm_object_t src_object, |
3500 | vm_object_offset_t src_offset, |
3501 | vm_object_size_t size, |
3502 | boolean_t src_object_shared) |
3503 | { |
3504 | vm_object_t new_copy = VM_OBJECT_NULL; |
3505 | vm_object_t old_copy; |
3506 | vm_page_t p; |
3507 | vm_object_size_t copy_size = src_offset + size; |
3508 | pmap_flush_context pmap_flush_context_storage; |
3509 | boolean_t delayed_pmap_flush = FALSE; |
3510 | |
3511 | |
3512 | uint32_t collisions = 0; |
3513 | /* |
3514 | * The user-level memory manager wants to see all of the changes |
3515 | * to this object, but it has promised not to make any changes on |
3516 | * its own. |
3517 | * |
3518 | * Perform an asymmetric copy-on-write, as follows: |
3519 | * Create a new object, called a "copy object" to hold |
3520 | * pages modified by the new mapping (i.e., the copy, |
3521 | * not the original mapping). |
3522 | * Record the original object as the backing object for |
3523 | * the copy object. If the original mapping does not |
3524 | * change a page, it may be used read-only by the copy. |
3525 | * Record the copy object in the original object. |
3526 | * When the original mapping causes a page to be modified, |
3527 | * it must be copied to a new page that is "pushed" to |
3528 | * the copy object. |
3529 | * Mark the new mapping (the copy object) copy-on-write. |
3530 | * This makes the copy object itself read-only, allowing |
3531 | * it to be reused if the original mapping makes no |
3532 | * changes, and simplifying the synchronization required |
3533 | * in the "push" operation described above. |
3534 | * |
* The copy-on-write is said to be asymmetric because the original
3536 | * object is *not* marked copy-on-write. A copied page is pushed |
3537 | * to the copy object, regardless which party attempted to modify |
3538 | * the page. |
3539 | * |
3540 | * Repeated asymmetric copy operations may be done. If the |
3541 | * original object has not been changed since the last copy, its |
3542 | * copy object can be reused. Otherwise, a new copy object can be |
3543 | * inserted between the original object and its previous copy |
* object. Since any copy object is read-only, this cannot
3545 | * affect the contents of the previous copy object. |
3546 | * |
3547 | * Note that a copy object is higher in the object tree than the |
3548 | * original object; therefore, use of the copy object recorded in |
3549 | * the original object must be done carefully, to avoid deadlock. |
3550 | */ |
3551 | |
3552 | copy_size = vm_object_round_page(copy_size); |
3553 | Retry: |
3554 | |
3555 | /* |
3556 | * Wait for paging in progress. |
3557 | */ |
3558 | if (!src_object->true_share && |
3559 | (src_object->paging_in_progress != 0 || |
3560 | src_object->activity_in_progress != 0)) { |
3561 | if (src_object_shared == TRUE) { |
3562 | vm_object_unlock(src_object); |
3563 | vm_object_lock(src_object); |
3564 | src_object_shared = FALSE; |
3565 | goto Retry; |
3566 | } |
3567 | vm_object_paging_wait(src_object, THREAD_UNINT); |
3568 | } |
3569 | /* |
3570 | * See whether we can reuse the result of a previous |
3571 | * copy operation. |
3572 | */ |
3573 | |
3574 | old_copy = src_object->vo_copy; |
3575 | if (old_copy != VM_OBJECT_NULL) { |
3576 | int lock_granted; |
3577 | |
3578 | /* |
3579 | * Try to get the locks (out of order) |
3580 | */ |
3581 | if (src_object_shared == TRUE) { |
3582 | lock_granted = vm_object_lock_try_shared(old_copy); |
3583 | } else { |
3584 | lock_granted = vm_object_lock_try(old_copy); |
3585 | } |
3586 | |
3587 | if (!lock_granted) { |
3588 | vm_object_unlock(src_object); |
3589 | |
3590 | if (collisions++ == 0) { |
3591 | copy_delayed_lock_contention++; |
3592 | } |
3593 | mutex_pause(collisions); |
3594 | |
3595 | /* Heisenberg Rules */ |
3596 | copy_delayed_lock_collisions++; |
3597 | |
3598 | if (collisions > copy_delayed_max_collisions) { |
3599 | copy_delayed_max_collisions = collisions; |
3600 | } |
3601 | |
3602 | if (src_object_shared == TRUE) { |
3603 | vm_object_lock_shared(src_object); |
3604 | } else { |
3605 | vm_object_lock(src_object); |
3606 | } |
3607 | |
3608 | goto Retry; |
3609 | } |
3610 | |
3611 | /* |
3612 | * Determine whether the old copy object has |
3613 | * been modified. |
3614 | */ |
3615 | |
3616 | if (old_copy->resident_page_count == 0 && |
3617 | !old_copy->pager_created) { |
3618 | /* |
3619 | * It has not been modified. |
3620 | * |
3621 | * Return another reference to |
3622 | * the existing copy-object if |
3623 | * we can safely grow it (if |
3624 | * needed). |
3625 | */ |
3626 | |
3627 | if (old_copy->vo_size < copy_size) { |
3628 | if (src_object_shared == TRUE) { |
3629 | vm_object_unlock(old_copy); |
3630 | vm_object_unlock(src_object); |
3631 | |
3632 | vm_object_lock(src_object); |
3633 | src_object_shared = FALSE; |
3634 | goto Retry; |
3635 | } |
3636 | /* |
3637 | * We can't perform a delayed copy if any of the |
3638 | * pages in the extended range are wired (because |
3639 | * we can't safely take write permission away from |
3640 | * wired pages). If the pages aren't wired, then |
3641 | * go ahead and protect them. |
3642 | */ |
3643 | copy_delayed_protect_iterate++; |
3644 | |
3645 | pmap_flush_context_init(&pmap_flush_context_storage); |
3646 | delayed_pmap_flush = FALSE; |
3647 | |
3648 | vm_page_queue_iterate(&src_object->memq, p, vmp_listq) { |
3649 | if (!p->vmp_fictitious && |
3650 | p->vmp_offset >= old_copy->vo_size && |
3651 | p->vmp_offset < copy_size) { |
3652 | if (VM_PAGE_WIRED(p)) { |
3653 | vm_object_unlock(old_copy); |
3654 | vm_object_unlock(src_object); |
3655 | |
3656 | if (new_copy != VM_OBJECT_NULL) { |
3657 | vm_object_unlock(new_copy); |
vm_object_deallocate(new_copy);
3659 | } |
3660 | if (delayed_pmap_flush == TRUE) { |
3661 | pmap_flush(&pmap_flush_context_storage); |
3662 | } |
3663 | |
3664 | return VM_OBJECT_NULL; |
3665 | } else { |
pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
    (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3669 | delayed_pmap_flush = TRUE; |
3670 | } |
3671 | } |
3672 | } |
3673 | if (delayed_pmap_flush == TRUE) { |
3674 | pmap_flush(&pmap_flush_context_storage); |
3675 | } |
3676 | |
assertf(page_aligned(copy_size),
    "object %p size 0x%llx",
    old_copy, (uint64_t)copy_size);
3680 | old_copy->vo_size = copy_size; |
3681 | |
3682 | /* |
3683 | * src_object's "vo_copy" object now covers |
3684 | * a larger portion of src_object. |
3685 | * Increment src_object's "vo_copy_version" |
3686 | * to make any racing vm_fault() on |
3687 | * "src_object" re-check if it needs to honor |
3688 | * any new copy-on-write obligation. |
3689 | */ |
3690 | src_object->vo_copy_version++; |
3691 | } |
3692 | if (src_object_shared == TRUE) { |
3693 | vm_object_reference_shared(old_copy); |
3694 | } else { |
3695 | vm_object_reference_locked(old_copy); |
3696 | } |
3697 | vm_object_unlock(old_copy); |
3698 | vm_object_unlock(src_object); |
3699 | |
3700 | if (new_copy != VM_OBJECT_NULL) { |
3701 | vm_object_unlock(new_copy); |
vm_object_deallocate(new_copy);
3703 | } |
3704 | return old_copy; |
3705 | } |
3706 | |
3707 | |
3708 | |
3709 | /* |
3710 | * Adjust the size argument so that the newly-created |
3711 | * copy object will be large enough to back either the |
3712 | * old copy object or the new mapping. |
3713 | */ |
3714 | if (old_copy->vo_size > copy_size) { |
3715 | copy_size = old_copy->vo_size; |
3716 | } |
3717 | |
3718 | if (new_copy == VM_OBJECT_NULL) { |
3719 | vm_object_unlock(old_copy); |
3720 | vm_object_unlock(src_object); |
new_copy = vm_object_allocate(copy_size);
3722 | vm_object_lock(src_object); |
3723 | vm_object_lock(new_copy); |
3724 | |
3725 | src_object_shared = FALSE; |
3726 | goto Retry; |
3727 | } |
assertf(page_aligned(copy_size),
    "object %p size 0x%llx",
    new_copy, (uint64_t)copy_size);
3731 | new_copy->vo_size = copy_size; |
3732 | |
3733 | /* |
3734 | * The copy-object is always made large enough to |
3735 | * completely shadow the original object, since |
3736 | * it may have several users who want to shadow |
3737 | * the original object at different points. |
3738 | */ |
3739 | |
3740 | assert((old_copy->shadow == src_object) && |
3741 | (old_copy->vo_shadow_offset == (vm_object_offset_t) 0)); |
3742 | } else if (new_copy == VM_OBJECT_NULL) { |
3743 | vm_object_unlock(src_object); |
new_copy = vm_object_allocate(copy_size);
3745 | vm_object_lock(src_object); |
3746 | vm_object_lock(new_copy); |
3747 | |
3748 | src_object_shared = FALSE; |
3749 | goto Retry; |
3750 | } |
3751 | |
3752 | /* |
3753 | * We now have the src object locked, and the new copy object |
3754 | * allocated and locked (and potentially the old copy locked). |
3755 | * Before we go any further, make sure we can still perform |
3756 | * a delayed copy, as the situation may have changed. |
3757 | * |
3758 | * Specifically, we can't perform a delayed copy if any of the |
3759 | * pages in the range are wired (because we can't safely take |
3760 | * write permission away from wired pages). If the pages aren't |
3761 | * wired, then go ahead and protect them. |
3762 | */ |
3763 | copy_delayed_protect_iterate++; |
3764 | |
3765 | pmap_flush_context_init(&pmap_flush_context_storage); |
3766 | delayed_pmap_flush = FALSE; |
3767 | |
3768 | vm_page_queue_iterate(&src_object->memq, p, vmp_listq) { |
3769 | if (!p->vmp_fictitious && p->vmp_offset < copy_size) { |
3770 | if (VM_PAGE_WIRED(p)) { |
3771 | if (old_copy) { |
3772 | vm_object_unlock(old_copy); |
3773 | } |
3774 | vm_object_unlock(src_object); |
3775 | vm_object_unlock(new_copy); |
vm_object_deallocate(new_copy);
3777 | |
3778 | if (delayed_pmap_flush == TRUE) { |
3779 | pmap_flush(&pmap_flush_context_storage); |
3780 | } |
3781 | |
3782 | return VM_OBJECT_NULL; |
3783 | } else { |
pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
    (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3787 | delayed_pmap_flush = TRUE; |
3788 | } |
3789 | } |
3790 | } |
3791 | if (delayed_pmap_flush == TRUE) { |
3792 | pmap_flush(&pmap_flush_context_storage); |
3793 | } |
3794 | |
3795 | if (old_copy != VM_OBJECT_NULL) { |
3796 | /* |
3797 | * Make the old copy-object shadow the new one. |
3798 | * It will receive no more pages from the original |
3799 | * object. |
3800 | */ |
3801 | |
3802 | /* remove ref. from old_copy */ |
3803 | vm_object_lock_assert_exclusive(src_object); |
3804 | src_object->ref_count--; |
3805 | assert(src_object->ref_count > 0); |
3806 | vm_object_lock_assert_exclusive(old_copy); |
3807 | old_copy->shadow = new_copy; |
3808 | vm_object_lock_assert_exclusive(new_copy); |
3809 | assert(new_copy->ref_count > 0); |
3810 | new_copy->ref_count++; /* for old_copy->shadow ref. */ |
3811 | |
3812 | vm_object_unlock(old_copy); /* done with old_copy */ |
3813 | } |
3814 | |
3815 | /* |
3816 | * Point the new copy at the existing object. |
3817 | */ |
3818 | vm_object_lock_assert_exclusive(new_copy); |
3819 | new_copy->shadow = src_object; |
3820 | new_copy->vo_shadow_offset = 0; |
VM_OBJECT_SET_SHADOWED(new_copy, TRUE); /* caller must set needs_copy */
3822 | |
3823 | vm_object_lock_assert_exclusive(src_object); |
3824 | vm_object_reference_locked(src_object); |
VM_OBJECT_COPY_SET(src_object, new_copy);
3826 | vm_object_unlock(src_object); |
3827 | vm_object_unlock(new_copy); |
3828 | |
3829 | return new_copy; |
3830 | } |
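
/*
 * Illustrative sketch (not compiled): the shadow/copy linkage that
 * vm_object_copy_delayed() establishes, written as assertions.  Locking
 * around the post-call checks is elided for clarity, the function name is
 * local to this sketch, and the caller is assumed to already hold a
 * reference on "src_object", as the routine above requires.
 */
#if 0
static void
vm_object_copy_delayed_linkage_sketch(vm_object_t src_object)
{
    vm_object_t copy_object;

    vm_object_lock(src_object);        /* the routine consumes this lock */
    copy_object = vm_object_copy_delayed(src_object, 0,
        src_object->vo_size, FALSE);
    if (copy_object == VM_OBJECT_NULL) {
        /* wired pages in the range: the delayed copy was refused */
        return;
    }
    /* the copy object shadows the original at offset 0 ... */
    assert(copy_object->shadow == src_object);
    assert(copy_object->vo_shadow_offset == 0);
    /* ... and the original now pushes modified pages to the copy */
    assert(src_object->vo_copy == copy_object);
    vm_object_deallocate(copy_object); /* drop the returned reference */
}
#endif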
3831 | |
3832 | /* |
3833 | * Routine: vm_object_copy_strategically |
3834 | * |
3835 | * Purpose: |
3836 | * Perform a copy according to the source object's |
3837 | * declared strategy. This operation may block, |
3838 | * and may be interrupted. |
3839 | */ |
3840 | __private_extern__ kern_return_t |
3841 | vm_object_copy_strategically( |
3842 | vm_object_t src_object, |
3843 | vm_object_offset_t src_offset, |
3844 | vm_object_size_t size, |
3845 | bool forking, |
3846 | vm_object_t *dst_object, /* OUT */ |
3847 | vm_object_offset_t *dst_offset, /* OUT */ |
3848 | boolean_t *dst_needs_copy) /* OUT */ |
3849 | { |
3850 | boolean_t result; |
3851 | boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */ |
3852 | boolean_t object_lock_shared = FALSE; |
3853 | memory_object_copy_strategy_t copy_strategy; |
3854 | |
3855 | assert(src_object != VM_OBJECT_NULL); |
3856 | |
3857 | copy_strategy = src_object->copy_strategy; |
3858 | |
3859 | if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) { |
3860 | vm_object_lock_shared(src_object); |
3861 | object_lock_shared = TRUE; |
3862 | } else { |
3863 | vm_object_lock(src_object); |
3864 | } |
3865 | |
3866 | /* |
3867 | * The copy strategy is only valid if the memory manager |
3868 | * is "ready". Internal objects are always ready. |
3869 | */ |
3870 | |
3871 | while (!src_object->internal && !src_object->pager_ready) { |
3872 | wait_result_t wait_result; |
3873 | |
3874 | if (object_lock_shared == TRUE) { |
3875 | vm_object_unlock(src_object); |
3876 | vm_object_lock(src_object); |
3877 | object_lock_shared = FALSE; |
3878 | continue; |
3879 | } |
wait_result = vm_object_sleep(src_object,
3881 | VM_OBJECT_EVENT_PAGER_READY, |
3882 | interruptible); |
3883 | if (wait_result != THREAD_AWAKENED) { |
3884 | vm_object_unlock(src_object); |
3885 | *dst_object = VM_OBJECT_NULL; |
3886 | *dst_offset = 0; |
3887 | *dst_needs_copy = FALSE; |
3888 | return MACH_SEND_INTERRUPTED; |
3889 | } |
3890 | } |
3891 | |
3892 | /* |
3893 | * Use the appropriate copy strategy. |
3894 | */ |
3895 | |
3896 | if (copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) { |
3897 | if (forking) { |
3898 | copy_strategy = MEMORY_OBJECT_COPY_DELAY; |
3899 | } else { |
3900 | copy_strategy = MEMORY_OBJECT_COPY_NONE; |
3901 | if (object_lock_shared) { |
3902 | vm_object_unlock(src_object); |
3903 | vm_object_lock(src_object); |
3904 | object_lock_shared = FALSE; |
3905 | } |
3906 | } |
3907 | } |
3908 | |
3909 | switch (copy_strategy) { |
3910 | case MEMORY_OBJECT_COPY_DELAY: |
3911 | *dst_object = vm_object_copy_delayed(src_object, |
src_offset, size, object_lock_shared);
3913 | if (*dst_object != VM_OBJECT_NULL) { |
3914 | *dst_offset = src_offset; |
3915 | *dst_needs_copy = TRUE; |
3916 | result = KERN_SUCCESS; |
3917 | break; |
3918 | } |
3919 | vm_object_lock(src_object); |
3920 | OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */ |
3921 | |
3922 | case MEMORY_OBJECT_COPY_NONE: |
3923 | result = vm_object_copy_slowly(src_object, src_offset, size, |
interruptible, dst_object);
3925 | if (result == KERN_SUCCESS) { |
3926 | *dst_offset = src_offset - vm_object_trunc_page(src_offset); |
3927 | *dst_needs_copy = FALSE; |
3928 | } |
3929 | break; |
3930 | |
3931 | case MEMORY_OBJECT_COPY_SYMMETRIC: |
3932 | vm_object_unlock(src_object); |
3933 | result = KERN_MEMORY_RESTART_COPY; |
3934 | break; |
3935 | |
3936 | default: |
panic("copy_strategically: bad strategy %d for object %p",
3938 | copy_strategy, src_object); |
3939 | result = KERN_INVALID_ARGUMENT; |
3940 | } |
3941 | return result; |
3942 | } |
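
/*
 * Illustrative sketch (not compiled): how a caller might dispatch on the
 * results of vm_object_copy_strategically().  The function name is
 * hypothetical; the return values and OUT parameters are the ones the
 * routine above produces.  The caller is assumed to hold a reference on
 * "src_object", which must be unlocked on entry.
 */
#if 0
static kern_return_t
vm_object_copy_strategically_caller_sketch(
    vm_object_t        src_object,
    vm_object_offset_t src_offset,
    vm_object_size_t   size)
{
    vm_object_t        dst_object;
    vm_object_offset_t dst_offset;
    boolean_t          dst_needs_copy;
    kern_return_t      kr;

    kr = vm_object_copy_strategically(src_object, src_offset, size,
        false /* forking */,
        &dst_object, &dst_offset, &dst_needs_copy);
    switch (kr) {
    case KERN_SUCCESS:
        /* map dst_object at dst_offset, honoring dst_needs_copy */
        break;
    case KERN_MEMORY_RESTART_COPY:
        /* symmetric strategy: redo the copy through a symmetric
         * path such as vm_object_copy_quickly() */
        break;
    default:
        /* MACH_SEND_INTERRUPTED or a memory error:
         * dst_object is VM_OBJECT_NULL */
        break;
    }
    return kr;
}
#endif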
3943 | |
3944 | /* |
3945 | * vm_object_shadow: |
3946 | * |
3947 | * Create a new object which is backed by the |
3948 | * specified existing object range. The source |
3949 | * object reference is deallocated. |
3950 | * |
3951 | * The new object and offset into that object |
3952 | * are returned in the source parameters. |
3953 | */ |
3954 | boolean_t vm_object_shadow_check = TRUE; |
3955 | uint64_t vm_object_shadow_forced = 0; |
3956 | uint64_t vm_object_shadow_skipped = 0; |
3957 | |
3958 | __private_extern__ boolean_t |
3959 | vm_object_shadow( |
3960 | vm_object_t *object, /* IN/OUT */ |
3961 | vm_object_offset_t *offset, /* IN/OUT */ |
3962 | vm_object_size_t length, |
3963 | boolean_t always_shadow) |
3964 | { |
3965 | vm_object_t source; |
3966 | vm_object_t result; |
3967 | |
3968 | source = *object; |
3969 | assert(source != VM_OBJECT_NULL); |
3970 | if (source == VM_OBJECT_NULL) { |
3971 | return FALSE; |
3972 | } |
3973 | |
3974 | assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC); |
3975 | |
3976 | /* |
3977 | * Determine if we really need a shadow. |
3978 | * |
3979 | * If the source object is larger than what we are trying |
3980 | * to create, then force the shadow creation even if the |
3981 | * ref count is 1. This will allow us to [potentially] |
3982 | * collapse the underlying object away in the future |
3983 | * (freeing up the extra data it might contain and that |
3984 | * we don't need). |
3985 | */ |
3986 | |
3987 | assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */ |
3988 | |
3989 | /* |
3990 | * The following optimization does not work in the context of submaps |
3991 | * (the shared region, in particular). |
3992 | * This object might have only 1 reference (in the submap) but that |
3993 | * submap can itself be mapped multiple times, so the object is |
3994 | * actually indirectly referenced more than once... |
3995 | * The caller can specify to "always_shadow" to bypass the optimization. |
3996 | */ |
3997 | if (vm_object_shadow_check && |
3998 | source->vo_size == length && |
3999 | source->ref_count == 1) { |
4000 | if (always_shadow) { |
4001 | vm_object_shadow_forced++; |
4002 | } else { |
4003 | /* |
4004 | * Lock the object and check again. |
4005 | * We also check to see if there's |
4006 | * a shadow or copy object involved. |
4007 | * We can't do that earlier because |
4008 | * without the object locked, there |
4009 | * could be a collapse and the chain |
4010 | * gets modified leaving us with an |
4011 | * invalid pointer. |
4012 | */ |
4013 | vm_object_lock(source); |
4014 | if (source->vo_size == length && |
4015 | source->ref_count == 1 && |
4016 | (source->shadow == VM_OBJECT_NULL || |
4017 | source->shadow->vo_copy == VM_OBJECT_NULL)) { |
VM_OBJECT_SET_SHADOWED(source, FALSE);
4019 | vm_object_unlock(source); |
4020 | vm_object_shadow_skipped++; |
4021 | return FALSE; |
4022 | } |
4023 | /* things changed while we were locking "source"... */ |
4024 | vm_object_unlock(source); |
4025 | } |
4026 | } |
4027 | |
4028 | /* |
4029 | * *offset is the map entry's offset into the VM object and |
4030 | * is aligned to the map's page size. |
4031 | * VM objects need to be aligned to the system's page size. |
4032 | * Record the necessary adjustment and re-align the offset so |
4033 | * that result->vo_shadow_offset is properly page-aligned. |
4034 | */ |
4035 | vm_object_offset_t offset_adjustment; |
4036 | offset_adjustment = *offset - vm_object_trunc_page(*offset); |
4037 | length = vm_object_round_page(length + offset_adjustment); |
4038 | *offset = vm_object_trunc_page(*offset); |
4039 | |
4040 | /* |
4041 | * Allocate a new object with the given length |
4042 | */ |
4043 | |
if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) {
panic("vm_object_shadow: no object for shadowing");
4046 | } |
4047 | |
4048 | /* |
4049 | * The new object shadows the source object, adding |
4050 | * a reference to it. Our caller changes his reference |
4051 | * to point to the new object, removing a reference to |
4052 | * the source object. Net result: no change of reference |
4053 | * count. |
4054 | */ |
4055 | result->shadow = source; |
4056 | |
4057 | /* |
4058 | * Store the offset into the source object, |
4059 | * and fix up the offset into the new object. |
4060 | */ |
4061 | |
4062 | result->vo_shadow_offset = *offset; |
assertf(page_aligned(result->vo_shadow_offset),
    "result %p shadow offset 0x%llx",
    result, result->vo_shadow_offset);
4066 | |
4067 | /* |
4068 | * Return the new things |
4069 | */ |
4070 | |
4071 | *offset = 0; |
4072 | if (offset_adjustment) { |
4073 | /* |
4074 | * Make the map entry point to the equivalent offset |
4075 | * in the new object. |
4076 | */ |
DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length);
4078 | *offset += offset_adjustment; |
4079 | } |
4080 | *object = result; |
4081 | return TRUE; |
4082 | } |
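
/*
 * Illustrative sketch (not compiled): the offset realignment arithmetic
 * above with concrete numbers, assuming a 16K kernel page size
 * (PAGE_SIZE == 0x4000) and a 4K-aligned map offset, as in the DEBUG4K
 * configurations this adjustment exists for.  The function name is local
 * to this sketch.
 */
#if 0
static void
vm_object_shadow_alignment_sketch(void)
{
    vm_object_offset_t offset = 0x5000;    /* 4K-aligned map offset */
    vm_object_size_t   length = 0x2000;
    vm_object_offset_t offset_adjustment;

    offset_adjustment = offset - vm_object_trunc_page(offset); /* 0x1000 */
    length = vm_object_round_page(length + offset_adjustment); /* 0x4000 */
    offset = vm_object_trunc_page(offset);                     /* 0x4000 */

    /* the shadow object is made 0x4000 bytes long and its
     * vo_shadow_offset is the page-aligned 0x4000 ... */
    assert(page_aligned(offset));
    assert(length == 0x4000);
    /* ... while the map entry's offset into the new object becomes
     * 0 + offset_adjustment == 0x1000, preserving the original
     * sub-page position */
    assert(offset_adjustment == 0x1000);
}
#endif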
4083 | |
4084 | /* |
4085 | * The relationship between vm_object structures and |
4086 | * the memory_object requires careful synchronization. |
4087 | * |
4088 | * All associations are created by memory_object_create_named |
4089 | * for external pagers and vm_object_compressor_pager_create for internal |
4090 | * objects as follows: |
4091 | * |
4092 | * pager: the memory_object itself, supplied by |
4093 | * the user requesting a mapping (or the kernel, |
4094 | * when initializing internal objects); the |
4095 | * kernel simulates holding send rights by keeping |
4096 | * a port reference; |
4097 | * |
4098 | * pager_request: |
4099 | * the memory object control port, |
4100 | * created by the kernel; the kernel holds |
4101 | * receive (and ownership) rights to this |
4102 | * port, but no other references. |
4103 | * |
4104 | * When initialization is complete, the "initialized" field |
4105 | * is asserted. Other mappings using a particular memory object, |
4106 | * and any references to the vm_object gained through the |
4107 | * port association must wait for this initialization to occur. |
4108 | * |
4109 | * In order to allow the memory manager to set attributes before |
4110 | * requests (notably virtual copy operations, but also data or |
4111 | * unlock requests) are made, a "ready" attribute is made available. |
4112 | * Only the memory manager may affect the value of this attribute. |
4113 | * Its value does not affect critical kernel functions, such as |
4114 | * internal object initialization or destruction. [Furthermore, |
4115 | * memory objects created by the kernel are assumed to be ready |
4116 | * immediately; the default memory manager need not explicitly |
4117 | * set the "ready" attribute.] |
4118 | * |
4119 | * [Both the "initialized" and "ready" attribute wait conditions |
4120 | * use the "pager" field as the wait event.] |
4121 | * |
4122 | * The port associations can be broken down by any of the |
4123 | * following routines: |
4124 | * vm_object_terminate: |
4125 | * No references to the vm_object remain, and |
4126 | * the object cannot (or will not) be cached. |
4127 | * This is the normal case, and is done even |
4128 | * though one of the other cases has already been |
4129 | * done. |
4130 | * memory_object_destroy: |
4131 | * The memory manager has requested that the |
4132 | * kernel relinquish references to the memory |
4133 | * object. [The memory manager may not want to |
4134 | * destroy the memory object, but may wish to |
4135 | * refuse or tear down existing memory mappings.] |
4136 | * |
4137 | * Each routine that breaks an association must break all of |
4138 | * them at once. At some later time, that routine must clear |
4139 | * the pager field and release the memory object references. |
4140 | * [Furthermore, each routine must cope with the simultaneous |
4141 | * or previous operations of the others.] |
4142 | * |
4143 | * Because the pager field may be cleared spontaneously, it |
4144 | * cannot be used to determine whether a memory object has |
4145 | * ever been associated with a particular vm_object. [This |
4146 | * knowledge is important to the shadow object mechanism.] |
4147 | * For this reason, an additional "created" attribute is |
4148 | * provided. |
4149 | * |
4150 | * During various paging operations, the pager reference found in the |
4151 | * vm_object must be valid. To prevent this from being released, |
4152 | * (other than being removed, i.e., made null), routines may use |
4153 | * the vm_object_paging_begin/end routines [actually, macros]. |
4154 | * The implementation uses the "paging_in_progress" and "wanted" fields. |
4155 | * [Operations that alter the validity of the pager values include the |
4156 | * termination routines and vm_object_collapse.] |
4157 | */ |
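
/*
 * Illustrative sketch (not compiled): the wait protocol implied by the
 * "initialized" and "ready" attributes described above, modeled on the
 * wait loops used elsewhere in this file.  The function name is
 * hypothetical.
 */
#if 0
static void
vm_object_wait_for_pager_sketch(vm_object_t object)
{
    vm_object_lock(object);
    /* wait for whoever created the pager to finish initializing it */
    while (object->pager_created && !object->pager_initialized) {
        vm_object_sleep(object, VM_OBJECT_EVENT_INITIALIZED,
            THREAD_UNINT);
    }
    /* wait for the memory manager to declare the pager "ready";
     * internal objects are always ready */
    while (!object->internal && !object->pager_ready) {
        vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
            THREAD_UNINT);
    }
    vm_object_unlock(object);
}
#endif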
4158 | |
4159 | |
4160 | /* |
4161 | * Routine: vm_object_memory_object_associate |
4162 | * Purpose: |
4163 | * Associate a VM object to the given pager. |
4164 | * If a VM object is not provided, create one. |
4165 | * Initialize the pager. |
4166 | */ |
4167 | vm_object_t |
4168 | vm_object_memory_object_associate( |
memory_object_t pager,
4170 | vm_object_t object, |
4171 | vm_object_size_t size, |
4172 | boolean_t named) |
4173 | { |
4174 | memory_object_control_t control; |
4175 | |
4176 | assert(pager != MEMORY_OBJECT_NULL); |
4177 | |
4178 | if (object != VM_OBJECT_NULL) { |
4179 | vm_object_lock(object); |
4180 | assert(object->internal); |
4181 | assert(object->pager_created); |
4182 | assert(!object->pager_initialized); |
4183 | assert(!object->pager_ready); |
4184 | assert(object->pager_trusted); |
4185 | } else { |
4186 | object = vm_object_allocate(size); |
4187 | assert(object != VM_OBJECT_NULL); |
4188 | vm_object_lock(object); |
4189 | VM_OBJECT_SET_INTERNAL(object, FALSE); |
4190 | VM_OBJECT_SET_PAGER_TRUSTED(object, FALSE); |
4191 | /* copy strategy invalid until set by memory manager */ |
4192 | object->copy_strategy = MEMORY_OBJECT_COPY_INVALID; |
4193 | } |
4194 | |
4195 | /* |
4196 | * Allocate request port. |
4197 | */ |
4198 | |
4199 | control = memory_object_control_allocate(object); |
4200 | assert(control != MEMORY_OBJECT_CONTROL_NULL); |
4201 | |
4202 | assert(!object->pager_ready); |
4203 | assert(!object->pager_initialized); |
4204 | assert(object->pager == NULL); |
4205 | assert(object->pager_control == NULL); |
4206 | |
4207 | /* |
4208 | * Copy the reference we were given. |
4209 | */ |
4210 | |
memory_object_reference(pager);
4212 | VM_OBJECT_SET_PAGER_CREATED(object, TRUE); |
4213 | object->pager = pager; |
4214 | object->pager_control = control; |
4215 | VM_OBJECT_SET_PAGER_READY(object, FALSE); |
4216 | |
4217 | vm_object_unlock(object); |
4218 | |
4219 | /* |
4220 | * Let the pager know we're using it. |
4221 | */ |
4222 | |
(void) memory_object_init(pager,
    object->pager_control,
4225 | PAGE_SIZE); |
4226 | |
4227 | vm_object_lock(object); |
4228 | if (named) { |
4229 | VM_OBJECT_SET_NAMED(object, TRUE); |
4230 | } |
4231 | if (object->internal) { |
4232 | VM_OBJECT_SET_PAGER_READY(object, TRUE); |
4233 | vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY); |
4234 | } |
4235 | |
4236 | VM_OBJECT_SET_PAGER_INITIALIZED(object, TRUE); |
4237 | vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED); |
4238 | |
4239 | vm_object_unlock(object); |
4240 | |
4241 | return object; |
4242 | } |
4243 | |
4244 | /* |
4245 | * Routine: vm_object_compressor_pager_create |
4246 | * Purpose: |
4247 | * Create a memory object for an internal object. |
4248 | * In/out conditions: |
4249 | * The object is locked on entry and exit; |
4250 | * it may be unlocked within this call. |
4251 | * Limitations: |
4252 | * Only one thread may be performing a |
4253 | * vm_object_compressor_pager_create on an object at |
4254 | * a time. Presumably, only the pageout |
4255 | * daemon will be using this routine. |
4256 | */ |
4257 | |
4258 | void |
vm_object_compressor_pager_create(
4260 | vm_object_t object) |
4261 | { |
memory_object_t pager;
vm_object_t pager_object = VM_OBJECT_NULL;
4264 | |
4265 | assert(!is_kernel_object(object)); |
4266 | |
4267 | /* |
4268 | * Prevent collapse or termination by holding a paging reference |
4269 | */ |
4270 | |
4271 | vm_object_paging_begin(object); |
4272 | if (object->pager_created) { |
4273 | /* |
4274 | * Someone else got to it first... |
4275 | * wait for them to finish initializing the ports |
4276 | */ |
4277 | while (!object->pager_initialized) { |
4278 | vm_object_sleep(object, |
4279 | VM_OBJECT_EVENT_INITIALIZED, |
4280 | THREAD_UNINT); |
4281 | } |
4282 | vm_object_paging_end(object); |
4283 | return; |
4284 | } |
4285 | |
4286 | if ((uint32_t) (object->vo_size / PAGE_SIZE) != |
4287 | (object->vo_size / PAGE_SIZE)) { |
4288 | #if DEVELOPMENT || DEBUG |
printf("vm_object_compressor_pager_create(%p): "
    "object size 0x%llx >= 0x%llx\n",
4291 | object, |
4292 | (uint64_t) object->vo_size, |
4293 | 0x0FFFFFFFFULL * PAGE_SIZE); |
4294 | #endif /* DEVELOPMENT || DEBUG */ |
4295 | vm_object_paging_end(object); |
4296 | return; |
4297 | } |
4298 | |
4299 | /* |
4300 | * Indicate that a memory object has been assigned |
4301 | * before dropping the lock, to prevent a race. |
4302 | */ |
4303 | |
4304 | VM_OBJECT_SET_PAGER_CREATED(object, TRUE); |
4305 | VM_OBJECT_SET_PAGER_TRUSTED(object, TRUE); |
4306 | object->paging_offset = 0; |
4307 | |
4308 | vm_object_unlock(object); |
4309 | |
4310 | /* |
4311 | * Create the [internal] pager, and associate it with this object. |
4312 | * |
4313 | * We make the association here so that vm_object_enter() |
4314 | * can look up the object to complete initializing it. No |
4315 | * user will ever map this object. |
4316 | */ |
4317 | { |
4318 | /* create our new memory object */ |
4319 | assert((uint32_t) (object->vo_size / PAGE_SIZE) == |
4320 | (object->vo_size / PAGE_SIZE)); |
4321 | (void) compressor_memory_object_create( |
4322 | (memory_object_size_t) object->vo_size, |
4323 | &pager); |
4324 | if (pager == NULL) { |
panic("vm_object_compressor_pager_create(): "
    "no pager for object %p size 0x%llx\n",
4327 | object, (uint64_t) object->vo_size); |
4328 | } |
4329 | } |
4330 | |
4331 | /* |
4332 | * A reference was returned by |
4333 | * memory_object_create(), and it is |
4334 | * copied by vm_object_memory_object_associate(). |
4335 | */ |
4336 | |
4337 | pager_object = vm_object_memory_object_associate(pager, |
4338 | object, |
object->vo_size,
4340 | FALSE); |
4341 | if (pager_object != object) { |
panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)", pager, pager_object, object, (uint64_t) object->vo_size);
4343 | } |
4344 | |
4345 | /* |
4346 | * Drop the reference we were passed. |
4347 | */ |
memory_object_deallocate(pager);
4349 | |
4350 | vm_object_lock(object); |
4351 | |
4352 | /* |
4353 | * Release the paging reference |
4354 | */ |
4355 | vm_object_paging_end(object); |
4356 | } |
4357 | |
4358 | /* |
4359 | * Global variables for vm_object_collapse(): |
4360 | * |
4361 | * Counts for normal collapses and bypasses. |
4362 | * Debugging variables, to watch or disable collapse. |
4363 | */ |
4364 | static long object_collapses = 0; |
4365 | static long object_bypasses = 0; |
4366 | |
4367 | static boolean_t vm_object_collapse_allowed = TRUE; |
4368 | static boolean_t vm_object_bypass_allowed = TRUE; |
4369 | |
4370 | void vm_object_do_collapse_compressor(vm_object_t object, |
4371 | vm_object_t backing_object); |
4372 | void |
4373 | vm_object_do_collapse_compressor( |
4374 | vm_object_t object, |
4375 | vm_object_t backing_object) |
4376 | { |
4377 | vm_object_offset_t new_offset, backing_offset; |
4378 | vm_object_size_t size; |
4379 | |
4380 | vm_counters.do_collapse_compressor++; |
4381 | |
4382 | vm_object_lock_assert_exclusive(object); |
4383 | vm_object_lock_assert_exclusive(backing_object); |
4384 | |
4385 | size = object->vo_size; |
4386 | |
4387 | /* |
4388 | * Move all compressed pages from backing_object |
4389 | * to the parent. |
4390 | */ |
4391 | |
4392 | for (backing_offset = object->vo_shadow_offset; |
4393 | backing_offset < object->vo_shadow_offset + object->vo_size; |
4394 | backing_offset += PAGE_SIZE) { |
memory_object_offset_t backing_pager_offset;
4396 | |
4397 | /* find the next compressed page at or after this offset */ |
4398 | backing_pager_offset = (backing_offset + |
4399 | backing_object->paging_offset); |
4400 | backing_pager_offset = vm_compressor_pager_next_compressed( |
backing_object->pager,
    backing_pager_offset);
4403 | if (backing_pager_offset == (memory_object_offset_t) -1) { |
4404 | /* no more compressed pages */ |
4405 | break; |
4406 | } |
4407 | backing_offset = (backing_pager_offset - |
4408 | backing_object->paging_offset); |
4409 | |
4410 | new_offset = backing_offset - object->vo_shadow_offset; |
4411 | |
4412 | if (new_offset >= object->vo_size) { |
4413 | /* we're out of the scope of "object": done */ |
4414 | break; |
4415 | } |
4416 | |
if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
    (vm_compressor_pager_state_get(object->pager,
    (new_offset +
4420 | object->paging_offset)) == |
4421 | VM_EXTERNAL_STATE_EXISTS)) { |
4422 | /* |
4423 | * This page already exists in object, resident or |
4424 | * compressed. |
4425 | * We don't need this compressed page in backing_object |
4426 | * and it will be reclaimed when we release |
4427 | * backing_object. |
4428 | */ |
4429 | continue; |
4430 | } |
4431 | |
4432 | /* |
4433 | * backing_object has this page in the VM compressor and |
4434 | * we need to transfer it to object. |
4435 | */ |
4436 | vm_counters.do_collapse_compressor_pages++; |
4437 | vm_compressor_pager_transfer( |
4438 | /* destination: */ |
object->pager,
(new_offset + object->paging_offset),
/* source: */
backing_object->pager,
(backing_offset + backing_object->paging_offset));
4444 | } |
4445 | } |
4446 | |
4447 | /* |
4448 | * Routine: vm_object_do_collapse |
4449 | * Purpose: |
4450 | * Collapse an object with the object backing it. |
4451 | * Pages in the backing object are moved into the |
4452 | * parent, and the backing object is deallocated. |
4453 | * Conditions: |
4454 | * Both objects and the cache are locked; the page |
4455 | * queues are unlocked. |
4456 | * |
4457 | */ |
4458 | static void |
4459 | vm_object_do_collapse( |
4460 | vm_object_t object, |
4461 | vm_object_t backing_object) |
4462 | { |
4463 | vm_page_t p, pp; |
4464 | vm_object_offset_t new_offset, backing_offset; |
4465 | vm_object_size_t size; |
4466 | |
4467 | vm_object_lock_assert_exclusive(object); |
4468 | vm_object_lock_assert_exclusive(backing_object); |
4469 | |
4470 | assert(object->purgable == VM_PURGABLE_DENY); |
4471 | assert(backing_object->purgable == VM_PURGABLE_DENY); |
4472 | |
4473 | backing_offset = object->vo_shadow_offset; |
4474 | size = object->vo_size; |
4475 | |
4476 | /* |
4477 | * Move all in-memory pages from backing_object |
4478 | * to the parent. Pages that have been paged out |
4479 | * will be overwritten by any of the parent's |
4480 | * pages that shadow them. |
4481 | */ |
4482 | |
4483 | while (!vm_page_queue_empty(&backing_object->memq)) { |
4484 | p = (vm_page_t) vm_page_queue_first(&backing_object->memq); |
4485 | |
4486 | new_offset = (p->vmp_offset - backing_offset); |
4487 | |
4488 | assert(!p->vmp_busy || p->vmp_absent); |
4489 | |
4490 | /* |
4491 | * If the parent has a page here, or if |
4492 | * this page falls outside the parent, |
4493 | * dispose of it. |
4494 | * |
4495 | * Otherwise, move it as planned. |
4496 | */ |
4497 | |
4498 | if (p->vmp_offset < backing_offset || new_offset >= size) { |
4499 | VM_PAGE_FREE(p); |
4500 | } else { |
pp = vm_page_lookup(object, new_offset);
4502 | if (pp == VM_PAGE_NULL) { |
4503 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, |
4504 | new_offset) |
4505 | == VM_EXTERNAL_STATE_EXISTS) { |
4506 | /* |
4507 | * Parent object has this page |
4508 | * in the VM compressor. |
4509 | * Throw away the backing |
4510 | * object's page. |
4511 | */ |
4512 | VM_PAGE_FREE(p); |
4513 | } else { |
4514 | /* |
4515 | * Parent now has no page. |
4516 | * Move the backing object's page |
4517 | * up. |
4518 | */ |
vm_page_rename(p, object, new_offset);
4520 | } |
4521 | } else { |
4522 | assert(!pp->vmp_absent); |
4523 | |
4524 | /* |
4525 | * Parent object has a real page. |
4526 | * Throw away the backing object's |
4527 | * page. |
4528 | */ |
4529 | VM_PAGE_FREE(p); |
4530 | } |
4531 | } |
4532 | } |
4533 | |
4534 | if (vm_object_collapse_compressor_allowed && |
4535 | object->pager != MEMORY_OBJECT_NULL && |
4536 | backing_object->pager != MEMORY_OBJECT_NULL) { |
4537 | /* move compressed pages from backing_object to object */ |
4538 | vm_object_do_collapse_compressor(object, backing_object); |
4539 | } else if (backing_object->pager != MEMORY_OBJECT_NULL) { |
4540 | assert((!object->pager_created && |
4541 | (object->pager == MEMORY_OBJECT_NULL)) || |
4542 | (!backing_object->pager_created && |
4543 | (backing_object->pager == MEMORY_OBJECT_NULL))); |
4544 | /* |
4545 | * Move the pager from backing_object to object. |
4546 | * |
4547 | * XXX We're only using part of the paging space |
4548 | * for keeps now... we ought to discard the |
4549 | * unused portion. |
4550 | */ |
4551 | |
4552 | assert(!object->paging_in_progress); |
4553 | assert(!object->activity_in_progress); |
4554 | assert(!object->pager_created); |
4555 | assert(object->pager == NULL); |
4556 | object->pager = backing_object->pager; |
4557 | |
VM_OBJECT_SET_PAGER_CREATED(object, backing_object->pager_created);
object->pager_control = backing_object->pager_control;
VM_OBJECT_SET_PAGER_READY(object, backing_object->pager_ready);
VM_OBJECT_SET_PAGER_INITIALIZED(object, backing_object->pager_initialized);
4562 | object->paging_offset = |
4563 | backing_object->paging_offset + backing_offset; |
4564 | if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) { |
memory_object_control_collapse(&object->pager_control,
4566 | object); |
4567 | } |
4568 | /* the backing_object has lost its pager: reset all fields */ |
VM_OBJECT_SET_PAGER_CREATED(backing_object, FALSE);
backing_object->pager_control = NULL;
VM_OBJECT_SET_PAGER_READY(backing_object, FALSE);
4572 | backing_object->paging_offset = 0; |
4573 | backing_object->pager = NULL; |
4574 | } |
4575 | /* |
4576 | * Object now shadows whatever backing_object did. |
4577 | * Note that the reference to backing_object->shadow |
4578 | * moves from within backing_object to within object. |
4579 | */ |
4580 | |
4581 | assert(!object->phys_contiguous); |
4582 | assert(!backing_object->phys_contiguous); |
4583 | object->shadow = backing_object->shadow; |
4584 | if (object->shadow) { |
assertf(page_aligned(object->vo_shadow_offset),
    "object %p shadow_offset 0x%llx",
    object, object->vo_shadow_offset);
assertf(page_aligned(backing_object->vo_shadow_offset),
    "backing_object %p shadow_offset 0x%llx",
4590 | backing_object, backing_object->vo_shadow_offset); |
4591 | object->vo_shadow_offset += backing_object->vo_shadow_offset; |
4592 | /* "backing_object" gave its shadow to "object" */ |
4593 | backing_object->shadow = VM_OBJECT_NULL; |
4594 | backing_object->vo_shadow_offset = 0; |
4595 | } else { |
4596 | /* no shadow, therefore no shadow offset... */ |
4597 | object->vo_shadow_offset = 0; |
4598 | } |
4599 | assert((object->shadow == VM_OBJECT_NULL) || |
4600 | (object->shadow->vo_copy != backing_object)); |
4601 | |
4602 | /* |
4603 | * Discard backing_object. |
4604 | * |
4605 | * Since the backing object has no pages, no |
4606 | * pager left, and no object references within it, |
4607 | * all that is necessary is to dispose of it. |
4608 | */ |
4609 | object_collapses++; |
4610 | |
4611 | assert(backing_object->ref_count == 1); |
4612 | assert(backing_object->resident_page_count == 0); |
4613 | assert(backing_object->paging_in_progress == 0); |
4614 | assert(backing_object->activity_in_progress == 0); |
4615 | assert(backing_object->shadow == VM_OBJECT_NULL); |
4616 | assert(backing_object->vo_shadow_offset == 0); |
4617 | |
4618 | if (backing_object->pager != MEMORY_OBJECT_NULL) { |
4619 | /* ... unless it has a pager; need to terminate pager too */ |
4620 | vm_counters.do_collapse_terminate++; |
if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
4622 | vm_counters.do_collapse_terminate_failure++; |
4623 | } |
4624 | return; |
4625 | } |
4626 | |
4627 | assert(backing_object->pager == NULL); |
4628 | |
VM_OBJECT_SET_ALIVE(backing_object, FALSE);
4630 | vm_object_unlock(backing_object); |
4631 | |
4632 | #if VM_OBJECT_TRACKING |
4633 | if (vm_object_tracking_btlog) { |
4634 | btlog_erase(vm_object_tracking_btlog, backing_object); |
4635 | } |
4636 | #endif /* VM_OBJECT_TRACKING */ |
4637 | |
4638 | vm_object_lock_destroy(backing_object); |
4639 | |
4640 | zfree(vm_object_zone, backing_object); |
4641 | } |
4642 | |
4643 | static void |
4644 | vm_object_do_bypass( |
4645 | vm_object_t object, |
4646 | vm_object_t backing_object) |
4647 | { |
4648 | /* |
4649 | * Make the parent shadow the next object |
4650 | * in the chain. |
4651 | */ |
4652 | |
4653 | vm_object_lock_assert_exclusive(object); |
4654 | vm_object_lock_assert_exclusive(backing_object); |
4655 | |
4656 | vm_object_reference(backing_object->shadow); |
4657 | |
4658 | assert(!object->phys_contiguous); |
4659 | assert(!backing_object->phys_contiguous); |
4660 | object->shadow = backing_object->shadow; |
4661 | if (object->shadow) { |
assertf(page_aligned(object->vo_shadow_offset),
    "object %p shadow_offset 0x%llx",
    object, object->vo_shadow_offset);
assertf(page_aligned(backing_object->vo_shadow_offset),
    "backing_object %p shadow_offset 0x%llx",
4667 | backing_object, backing_object->vo_shadow_offset); |
4668 | object->vo_shadow_offset += backing_object->vo_shadow_offset; |
4669 | } else { |
4670 | /* no shadow, therefore no shadow offset... */ |
4671 | object->vo_shadow_offset = 0; |
4672 | } |
4673 | |
4674 | /* |
4675 | * Backing object might have had a copy pointer |
4676 | * to us. If it did, clear it. |
4677 | */ |
4678 | if (backing_object->vo_copy == object) { |
VM_OBJECT_COPY_SET(backing_object, VM_OBJECT_NULL);
4680 | } |
4681 | |
4682 | /* |
4683 | * Drop the reference count on backing_object. |
4684 | #if TASK_SWAPPER |
4685 | * Since its ref_count was at least 2, it |
4686 | * will not vanish; so we don't need to call |
4687 | * vm_object_deallocate. |
4688 | * [with a caveat for "named" objects] |
4689 | * |
4690 | * The res_count on the backing object is |
4691 | * conditionally decremented. It's possible |
4692 | * (via vm_pageout_scan) to get here with |
4693 | * a "swapped" object, which has a 0 res_count, |
4694 | * in which case, the backing object res_count |
4695 | * is already down by one. |
4696 | #else |
4697 | * Don't call vm_object_deallocate unless |
4698 | * ref_count drops to zero. |
4699 | * |
4700 | * The ref_count can drop to zero here if the |
4701 | * backing object could be bypassed but not |
4702 | * collapsed, such as when the backing object |
4703 | * is temporary and cachable. |
4704 | #endif |
4705 | */ |
4706 | if (backing_object->ref_count > 2 || |
4707 | (!backing_object->named && backing_object->ref_count > 1)) { |
4708 | vm_object_lock_assert_exclusive(backing_object); |
4709 | backing_object->ref_count--; |
4710 | vm_object_unlock(backing_object); |
4711 | } else { |
4712 | /* |
4713 | * Drop locks so that we can deallocate |
4714 | * the backing object. |
4715 | */ |
4716 | |
4717 | /* |
4718 | * vm_object_collapse (the caller of this function) is |
4719 | * now called from contexts that may not guarantee that a |
4720 | * valid reference is held on the object... w/o a valid |
4721 | * reference, it is unsafe and unwise (you will definitely |
4722 | * regret it) to unlock the object and then retake the lock |
4723 | * since the object may be terminated and recycled in between. |
4724 | * The "activity_in_progress" reference will keep the object |
4725 | * 'stable'. |
4726 | */ |
4727 | vm_object_activity_begin(object); |
4728 | vm_object_unlock(object); |
4729 | |
4730 | vm_object_unlock(backing_object); |
vm_object_deallocate(backing_object);
4732 | |
4733 | /* |
4734 | * Relock object. We don't have to reverify |
4735 | * its state since vm_object_collapse will |
4736 | * do that for us as it starts at the |
4737 | * top of its loop. |
4738 | */ |
4739 | |
4740 | vm_object_lock(object); |
4741 | vm_object_activity_end(object); |
4742 | } |
4743 | |
4744 | object_bypasses++; |
4745 | } |
4746 | |
4747 | |
4748 | /* |
4749 | * vm_object_collapse: |
4750 | * |
4751 | * Perform an object collapse or an object bypass if appropriate. |
4752 | * The real work of collapsing and bypassing is performed in |
4753 | * the routines vm_object_do_collapse and vm_object_do_bypass. |
4754 | * |
4755 | * Requires that the object be locked and the page queues be unlocked. |
4756 | * |
4757 | */ |
4758 | static unsigned long vm_object_collapse_calls = 0; |
4759 | static unsigned long vm_object_collapse_objects = 0; |
4760 | static unsigned long vm_object_collapse_do_collapse = 0; |
4761 | static unsigned long vm_object_collapse_do_bypass = 0; |
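
/*
 * Illustrative sketch (not compiled): the calling convention for
 * vm_object_collapse() described below.  The object must be locked
 * exclusively and the page queues unlocked; the hint offset must be page
 * aligned.  The function name is local to this sketch.
 */
#if 0
static void
vm_object_collapse_caller_sketch(vm_object_t object,
    vm_object_offset_t hint_offset)
{
    vm_object_lock(object);
    /* try a full collapse, falling back to a bypass where allowed */
    vm_object_collapse(object, vm_object_trunc_page(hint_offset),
        TRUE /* can_bypass */);
    /* "object" is still locked here; its shadow chain may be shorter */
    vm_object_unlock(object);
}
#endif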
4762 | |
4763 | __private_extern__ void |
4764 | vm_object_collapse( |
4765 | vm_object_t object, |
4766 | vm_object_offset_t hint_offset, |
4767 | boolean_t can_bypass) |
4768 | { |
4769 | vm_object_t backing_object; |
4770 | vm_object_size_t object_vcount, object_rcount; |
4771 | vm_object_t original_object; |
4772 | int object_lock_type; |
4773 | int backing_object_lock_type; |
4774 | |
4775 | vm_object_collapse_calls++; |
4776 | |
assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset);
4778 | |
4779 | if (!vm_object_collapse_allowed && |
4780 | !(can_bypass && vm_object_bypass_allowed)) { |
4781 | return; |
4782 | } |
4783 | |
4784 | if (object == VM_OBJECT_NULL) { |
4785 | return; |
4786 | } |
4787 | |
4788 | original_object = object; |
4789 | |
4790 | /* |
4791 | * The top object was locked "exclusive" by the caller. |
4792 | * In the first pass, to determine if we can collapse the shadow chain, |
4793 | * take a "shared" lock on the shadow objects. If we can collapse, |
4794 | * we'll have to go down the chain again with exclusive locks. |
4795 | */ |
4796 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
4797 | backing_object_lock_type = OBJECT_LOCK_SHARED; |
4798 | |
4799 | retry: |
4800 | object = original_object; |
4801 | vm_object_lock_assert_exclusive(object); |
4802 | |
4803 | while (TRUE) { |
4804 | vm_object_collapse_objects++; |
4805 | /* |
4806 | * Verify that the conditions are right for either |
4807 | * collapse or bypass: |
4808 | */ |
4809 | |
4810 | /* |
4811 | * There is a backing object, and |
4812 | */ |
4813 | |
4814 | backing_object = object->shadow; |
4815 | if (backing_object == VM_OBJECT_NULL) { |
4816 | if (object != original_object) { |
4817 | vm_object_unlock(object); |
4818 | } |
4819 | return; |
4820 | } |
4821 | if (backing_object_lock_type == OBJECT_LOCK_SHARED) { |
4822 | vm_object_lock_shared(backing_object); |
4823 | } else { |
4824 | vm_object_lock(backing_object); |
4825 | } |
4826 | |
4827 | /* |
4828 | * No pages in the object are currently |
4829 | * being paged out, and |
4830 | */ |
4831 | if (object->paging_in_progress != 0 || |
4832 | object->activity_in_progress != 0) { |
4833 | /* try and collapse the rest of the shadow chain */ |
4834 | if (object != original_object) { |
4835 | vm_object_unlock(object); |
4836 | } |
4837 | object = backing_object; |
4838 | object_lock_type = backing_object_lock_type; |
4839 | continue; |
4840 | } |
4841 | |
4842 | /* |
4843 | * ... |
4844 | * The backing object is not read_only, |
4845 | * and no pages in the backing object are |
4846 | * currently being paged out. |
4847 | * The backing object is internal. |
4848 | * |
4849 | */ |
4850 | |
4851 | if (!backing_object->internal || |
4852 | backing_object->paging_in_progress != 0 || |
4853 | backing_object->activity_in_progress != 0) { |
4854 | /* try and collapse the rest of the shadow chain */ |
4855 | if (object != original_object) { |
4856 | vm_object_unlock(object); |
4857 | } |
4858 | object = backing_object; |
4859 | object_lock_type = backing_object_lock_type; |
4860 | continue; |
4861 | } |
4862 | |
4863 | /* |
4864 | * Purgeable objects are not supposed to engage in |
4865 | * copy-on-write activities, so should not have |
4866 | * any shadow objects or be a shadow object to another |
4867 | * object. |
4868 | * Collapsing a purgeable object would require some |
4869 | * updates to the purgeable compressed ledgers. |
4870 | */ |
4871 | if (object->purgable != VM_PURGABLE_DENY || |
4872 | backing_object->purgable != VM_PURGABLE_DENY) { |
panic("vm_object_collapse() attempting to collapse "
    "purgeable object: %p(%d) %p(%d)\n",
4875 | object, object->purgable, |
4876 | backing_object, backing_object->purgable); |
4877 | /* try and collapse the rest of the shadow chain */ |
4878 | if (object != original_object) { |
4879 | vm_object_unlock(object); |
4880 | } |
4881 | object = backing_object; |
4882 | object_lock_type = backing_object_lock_type; |
4883 | continue; |
4884 | } |
4885 | |
4886 | /* |
4887 | * The backing object can't be a copy-object: |
4888 | * the shadow_offset for the copy-object must stay |
4889 | * as 0. Furthermore (for the 'we have all the |
4890 | * pages' case), if we bypass backing_object and |
4891 | * just shadow the next object in the chain, old |
4892 | * pages from that object would then have to be copied |
4893 | * BOTH into the (former) backing_object and into the |
4894 | * parent object. |
4895 | */ |
4896 | if (backing_object->shadow != VM_OBJECT_NULL && |
4897 | backing_object->shadow->vo_copy == backing_object) { |
4898 | /* try and collapse the rest of the shadow chain */ |
4899 | if (object != original_object) { |
4900 | vm_object_unlock(object); |
4901 | } |
4902 | object = backing_object; |
4903 | object_lock_type = backing_object_lock_type; |
4904 | continue; |
4905 | } |
4906 | |
4907 | /* |
4908 | * We can now try to either collapse the backing |
4909 | * object (if the parent is the only reference to |
4910 | * it) or (perhaps) remove the parent's reference |
4911 | * to it. |
4912 | * |
4913 | * If there is exactly one reference to the backing |
4914 | * object, we may be able to collapse it into the |
4915 | * parent. |
4916 | * |
4917 | * As long as one of the objects is still not known |
4918 | * to the pager, we can collapse them. |
4919 | */ |
		if (backing_object->ref_count == 1 &&
		    (vm_object_collapse_compressor_allowed ||
		    !object->pager_created ||
		    !backing_object->pager_created) &&
		    vm_object_collapse_allowed) {
4925 | /* |
4926 | * We need the exclusive lock on the VM objects. |
4927 | */ |
4928 | if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) { |
4929 | /* |
4930 | * We have an object and its shadow locked |
4931 | * "shared". We can't just upgrade the locks |
4932 | * to "exclusive", as some other thread might |
4933 | * also have these objects locked "shared" and |
4934 | * attempt to upgrade one or the other to |
4935 | * "exclusive". The upgrades would block |
4936 | * forever waiting for the other "shared" locks |
4937 | * to get released. |
4938 | * So we have to release the locks and go |
4939 | * down the shadow chain again (since it could |
4940 | * have changed) with "exclusive" locking. |
4941 | */ |
4942 | vm_object_unlock(backing_object); |
4943 | if (object != original_object) { |
4944 | vm_object_unlock(object); |
4945 | } |
4946 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
4947 | backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
4948 | goto retry; |
4949 | } |
4950 | |
4951 | /* |
4952 | * Collapse the object with its backing |
4953 | * object, and try again with the object's |
4954 | * new backing object. |
4955 | */ |
4956 | |
4957 | vm_object_do_collapse(object, backing_object); |
4958 | vm_object_collapse_do_collapse++; |
4959 | continue; |
4960 | } |
4961 | |
4962 | /* |
4963 | * Collapsing the backing object was not possible |
4964 | * or permitted, so let's try bypassing it. |
4965 | */ |
4966 | |
4967 | if (!(can_bypass && vm_object_bypass_allowed)) { |
4968 | /* try and collapse the rest of the shadow chain */ |
4969 | if (object != original_object) { |
4970 | vm_object_unlock(object); |
4971 | } |
4972 | object = backing_object; |
4973 | object_lock_type = backing_object_lock_type; |
4974 | continue; |
4975 | } |
4976 | |
4977 | |
4978 | /* |
4979 | * If the object doesn't have all its pages present, |
4980 | * we have to make sure no pages in the backing object |
4981 | * "show through" before bypassing it. |
4982 | */ |
4983 | object_vcount = object->vo_size >> PAGE_SHIFT; |
4984 | object_rcount = (vm_object_size_t)object->resident_page_count; |
4985 | |
4986 | if (object_rcount != object_vcount) { |
4987 | vm_object_offset_t offset; |
4988 | vm_object_offset_t backing_offset; |
4989 | vm_object_size_t backing_rcount, backing_vcount; |
4990 | |
4991 | /* |
4992 | * If the backing object has a pager but no pagemap, |
4993 | * then we cannot bypass it, because we don't know |
4994 | * what pages it has. |
4995 | */ |
4996 | if (backing_object->pager_created) { |
4997 | /* try and collapse the rest of the shadow chain */ |
4998 | if (object != original_object) { |
4999 | vm_object_unlock(object); |
5000 | } |
5001 | object = backing_object; |
5002 | object_lock_type = backing_object_lock_type; |
5003 | continue; |
5004 | } |
5005 | |
5006 | /* |
5007 | * If the object has a pager but no pagemap, |
5008 | * then we cannot bypass it, because we don't know |
5009 | * what pages it has. |
5010 | */ |
5011 | if (object->pager_created) { |
5012 | /* try and collapse the rest of the shadow chain */ |
5013 | if (object != original_object) { |
5014 | vm_object_unlock(object); |
5015 | } |
5016 | object = backing_object; |
5017 | object_lock_type = backing_object_lock_type; |
5018 | continue; |
5019 | } |
5020 | |
5021 | backing_offset = object->vo_shadow_offset; |
5022 | backing_vcount = backing_object->vo_size >> PAGE_SHIFT; |
5023 | backing_rcount = (vm_object_size_t)backing_object->resident_page_count; |
5024 | assert(backing_vcount >= object_vcount); |
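			/*
			 * Illustrative arithmetic: at most (backing_vcount - object_vcount)
			 * backing pages can lie outside the parent's window, so at least
			 * backing_rcount - (backing_vcount - object_vcount) of them must
			 * fall inside it.  If that lower bound exceeds object_rcount, some
			 * backing page is guaranteed to be uncovered.  For example, with
			 * backing_vcount == 100, object_vcount == 60 and backing_rcount == 70,
			 * at least 30 backing pages are inside the window; if object_rcount
			 * is only 20, at least one backing page shows through and bypassing
			 * is impossible.
			 */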
5025 | |
5026 | if (backing_rcount > (backing_vcount - object_vcount) && |
5027 | backing_rcount - (backing_vcount - object_vcount) > object_rcount) { |
5028 | /* |
5029 | * we have enough pages in the backing object to guarantee that |
5030 | * at least 1 of them must be 'uncovered' by a resident page |
5031 | * in the object we're evaluating, so move on and |
5032 | * try to collapse the rest of the shadow chain |
5033 | */ |
5034 | if (object != original_object) { |
5035 | vm_object_unlock(object); |
5036 | } |
5037 | object = backing_object; |
5038 | object_lock_type = backing_object_lock_type; |
5039 | continue; |
5040 | } |
5041 | |
5042 | /* |
5043 | * If all of the pages in the backing object are |
5044 | * shadowed by the parent object, the parent |
5045 | * object no longer has to shadow the backing |
5046 | * object; it can shadow the next one in the |
5047 | * chain. |
5048 | * |
5049 | * If the backing object has existence info, |
			 * we must examine its existence info
5051 | * as well. |
5052 | * |
5053 | */ |
5054 | |
5055 | #define EXISTS_IN_OBJECT(obj, off, rc) \ |
5056 | ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \ |
5057 | == VM_EXTERNAL_STATE_EXISTS) || \ |
5058 | ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--)) |
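/*
 * EXISTS_IN_OBJECT(obj, off, rc) is true when a page at offset "off" is
 * present in "obj", either as a page known to the compressor pager or as a
 * resident page.  "rc" is a budget of resident pages left to account for:
 * it is decremented each time a resident page is found and, once it reaches
 * zero, the vm_page_lookup() probe is short-circuited entirely.
 */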
5059 | |
5060 | /* |
5061 | * Check the hint location first |
5062 | * (since it is often the quickest way out of here). |
5063 | */ |
5064 | if (object->cow_hint != ~(vm_offset_t)0) { |
5065 | hint_offset = (vm_object_offset_t)object->cow_hint; |
5066 | } else { |
5067 | hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ? |
5068 | (hint_offset - 8 * PAGE_SIZE_64) : 0; |
5069 | } |
5070 | |
5071 | if (EXISTS_IN_OBJECT(backing_object, hint_offset + |
5072 | backing_offset, backing_rcount) && |
5073 | !EXISTS_IN_OBJECT(object, hint_offset, object_rcount)) { |
5074 | /* dependency right at the hint */ |
5075 | object->cow_hint = (vm_offset_t) hint_offset; /* atomic */ |
5076 | /* try and collapse the rest of the shadow chain */ |
5077 | if (object != original_object) { |
5078 | vm_object_unlock(object); |
5079 | } |
5080 | object = backing_object; |
5081 | object_lock_type = backing_object_lock_type; |
5082 | continue; |
5083 | } |
5084 | |
5085 | /* |
5086 | * If the object's window onto the backing_object |
5087 | * is large compared to the number of resident |
5088 | * pages in the backing object, it makes sense to |
5089 | * walk the backing_object's resident pages first. |
5090 | * |
5091 | * NOTE: Pages may be in both the existence map and/or |
5092 | * resident, so if we don't find a dependency while |
5093 | * walking the backing object's resident page list |
5094 | * directly, and there is an existence map, we'll have |
5095 | * to run the offset based 2nd pass. Because we may |
5096 | * have to run both passes, we need to be careful |
5097 | * not to decrement 'rcount' in the 1st pass |
5098 | */ |
5099 | if (backing_rcount && backing_rcount < (object_vcount / 8)) { |
5100 | vm_object_size_t rc = object_rcount; |
5101 | vm_page_t p; |
5102 | |
5103 | backing_rcount = backing_object->resident_page_count; |
5104 | p = (vm_page_t)vm_page_queue_first(&backing_object->memq); |
5105 | do { |
5106 | offset = (p->vmp_offset - backing_offset); |
5107 | |
5108 | if (offset < object->vo_size && |
5109 | offset != hint_offset && |
5110 | !EXISTS_IN_OBJECT(object, offset, rc)) { |
5111 | /* found a dependency */ |
5112 | object->cow_hint = (vm_offset_t) offset; /* atomic */ |
5113 | |
5114 | break; |
5115 | } |
5116 | p = (vm_page_t) vm_page_queue_next(&p->vmp_listq); |
5117 | } while (--backing_rcount); |
5118 | if (backing_rcount != 0) { |
5119 | /* try and collapse the rest of the shadow chain */ |
5120 | if (object != original_object) { |
5121 | vm_object_unlock(object); |
5122 | } |
5123 | object = backing_object; |
5124 | object_lock_type = backing_object_lock_type; |
5125 | continue; |
5126 | } |
5127 | } |
5128 | |
5129 | /* |
5130 | * Walk through the offsets looking for pages in the |
5131 | * backing object that show through to the object. |
5132 | */ |
5133 | if (backing_rcount) { |
5134 | offset = hint_offset; |
5135 | |
5136 | while ((offset = |
5137 | (offset + PAGE_SIZE_64 < object->vo_size) ? |
5138 | (offset + PAGE_SIZE_64) : 0) != hint_offset) { |
5139 | if (EXISTS_IN_OBJECT(backing_object, offset + |
5140 | backing_offset, backing_rcount) && |
5141 | !EXISTS_IN_OBJECT(object, offset, object_rcount)) { |
5142 | /* found a dependency */ |
5143 | object->cow_hint = (vm_offset_t) offset; /* atomic */ |
5144 | break; |
5145 | } |
5146 | } |
5147 | if (offset != hint_offset) { |
5148 | /* try and collapse the rest of the shadow chain */ |
5149 | if (object != original_object) { |
5150 | vm_object_unlock(object); |
5151 | } |
5152 | object = backing_object; |
5153 | object_lock_type = backing_object_lock_type; |
5154 | continue; |
5155 | } |
5156 | } |
5157 | } |
5158 | |
5159 | /* |
5160 | * We need "exclusive" locks on the 2 VM objects. |
5161 | */ |
5162 | if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) { |
5163 | vm_object_unlock(backing_object); |
5164 | if (object != original_object) { |
5165 | vm_object_unlock(object); |
5166 | } |
5167 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
5168 | backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
5169 | goto retry; |
5170 | } |
5171 | |
5172 | /* reset the offset hint for any objects deeper in the chain */ |
5173 | object->cow_hint = (vm_offset_t)0; |
5174 | |
5175 | /* |
5176 | * All interesting pages in the backing object |
5177 | * already live in the parent or its pager. |
5178 | * Thus we can bypass the backing object. |
5179 | */ |
5180 | |
5181 | vm_object_do_bypass(object, backing_object); |
5182 | vm_object_collapse_do_bypass++; |
5183 | |
5184 | /* |
5185 | * Try again with this object's new backing object. |
5186 | */ |
5187 | |
5188 | continue; |
5189 | } |
5190 | |
5191 | /* NOT REACHED */ |
5192 | /* |
5193 | * if (object != original_object) { |
5194 | * vm_object_unlock(object); |
5195 | * } |
5196 | */ |
5197 | } |
5198 | |
5199 | /* |
5200 | * Routine: vm_object_page_remove: [internal] |
5201 | * Purpose: |
5202 | * Removes all physical pages in the specified |
5203 | * object range from the object's list of pages. |
5204 | * |
5205 | * In/out conditions: |
5206 | * The object must be locked. |
5207 | * The object must not have paging_in_progress, usually |
5208 | * guaranteed by not having a pager. |
5209 | */ |
5210 | unsigned int vm_object_page_remove_lookup = 0; |
5211 | unsigned int vm_object_page_remove_iterate = 0; |
5212 | |
5213 | __private_extern__ void |
5214 | vm_object_page_remove( |
5215 | vm_object_t object, |
5216 | vm_object_offset_t start, |
5217 | vm_object_offset_t end) |
5218 | { |
5219 | vm_page_t p, next; |
5220 | |
5221 | /* |
	 * One- and two-page removals are the most common.
	 * The factor of 16 here is somewhat arbitrary; it balances per-page
	 * vm_page_lookup() calls against iterating the whole memq.
5225 | */ |
5226 | |
5227 | if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) { |
5228 | vm_object_page_remove_lookup++; |
5229 | |
5230 | for (; start < end; start += PAGE_SIZE_64) { |
			p = vm_page_lookup(object, start);
5232 | if (p != VM_PAGE_NULL) { |
5233 | assert(!p->vmp_cleaning && !p->vmp_laundry); |
5234 | if (!p->vmp_fictitious && p->vmp_pmapped) { |
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5236 | } |
5237 | VM_PAGE_FREE(p); |
5238 | } |
5239 | } |
5240 | } else { |
5241 | vm_object_page_remove_iterate++; |
5242 | |
5243 | p = (vm_page_t) vm_page_queue_first(&object->memq); |
5244 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) { |
5245 | next = (vm_page_t) vm_page_queue_next(&p->vmp_listq); |
5246 | if ((start <= p->vmp_offset) && (p->vmp_offset < end)) { |
5247 | assert(!p->vmp_cleaning && !p->vmp_laundry); |
5248 | if (!p->vmp_fictitious && p->vmp_pmapped) { |
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5250 | } |
5251 | VM_PAGE_FREE(p); |
5252 | } |
5253 | p = next; |
5254 | } |
5255 | } |
5256 | } |
5257 | |
5258 | |
5259 | /* |
5260 | * Routine: vm_object_coalesce |
5261 | * Function: Coalesces two objects backing up adjoining |
5262 | * regions of memory into a single object. |
5263 | * |
5264 | * returns TRUE if objects were combined. |
5265 | * |
5266 | * NOTE: Only works at the moment if the second object is NULL - |
5267 | * if it's not, which object do we lock first? |
5268 | * |
5269 | * Parameters: |
5270 | * prev_object First object to coalesce |
5271 | * prev_offset Offset into prev_object |
5272 | * next_object Second object into coalesce |
5273 | * next_offset Offset into next_object |
5274 | * |
5275 | * prev_size Size of reference to prev_object |
5276 | * next_size Size of reference to next_object |
5277 | * |
5278 | * Conditions: |
5279 | * The object(s) must *not* be locked. The map must be locked |
5280 | * to preserve the reference to the object(s). |
5281 | */ |
5282 | static int vm_object_coalesce_count = 0; |
5283 | |
5284 | __private_extern__ boolean_t |
5285 | vm_object_coalesce( |
5286 | vm_object_t prev_object, |
5287 | vm_object_t next_object, |
5288 | vm_object_offset_t prev_offset, |
5289 | __unused vm_object_offset_t next_offset, |
5290 | vm_object_size_t prev_size, |
5291 | vm_object_size_t next_size) |
5292 | { |
5293 | vm_object_size_t newsize; |
5294 | |
5295 | #ifdef lint |
5296 | next_offset++; |
5297 | #endif /* lint */ |
5298 | |
5299 | if (next_object != VM_OBJECT_NULL) { |
5300 | return FALSE; |
5301 | } |
5302 | |
5303 | if (prev_object == VM_OBJECT_NULL) { |
5304 | return TRUE; |
5305 | } |
5306 | |
5307 | vm_object_lock(prev_object); |
5308 | |
5309 | /* |
5310 | * Try to collapse the object first |
5311 | */ |
	vm_object_collapse(prev_object, prev_offset, TRUE);
5313 | |
5314 | /* |
	 * Can't coalesce if pages not mapped to
	 * prev_entry may be in use in any way:
5317 | * . more than one reference |
5318 | * . paged out |
5319 | * . shadows another object |
5320 | * . has a copy elsewhere |
5321 | * . is purgeable |
5322 | * . paging references (pages might be in page-list) |
5323 | */ |
5324 | |
5325 | if ((prev_object->ref_count > 1) || |
5326 | prev_object->pager_created || |
5327 | prev_object->phys_contiguous || |
5328 | (prev_object->shadow != VM_OBJECT_NULL) || |
5329 | (prev_object->vo_copy != VM_OBJECT_NULL) || |
5330 | (prev_object->true_share != FALSE) || |
5331 | (prev_object->purgable != VM_PURGABLE_DENY) || |
5332 | (prev_object->paging_in_progress != 0) || |
5333 | (prev_object->activity_in_progress != 0)) { |
5334 | vm_object_unlock(prev_object); |
5335 | return FALSE; |
5336 | } |
5337 | /* newsize = prev_offset + prev_size + next_size; */ |
5338 | if (__improbable(os_add3_overflow(prev_offset, prev_size, next_size, |
5339 | &newsize))) { |
5340 | vm_object_unlock(prev_object); |
5341 | return FALSE; |
5342 | } |
5343 | |
5344 | vm_object_coalesce_count++; |
5345 | |
5346 | /* |
5347 | * Remove any pages that may still be in the object from |
5348 | * a previous deallocation. |
5349 | */ |
	vm_object_page_remove(prev_object,
	    prev_offset + prev_size,
	    prev_offset + prev_size + next_size);
5353 | |
5354 | /* |
5355 | * Extend the object if necessary. |
5356 | */ |
5357 | if (newsize > prev_object->vo_size) { |
		assertf(page_aligned(newsize),
		    "object %p size 0x%llx",
		    prev_object, (uint64_t)newsize);
5361 | prev_object->vo_size = newsize; |
5362 | } |
5363 | |
5364 | vm_object_unlock(prev_object); |
5365 | return TRUE; |
5366 | } |
5367 | |
5368 | kern_return_t |
5369 | vm_object_populate_with_private( |
5370 | vm_object_t object, |
5371 | vm_object_offset_t offset, |
5372 | ppnum_t phys_page, |
5373 | vm_size_t size) |
5374 | { |
5375 | ppnum_t base_page; |
5376 | vm_object_offset_t base_offset; |
5377 | |
5378 | |
5379 | if (!object->private) { |
5380 | return KERN_FAILURE; |
5381 | } |
5382 | |
5383 | base_page = phys_page; |
5384 | |
5385 | vm_object_lock(object); |
5386 | |
5387 | if (!object->phys_contiguous) { |
5388 | vm_page_t m; |
5389 | |
5390 | if ((base_offset = trunc_page_64(offset)) != offset) { |
5391 | vm_object_unlock(object); |
5392 | return KERN_FAILURE; |
5393 | } |
5394 | base_offset += object->paging_offset; |
5395 | |
5396 | while (size) { |
			m = vm_page_lookup(object, base_offset);
5398 | |
5399 | if (m != VM_PAGE_NULL) { |
5400 | if (m->vmp_fictitious) { |
5401 | if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) { |
5402 | vm_page_lockspin_queues(); |
5403 | m->vmp_private = TRUE; |
5404 | vm_page_unlock_queues(); |
5405 | |
5406 | m->vmp_fictitious = FALSE; |
5407 | VM_PAGE_SET_PHYS_PAGE(m, base_page); |
5408 | } |
5409 | } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) { |
5410 | if (!m->vmp_private) { |
5411 | /* |
5412 | * we'd leak a real page... that can't be right |
5413 | */ |
5414 | panic("vm_object_populate_with_private - %p not private" , m); |
5415 | } |
5416 | if (m->vmp_pmapped) { |
5417 | /* |
5418 | * pmap call to clear old mapping |
5419 | */ |
						pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
5421 | } |
5422 | VM_PAGE_SET_PHYS_PAGE(m, base_page); |
5423 | } |
5424 | } else { |
5425 | m = vm_page_grab_fictitious(TRUE); |
5426 | |
5427 | /* |
5428 | * private normally requires lock_queues but since we |
				 * are initializing the page, it's not necessary here
5430 | */ |
5431 | m->vmp_private = TRUE; |
5432 | m->vmp_fictitious = FALSE; |
5433 | VM_PAGE_SET_PHYS_PAGE(m, base_page); |
5434 | m->vmp_unusual = TRUE; |
5435 | m->vmp_busy = FALSE; |
5436 | |
				vm_page_insert(m, object, base_offset);
5438 | } |
5439 | base_page++; /* Go to the next physical page */ |
5440 | base_offset += PAGE_SIZE; |
5441 | size -= PAGE_SIZE; |
5442 | } |
5443 | } else { |
5444 | /* NOTE: we should check the original settings here */ |
5445 | /* if we have a size > zero a pmap call should be made */ |
5446 | /* to disable the range */ |
5447 | |
5448 | /* pmap_? */ |
5449 | |
5450 | /* shadows on contiguous memory are not allowed */ |
5451 | /* we therefore can use the offset field */ |
5452 | object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT; |
		assertf(page_aligned(size),
		    "object %p size 0x%llx",
		    object, (uint64_t)size);
5456 | object->vo_size = size; |
5457 | } |
5458 | vm_object_unlock(object); |
5459 | |
5460 | return KERN_SUCCESS; |
5461 | } |
5462 | |
5463 | |
5464 | kern_return_t |
5465 | memory_object_create_named( |
	memory_object_t         pager,
5467 | memory_object_offset_t size, |
5468 | memory_object_control_t *control) |
5469 | { |
5470 | vm_object_t object; |
5471 | |
5472 | *control = MEMORY_OBJECT_CONTROL_NULL; |
5473 | if (pager == MEMORY_OBJECT_NULL) { |
5474 | return KERN_INVALID_ARGUMENT; |
5475 | } |
5476 | |
5477 | object = vm_object_memory_object_associate(pager, |
5478 | VM_OBJECT_NULL, |
5479 | size, |
5480 | TRUE); |
5481 | if (object == VM_OBJECT_NULL) { |
5482 | return KERN_INVALID_OBJECT; |
5483 | } |
5484 | |
5485 | /* wait for object (if any) to be ready */ |
5486 | if (object != VM_OBJECT_NULL) { |
5487 | vm_object_lock(object); |
5488 | VM_OBJECT_SET_NAMED(object, TRUE); |
5489 | while (!object->pager_ready) { |
5490 | vm_object_sleep(object, |
5491 | VM_OBJECT_EVENT_PAGER_READY, |
5492 | THREAD_UNINT); |
5493 | } |
5494 | *control = object->pager_control; |
5495 | vm_object_unlock(object); |
5496 | } |
5497 | return KERN_SUCCESS; |
5498 | } |
5499 | |
5500 | |
5501 | __private_extern__ kern_return_t |
5502 | vm_object_lock_request( |
5503 | vm_object_t object, |
5504 | vm_object_offset_t offset, |
5505 | vm_object_size_t size, |
5506 | memory_object_return_t should_return, |
5507 | int flags, |
5508 | vm_prot_t prot) |
5509 | { |
5510 | __unused boolean_t should_flush; |
5511 | |
5512 | should_flush = flags & MEMORY_OBJECT_DATA_FLUSH; |
5513 | |
5514 | /* |
5515 | * Check for bogus arguments. |
5516 | */ |
5517 | if (object == VM_OBJECT_NULL) { |
5518 | return KERN_INVALID_ARGUMENT; |
5519 | } |
5520 | |
5521 | if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) { |
5522 | return KERN_INVALID_ARGUMENT; |
5523 | } |
5524 | |
5525 | /* |
5526 | * XXX TODO4K |
5527 | * extend range for conservative operations (copy-on-write, sync, ...) |
5528 | * truncate range for destructive operations (purge, ...) |
5529 | */ |
5530 | size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset); |
5531 | offset = vm_object_trunc_page(offset); |
5532 | |
5533 | /* |
5534 | * Lock the object, and acquire a paging reference to |
5535 | * prevent the memory_object reference from being released. |
5536 | */ |
5537 | vm_object_lock(object); |
5538 | vm_object_paging_begin(object); |
5539 | |
5540 | (void)vm_object_update(object, |
5541 | offset, size, NULL, NULL, should_return, flags, prot); |
5542 | |
5543 | vm_object_paging_end(object); |
5544 | vm_object_unlock(object); |
5545 | |
5546 | return KERN_SUCCESS; |
5547 | } |
5548 | |
5549 | /* |
5550 | * Empty a purgeable object by grabbing the physical pages assigned to it and |
5551 | * putting them on the free queue without writing them to backing store, etc. |
5552 | * When the pages are next touched they will be demand zero-fill pages. We |
5553 | * skip pages which are busy, being paged in/out, wired, etc. We do _not_ |
5554 | * skip referenced/dirty pages, pages on the active queue, etc. We're more |
5555 | * than happy to grab these since this is a purgeable object. We mark the |
5556 | * object as "empty" after reaping its pages. |
5557 | * |
5558 | * On entry the object must be locked and it must be |
5559 | * purgeable with no delayed copies pending. |
5560 | */ |
5561 | uint64_t |
5562 | vm_object_purge(vm_object_t object, int flags) |
5563 | { |
5564 | unsigned int object_page_count = 0, pgcount = 0; |
5565 | uint64_t total_purged_pgcount = 0; |
5566 | boolean_t skipped_object = FALSE; |
5567 | |
5568 | vm_object_lock_assert_exclusive(object); |
5569 | |
5570 | if (object->purgable == VM_PURGABLE_DENY) { |
5571 | return 0; |
5572 | } |
5573 | |
5574 | assert(object->vo_copy == VM_OBJECT_NULL); |
5575 | assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); |
5576 | |
5577 | /* |
5578 | * We need to set the object's state to VM_PURGABLE_EMPTY *before* |
5579 | * reaping its pages. We update vm_page_purgeable_count in bulk |
5580 | * and we don't want vm_page_remove() to update it again for each |
5581 | * page we reap later. |
5582 | * |
5583 | * For the purgeable ledgers, pages from VOLATILE and EMPTY objects |
5584 | * are all accounted for in the "volatile" ledgers, so this does not |
5585 | * make any difference. |
5586 | * If we transitioned directly from NONVOLATILE to EMPTY, |
5587 | * vm_page_purgeable_count must have been updated when the object |
5588 | * was dequeued from its volatile queue and the purgeable ledgers |
5589 | * must have also been updated accordingly at that time (in |
5590 | * vm_object_purgable_control()). |
5591 | */ |
5592 | if (object->purgable == VM_PURGABLE_VOLATILE) { |
5593 | unsigned int delta; |
5594 | assert(object->resident_page_count >= |
5595 | object->wired_page_count); |
5596 | delta = (object->resident_page_count - |
5597 | object->wired_page_count); |
5598 | if (delta != 0) { |
5599 | assert(vm_page_purgeable_count >= |
5600 | delta); |
5601 | OSAddAtomic(-delta, |
5602 | (SInt32 *)&vm_page_purgeable_count); |
5603 | } |
5604 | if (object->wired_page_count != 0) { |
5605 | assert(vm_page_purgeable_wired_count >= |
5606 | object->wired_page_count); |
5607 | OSAddAtomic(-object->wired_page_count, |
5608 | (SInt32 *)&vm_page_purgeable_wired_count); |
5609 | } |
5610 | VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_EMPTY); |
5611 | } |
5612 | assert(object->purgable == VM_PURGABLE_EMPTY); |
5613 | |
5614 | object_page_count = object->resident_page_count; |
5615 | |
5616 | vm_object_reap_pages(object, REAP_PURGEABLE); |
5617 | |
5618 | if (object->resident_page_count >= object_page_count) { |
5619 | total_purged_pgcount = 0; |
5620 | } else { |
5621 | total_purged_pgcount = object_page_count - object->resident_page_count; |
5622 | } |
5623 | |
5624 | if (object->pager != NULL) { |
5625 | assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); |
5626 | |
5627 | if (object->activity_in_progress == 0 && |
5628 | object->paging_in_progress == 0) { |
5629 | /* |
5630 | * Also reap any memory coming from this object |
5631 | * in the VM compressor. |
5632 | * |
5633 | * There are no operations in progress on the VM object |
5634 | * and no operation can start while we're holding the |
5635 | * VM object lock, so it's safe to reap the compressed |
5636 | * pages and update the page counts. |
5637 | */ |
			pgcount = vm_compressor_pager_get_count(object->pager);
			if (pgcount) {
				pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
				vm_compressor_pager_count(object->pager,
				    -pgcount,
				    FALSE, /* shared */
				    object);
				vm_object_owner_compressed_update(object,
				    -pgcount);
5647 | } |
5648 | if (!(flags & C_DONT_BLOCK)) { |
5649 | assert(vm_compressor_pager_get_count(object->pager) |
5650 | == 0); |
5651 | } |
5652 | } else { |
5653 | /* |
5654 | * There's some kind of paging activity in progress |
5655 | * for this object, which could result in a page |
5656 | * being compressed or decompressed, possibly while |
5657 | * the VM object is not locked, so it could race |
5658 | * with us. |
5659 | * |
5660 | * We can't really synchronize this without possibly |
5661 | * causing a deadlock when the compressor needs to |
5662 | * allocate or free memory while compressing or |
5663 | * decompressing a page from a purgeable object |
5664 | * mapped in the kernel_map... |
5665 | * |
5666 | * So let's not attempt to purge the compressor |
5667 | * pager if there's any kind of operation in |
5668 | * progress on the VM object. |
5669 | */ |
5670 | skipped_object = TRUE; |
5671 | } |
5672 | } |
5673 | |
5674 | vm_object_lock_assert_exclusive(object); |
5675 | |
5676 | total_purged_pgcount += pgcount; |
5677 | |
5678 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)), |
5679 | VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */ |
5680 | object_page_count, |
5681 | total_purged_pgcount, |
5682 | skipped_object, |
5683 | 0); |
5684 | |
5685 | return total_purged_pgcount; |
5686 | } |
5687 | |
5688 | |
5689 | /* |
5690 | * vm_object_purgeable_control() allows the caller to control and investigate the |
5691 | * state of a purgeable object. A purgeable object is created via a call to |
5692 | * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will |
5693 | * never be coalesced with any other object -- even other purgeable objects -- |
5694 | * and will thus always remain a distinct object. A purgeable object has |
5695 | * special semantics when its reference count is exactly 1. If its reference |
5696 | * count is greater than 1, then a purgeable object will behave like a normal |
5697 | * object and attempts to use this interface will result in an error return |
5698 | * of KERN_INVALID_ARGUMENT. |
5699 | * |
5700 | * A purgeable object may be put into a "volatile" state which will make the |
 * object's pages eligible for being reclaimed without paging to backing
5702 | * store if the system runs low on memory. If the pages in a volatile |
5703 | * purgeable object are reclaimed, the purgeable object is said to have been |
5704 | * "emptied." When a purgeable object is emptied the system will reclaim as |
5705 | * many pages from the object as it can in a convenient manner (pages already |
5706 | * en route to backing store or busy for other reasons are left as is). When |
5707 | * a purgeable object is made volatile, its pages will generally be reclaimed |
5708 | * before other pages in the application's working set. This semantic is |
5709 | * generally used by applications which can recreate the data in the object |
5710 | * faster than it can be paged in. One such example might be media assets |
5711 | * which can be reread from a much faster RAID volume. |
5712 | * |
5713 | * A purgeable object may be designated as "non-volatile" which means it will |
5714 | * behave like all other objects in the system with pages being written to and |
5715 | * read from backing store as needed to satisfy system memory needs. If the |
5716 | * object was emptied before the object was made non-volatile, that fact will |
5717 | * be returned as the old state of the purgeable object (see |
5718 | * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which |
5719 | * were reclaimed as part of emptying the object will be refaulted in as |
5720 | * zero-fill on demand. It is up to the application to note that an object |
 * was emptied and recreate the object's contents if necessary. When a
5722 | * purgeable object is made non-volatile, its pages will generally not be paged |
5723 | * out to backing store in the immediate future. A purgeable object may also |
5724 | * be manually emptied. |
5725 | * |
5726 | * Finally, the current state (non-volatile, volatile, volatile & empty) of a |
5727 | * volatile purgeable object may be queried at any time. This information may |
5728 | * be used as a control input to let the application know when the system is |
5729 | * experiencing memory pressure and is reclaiming memory. |
5730 | * |
5731 | * The specified address may be any address within the purgeable object. If |
5732 | * the specified address does not represent any object in the target task's |
5733 | * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the |
5734 | * object containing the specified address is not a purgeable object, then |
5735 | * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be |
5736 | * returned. |
5737 | * |
5738 | * The control parameter may be any one of VM_PURGABLE_SET_STATE or |
5739 | * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter |
5740 | * state is used to set the new state of the purgeable object and return its |
5741 | * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable |
5742 | * object is returned in the parameter state. |
5743 | * |
5744 | * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE, |
5745 | * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent |
5746 | * the non-volatile, volatile and volatile/empty states described above. |
5747 | * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will |
5748 | * immediately reclaim as many pages in the object as can be conveniently |
5749 | * collected (some may have already been written to backing store or be |
5750 | * otherwise busy). |
5751 | * |
5752 | * The process of making a purgeable object non-volatile and determining its |
5753 | * previous state is atomic. Thus, if a purgeable object is made |
5754 | * VM_PURGABLE_NONVOLATILE and the old state is returned as |
5755 | * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are |
5756 | * completely intact and will remain so until the object is made volatile |
5757 | * again. If the old state is returned as VM_PURGABLE_EMPTY then the object |
5758 | * was reclaimed while it was in a volatile state and its previous contents |
5759 | * have been lost. |
5760 | */ |
5761 | /* |
5762 | * The object must be locked. |
5763 | */ |
5764 | kern_return_t |
5765 | vm_object_purgable_control( |
5766 | vm_object_t object, |
5767 | vm_purgable_t control, |
5768 | int *state) |
5769 | { |
5770 | int old_state; |
5771 | int new_state; |
5772 | |
5773 | if (object == VM_OBJECT_NULL) { |
5774 | /* |
5775 | * Object must already be present or it can't be purgeable. |
5776 | */ |
5777 | return KERN_INVALID_ARGUMENT; |
5778 | } |
5779 | |
5780 | vm_object_lock_assert_exclusive(object); |
5781 | |
5782 | /* |
5783 | * Get current state of the purgeable object. |
5784 | */ |
5785 | old_state = object->purgable; |
5786 | if (old_state == VM_PURGABLE_DENY) { |
5787 | return KERN_INVALID_ARGUMENT; |
5788 | } |
5789 | |
	/* purgeable objects can't have delayed copies - now or in the future */
5791 | assert(object->vo_copy == VM_OBJECT_NULL); |
5792 | assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); |
5793 | |
5794 | /* |
5795 | * Execute the desired operation. |
5796 | */ |
5797 | if (control == VM_PURGABLE_GET_STATE) { |
5798 | *state = old_state; |
5799 | return KERN_SUCCESS; |
5800 | } |
5801 | |
5802 | if (control == VM_PURGABLE_SET_STATE && |
5803 | object->purgeable_only_by_kernel) { |
5804 | return KERN_PROTECTION_FAILURE; |
5805 | } |
5806 | |
5807 | if (control != VM_PURGABLE_SET_STATE && |
5808 | control != VM_PURGABLE_SET_STATE_FROM_KERNEL) { |
5809 | return KERN_INVALID_ARGUMENT; |
5810 | } |
5811 | |
5812 | if ((*state) & VM_PURGABLE_DEBUG_EMPTY) { |
5813 | object->volatile_empty = TRUE; |
5814 | } |
5815 | if ((*state) & VM_PURGABLE_DEBUG_FAULT) { |
5816 | object->volatile_fault = TRUE; |
5817 | } |
5818 | |
5819 | new_state = *state & VM_PURGABLE_STATE_MASK; |
5820 | if (new_state == VM_PURGABLE_VOLATILE) { |
5821 | if (old_state == VM_PURGABLE_EMPTY) { |
5822 | /* what's been emptied must stay empty */ |
5823 | new_state = VM_PURGABLE_EMPTY; |
5824 | } |
5825 | if (object->volatile_empty) { |
5826 | /* debugging mode: go straight to empty */ |
5827 | new_state = VM_PURGABLE_EMPTY; |
5828 | } |
5829 | } |
5830 | |
5831 | switch (new_state) { |
5832 | case VM_PURGABLE_DENY: |
5833 | /* |
5834 | * Attempting to convert purgeable memory to non-purgeable: |
5835 | * not allowed. |
5836 | */ |
5837 | return KERN_INVALID_ARGUMENT; |
5838 | case VM_PURGABLE_NONVOLATILE: |
		VM_OBJECT_SET_PURGABLE(object, new_state);
5840 | |
5841 | if (old_state == VM_PURGABLE_VOLATILE) { |
5842 | unsigned int delta; |
5843 | |
5844 | assert(object->resident_page_count >= |
5845 | object->wired_page_count); |
5846 | delta = (object->resident_page_count - |
5847 | object->wired_page_count); |
5848 | |
5849 | assert(vm_page_purgeable_count >= delta); |
5850 | |
5851 | if (delta != 0) { |
5852 | OSAddAtomic(-delta, |
5853 | (SInt32 *)&vm_page_purgeable_count); |
5854 | } |
5855 | if (object->wired_page_count != 0) { |
5856 | assert(vm_page_purgeable_wired_count >= |
5857 | object->wired_page_count); |
5858 | OSAddAtomic(-object->wired_page_count, |
5859 | (SInt32 *)&vm_page_purgeable_wired_count); |
5860 | } |
5861 | |
5862 | vm_page_lock_queues(); |
5863 | |
5864 | /* object should be on a queue */ |
5865 | assert(object->objq.next != NULL && |
5866 | object->objq.prev != NULL); |
5867 | purgeable_q_t queue; |
5868 | |
5869 | /* |
5870 | * Move object from its volatile queue to the |
5871 | * non-volatile queue... |
5872 | */ |
5873 | queue = vm_purgeable_object_remove(object); |
5874 | assert(queue); |
5875 | |
5876 | if (object->purgeable_when_ripe) { |
5877 | vm_purgeable_token_delete_last(queue); |
5878 | } |
5879 | assert(queue->debug_count_objects >= 0); |
5880 | |
5881 | vm_page_unlock_queues(); |
5882 | } |
5883 | if (old_state == VM_PURGABLE_VOLATILE || |
5884 | old_state == VM_PURGABLE_EMPTY) { |
5885 | /* |
5886 | * Transfer the object's pages from the volatile to |
5887 | * non-volatile ledgers. |
5888 | */ |
5889 | vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE); |
5890 | } |
5891 | |
5892 | break; |
5893 | |
5894 | case VM_PURGABLE_VOLATILE: |
5895 | if (object->volatile_fault) { |
5896 | vm_page_t p; |
5897 | int refmod; |
5898 | |
5899 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { |
5900 | if (p->vmp_busy || |
5901 | VM_PAGE_WIRED(p) || |
5902 | p->vmp_fictitious) { |
5903 | continue; |
5904 | } |
				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5906 | if ((refmod & VM_MEM_MODIFIED) && |
5907 | !p->vmp_dirty) { |
5908 | SET_PAGE_DIRTY(p, FALSE); |
5909 | } |
5910 | } |
5911 | } |
5912 | |
5913 | assert(old_state != VM_PURGABLE_EMPTY); |
5914 | |
5915 | purgeable_q_t queue; |
5916 | |
5917 | /* find the correct queue */ |
5918 | if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) { |
5919 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; |
5920 | } else { |
5921 | if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) { |
5922 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; |
5923 | } else { |
5924 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; |
5925 | } |
5926 | } |
5927 | |
5928 | if (old_state == VM_PURGABLE_NONVOLATILE || |
5929 | old_state == VM_PURGABLE_EMPTY) { |
5930 | unsigned int delta; |
5931 | |
5932 | if ((*state & VM_PURGABLE_NO_AGING_MASK) == |
5933 | VM_PURGABLE_NO_AGING) { |
5934 | VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(object, FALSE); |
5935 | } else { |
5936 | VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(object, TRUE); |
5937 | } |
5938 | |
5939 | if (object->purgeable_when_ripe) { |
5940 | kern_return_t result; |
5941 | |
5942 | /* try to add token... this can fail */ |
5943 | vm_page_lock_queues(); |
5944 | |
5945 | result = vm_purgeable_token_add(queue); |
5946 | if (result != KERN_SUCCESS) { |
5947 | vm_page_unlock_queues(); |
5948 | return result; |
5949 | } |
5950 | vm_page_unlock_queues(); |
5951 | } |
5952 | |
5953 | assert(object->resident_page_count >= |
5954 | object->wired_page_count); |
5955 | delta = (object->resident_page_count - |
5956 | object->wired_page_count); |
5957 | |
5958 | if (delta != 0) { |
5959 | OSAddAtomic(delta, |
5960 | &vm_page_purgeable_count); |
5961 | } |
5962 | if (object->wired_page_count != 0) { |
5963 | OSAddAtomic(object->wired_page_count, |
5964 | &vm_page_purgeable_wired_count); |
5965 | } |
5966 | |
			VM_OBJECT_SET_PURGABLE(object, new_state);
5968 | |
5969 | /* object should be on "non-volatile" queue */ |
5970 | assert(object->objq.next != NULL); |
5971 | assert(object->objq.prev != NULL); |
5972 | } else if (old_state == VM_PURGABLE_VOLATILE) { |
5973 | purgeable_q_t old_queue; |
5974 | boolean_t purgeable_when_ripe; |
5975 | |
5976 | /* |
			 * If we're just reassigning priorities / purgeable groups, we don't
			 * change the token queue, so moving priorities will not make pages
			 * stay around longer.  The reasoning is that the algorithm gives most
			 * priority to the most important object.  If a new token is added,
			 * the most important object's priority is boosted.  This already
			 * biases the system toward purgeable queues that move a lot, so no
			 * additional biasing seems necessary here, where no new object is added.
5983 | */ |
5984 | assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */ |
5985 | |
5986 | old_queue = vm_purgeable_object_remove(object); |
5987 | assert(old_queue); |
5988 | |
5989 | if ((*state & VM_PURGABLE_NO_AGING_MASK) == |
5990 | VM_PURGABLE_NO_AGING) { |
5991 | purgeable_when_ripe = FALSE; |
5992 | } else { |
5993 | purgeable_when_ripe = TRUE; |
5994 | } |
5995 | |
5996 | if (old_queue != queue || |
5997 | (purgeable_when_ripe != |
5998 | object->purgeable_when_ripe)) { |
5999 | kern_return_t result; |
6000 | |
6001 | /* Changing queue. Have to move token. */ |
6002 | vm_page_lock_queues(); |
6003 | if (object->purgeable_when_ripe) { |
					vm_purgeable_token_delete_last(old_queue);
				}
				VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(object, purgeable_when_ripe);
6007 | if (object->purgeable_when_ripe) { |
6008 | result = vm_purgeable_token_add(queue); |
6009 | assert(result == KERN_SUCCESS); /* this should never fail since we just freed a token */ |
6010 | } |
6011 | vm_page_unlock_queues(); |
6012 | } |
6013 | } |
6014 | ; |
		vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT);
6016 | if (old_state == VM_PURGABLE_NONVOLATILE) { |
6017 | vm_purgeable_accounting(object, |
6018 | VM_PURGABLE_NONVOLATILE); |
6019 | } |
6020 | |
6021 | assert(queue->debug_count_objects >= 0); |
6022 | |
6023 | break; |
6024 | |
6025 | |
6026 | case VM_PURGABLE_EMPTY: |
6027 | if (object->volatile_fault) { |
6028 | vm_page_t p; |
6029 | int refmod; |
6030 | |
6031 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { |
6032 | if (p->vmp_busy || |
6033 | VM_PAGE_WIRED(p) || |
6034 | p->vmp_fictitious) { |
6035 | continue; |
6036 | } |
				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
6038 | if ((refmod & VM_MEM_MODIFIED) && |
6039 | !p->vmp_dirty) { |
6040 | SET_PAGE_DIRTY(p, FALSE); |
6041 | } |
6042 | } |
6043 | } |
6044 | |
6045 | if (old_state == VM_PURGABLE_VOLATILE) { |
6046 | purgeable_q_t old_queue; |
6047 | |
6048 | /* object should be on a queue */ |
6049 | assert(object->objq.next != NULL && |
6050 | object->objq.prev != NULL); |
6051 | |
6052 | old_queue = vm_purgeable_object_remove(object); |
6053 | assert(old_queue); |
6054 | if (object->purgeable_when_ripe) { |
6055 | vm_page_lock_queues(); |
				vm_purgeable_token_delete_first(old_queue);
6057 | vm_page_unlock_queues(); |
6058 | } |
6059 | } |
6060 | |
6061 | if (old_state == VM_PURGABLE_NONVOLATILE) { |
6062 | /* |
6063 | * This object's pages were previously accounted as |
6064 | * "non-volatile" and now need to be accounted as |
6065 | * "volatile". |
6066 | */ |
6067 | vm_purgeable_accounting(object, |
6068 | VM_PURGABLE_NONVOLATILE); |
6069 | /* |
6070 | * Set to VM_PURGABLE_EMPTY because the pages are no |
6071 | * longer accounted in the "non-volatile" ledger |
6072 | * and are also not accounted for in |
6073 | * "vm_page_purgeable_count". |
6074 | */ |
6075 | VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_EMPTY); |
6076 | } |
6077 | |
		(void) vm_object_purge(object, 0);
6079 | assert(object->purgable == VM_PURGABLE_EMPTY); |
6080 | |
6081 | break; |
6082 | } |
6083 | |
6084 | *state = old_state; |
6085 | |
6086 | vm_object_lock_assert_exclusive(object); |
6087 | |
6088 | return KERN_SUCCESS; |
6089 | } |
6090 | |
6091 | kern_return_t |
6092 | vm_object_get_page_counts( |
6093 | vm_object_t object, |
6094 | vm_object_offset_t offset, |
6095 | vm_object_size_t size, |
6096 | unsigned int *resident_page_count, |
6097 | unsigned int *dirty_page_count) |
6098 | { |
6099 | kern_return_t kr = KERN_SUCCESS; |
6100 | boolean_t count_dirty_pages = FALSE; |
6101 | vm_page_t p = VM_PAGE_NULL; |
6102 | unsigned int local_resident_count = 0; |
6103 | unsigned int local_dirty_count = 0; |
6104 | vm_object_offset_t cur_offset = 0; |
6105 | vm_object_offset_t end_offset = 0; |
6106 | |
6107 | if (object == VM_OBJECT_NULL) { |
6108 | return KERN_INVALID_ARGUMENT; |
6109 | } |
6110 | |
6111 | |
6112 | cur_offset = offset; |
6113 | |
6114 | end_offset = offset + size; |
6115 | |
6116 | vm_object_lock_assert_exclusive(object); |
6117 | |
6118 | if (dirty_page_count != NULL) { |
6119 | count_dirty_pages = TRUE; |
6120 | } |
6121 | |
6122 | if (resident_page_count != NULL && count_dirty_pages == FALSE) { |
6123 | /* |
6124 | * Fast path when: |
6125 | * - we only want the resident page count, and, |
6126 | * - the entire object is exactly covered by the request. |
6127 | */ |
6128 | if (offset == 0 && (object->vo_size == size)) { |
6129 | *resident_page_count = object->resident_page_count; |
6130 | goto out; |
6131 | } |
6132 | } |
6133 | |
6134 | if (object->resident_page_count <= (size >> PAGE_SHIFT)) { |
6135 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { |
6136 | if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) { |
6137 | local_resident_count++; |
6138 | |
6139 | if (count_dirty_pages) { |
					if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6141 | local_dirty_count++; |
6142 | } |
6143 | } |
6144 | } |
6145 | } |
6146 | } else { |
6147 | for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) { |
			p = vm_page_lookup(object, cur_offset);
6149 | |
6150 | if (p != VM_PAGE_NULL) { |
6151 | local_resident_count++; |
6152 | |
6153 | if (count_dirty_pages) { |
					if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6155 | local_dirty_count++; |
6156 | } |
6157 | } |
6158 | } |
6159 | } |
6160 | } |
6161 | |
6162 | if (resident_page_count != NULL) { |
6163 | *resident_page_count = local_resident_count; |
6164 | } |
6165 | |
6166 | if (dirty_page_count != NULL) { |
6167 | *dirty_page_count = local_dirty_count; |
6168 | } |
6169 | |
6170 | out: |
6171 | return kr; |
6172 | } |
6173 | |
6174 | |
6175 | /* |
6176 | * vm_object_reference: |
6177 | * |
6178 | * Gets another reference to the given object. |
6179 | */ |
6180 | #ifdef vm_object_reference |
6181 | #undef vm_object_reference |
6182 | #endif |
6183 | __private_extern__ void |
6184 | vm_object_reference( |
6185 | vm_object_t object) |
6186 | { |
6187 | if (object == VM_OBJECT_NULL) { |
6188 | return; |
6189 | } |
6190 | |
6191 | vm_object_lock(object); |
6192 | assert(object->ref_count > 0); |
6193 | vm_object_reference_locked(object); |
6194 | vm_object_unlock(object); |
6195 | } |
6196 | |
6197 | /* |
6198 | * vm_object_transpose |
6199 | * |
6200 | * This routine takes two VM objects of the same size and exchanges |
6201 | * their backing store. |
6202 | * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE |
6203 | * and UPL_BLOCK_ACCESS if they are referenced anywhere. |
6204 | * |
6205 | * The VM objects must not be locked by caller. |
6206 | */ |
6207 | unsigned int vm_object_transpose_count = 0; |
6208 | kern_return_t |
6209 | vm_object_transpose( |
6210 | vm_object_t object1, |
6211 | vm_object_t object2, |
6212 | vm_object_size_t transpose_size) |
6213 | { |
6214 | vm_object_t tmp_object; |
6215 | kern_return_t retval; |
6216 | boolean_t object1_locked, object2_locked; |
6217 | vm_page_t page; |
6218 | vm_object_offset_t page_offset; |
6219 | |
6220 | tmp_object = VM_OBJECT_NULL; |
6221 | object1_locked = FALSE; object2_locked = FALSE; |
6222 | |
6223 | if (object1 == object2 || |
6224 | object1 == VM_OBJECT_NULL || |
6225 | object2 == VM_OBJECT_NULL) { |
6226 | /* |
6227 | * If the 2 VM objects are the same, there's |
6228 | * no point in exchanging their backing store. |
6229 | */ |
6230 | retval = KERN_INVALID_VALUE; |
6231 | goto done; |
6232 | } |
6233 | |
6234 | /* |
6235 | * Since we need to lock both objects at the same time, |
6236 | * make sure we always lock them in the same order to |
6237 | * avoid deadlocks. |
6238 | */ |
6239 | if (object1 > object2) { |
6240 | tmp_object = object1; |
6241 | object1 = object2; |
6242 | object2 = tmp_object; |
6243 | } |
6244 | |
6245 | /* |
6246 | * Allocate a temporary VM object to hold object1's contents |
6247 | * while we copy object2 to object1. |
6248 | */ |
	tmp_object = vm_object_allocate(transpose_size);
	vm_object_lock(tmp_object);
	VM_OBJECT_SET_CAN_PERSIST(tmp_object, FALSE);
6252 | |
6253 | |
6254 | /* |
6255 | * Grab control of the 1st VM object. |
6256 | */ |
6257 | vm_object_lock(object1); |
6258 | object1_locked = TRUE; |
6259 | if (!object1->alive || object1->terminating || |
6260 | object1->vo_copy || object1->shadow || object1->shadowed || |
6261 | object1->purgable != VM_PURGABLE_DENY) { |
6262 | /* |
6263 | * We don't deal with copy or shadow objects (yet). |
6264 | */ |
6265 | retval = KERN_INVALID_VALUE; |
6266 | goto done; |
6267 | } |
6268 | /* |
6269 | * We're about to mess with the object's backing store and |
6270 | * taking a "paging_in_progress" reference wouldn't be enough |
6271 | * to prevent any paging activity on this object, so the caller should |
6272 | * have "quiesced" the objects beforehand, via a UPL operation with |
6273 | * UPL_SET_IO_WIRE (to make sure all the pages are there and wired) |
6274 | * and UPL_BLOCK_ACCESS (to mark the pages "busy"). |
6275 | * |
6276 | * Wait for any paging operation to complete (but only paging, not |
	 * other kinds of activity not linked to the pager). After we're
	 * satisfied that there's no more paging in progress, we keep the
6279 | * object locked, to guarantee that no one tries to access its pager. |
6280 | */ |
6281 | vm_object_paging_only_wait(object1, THREAD_UNINT); |
6282 | |
6283 | /* |
6284 | * Same as above for the 2nd object... |
6285 | */ |
6286 | vm_object_lock(object2); |
6287 | object2_locked = TRUE; |
6288 | if (!object2->alive || object2->terminating || |
6289 | object2->vo_copy || object2->shadow || object2->shadowed || |
6290 | object2->purgable != VM_PURGABLE_DENY) { |
6291 | retval = KERN_INVALID_VALUE; |
6292 | goto done; |
6293 | } |
6294 | vm_object_paging_only_wait(object2, THREAD_UNINT); |
6295 | |
6296 | |
6297 | if (object1->vo_size != object2->vo_size || |
6298 | object1->vo_size != transpose_size) { |
6299 | /* |
6300 | * If the 2 objects don't have the same size, we can't |
6301 | * exchange their backing stores or one would overflow. |
6302 | * If their size doesn't match the caller's |
6303 | * "transpose_size", we can't do it either because the |
6304 | * transpose operation will affect the entire span of |
6305 | * the objects. |
6306 | */ |
6307 | retval = KERN_INVALID_VALUE; |
6308 | goto done; |
6309 | } |
6310 | |
6311 | |
6312 | /* |
6313 | * Transpose the lists of resident pages. |
6314 | * This also updates the resident_page_count and the memq_hint. |
6315 | */ |
6316 | if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) { |
6317 | /* |
6318 | * No pages in object1, just transfer pages |
6319 | * from object2 to object1. No need to go through |
6320 | * an intermediate object. |
6321 | */ |
6322 | while (!vm_page_queue_empty(&object2->memq)) { |
6323 | page = (vm_page_t) vm_page_queue_first(&object2->memq); |
			vm_page_rename(page, object1, page->vmp_offset);
6325 | } |
6326 | assert(vm_page_queue_empty(&object2->memq)); |
6327 | } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) { |
6328 | /* |
6329 | * No pages in object2, just transfer pages |
6330 | * from object1 to object2. No need to go through |
6331 | * an intermediate object. |
6332 | */ |
6333 | while (!vm_page_queue_empty(&object1->memq)) { |
6334 | page = (vm_page_t) vm_page_queue_first(&object1->memq); |
			vm_page_rename(page, object2, page->vmp_offset);
6336 | } |
6337 | assert(vm_page_queue_empty(&object1->memq)); |
6338 | } else { |
6339 | /* transfer object1's pages to tmp_object */ |
6340 | while (!vm_page_queue_empty(&object1->memq)) { |
6341 | page = (vm_page_t) vm_page_queue_first(&object1->memq); |
6342 | page_offset = page->vmp_offset; |
6343 | vm_page_remove(page, TRUE); |
6344 | page->vmp_offset = page_offset; |
6345 | vm_page_queue_enter(&tmp_object->memq, page, vmp_listq); |
6346 | } |
6347 | assert(vm_page_queue_empty(&object1->memq)); |
6348 | /* transfer object2's pages to object1 */ |
6349 | while (!vm_page_queue_empty(&object2->memq)) { |
6350 | page = (vm_page_t) vm_page_queue_first(&object2->memq); |
			vm_page_rename(page, object1, page->vmp_offset);
6352 | } |
6353 | assert(vm_page_queue_empty(&object2->memq)); |
6354 | /* transfer tmp_object's pages to object2 */ |
6355 | while (!vm_page_queue_empty(&tmp_object->memq)) { |
6356 | page = (vm_page_t) vm_page_queue_first(&tmp_object->memq); |
6357 | vm_page_queue_remove(&tmp_object->memq, page, vmp_listq); |
			vm_page_insert(page, object2, page->vmp_offset);
6359 | } |
6360 | assert(vm_page_queue_empty(&tmp_object->memq)); |
6361 | } |
6362 | |
6363 | #define __TRANSPOSE_FIELD(field) \ |
6364 | MACRO_BEGIN \ |
6365 | tmp_object->field = object1->field; \ |
6366 | object1->field = object2->field; \ |
6367 | object2->field = tmp_object->field; \ |
6368 | MACRO_END |
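/*
 * __TRANSPOSE_FIELD() performs a three-way swap of a single field, using the
 * same-named field of tmp_object as scratch space; tmp_object is re-initialized
 * and deallocated before this routine returns, so the values parked there are
 * never used for anything else.
 */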
6369 | |
6370 | /* "Lock" refers to the object not its contents */ |
6371 | /* "size" should be identical */ |
6372 | assert(object1->vo_size == object2->vo_size); |
6373 | /* "memq_hint" was updated above when transposing pages */ |
6374 | /* "ref_count" refers to the object not its contents */ |
6375 | assert(object1->ref_count >= 1); |
6376 | assert(object2->ref_count >= 1); |
6377 | /* "resident_page_count" was updated above when transposing pages */ |
6378 | /* "wired_page_count" was updated above when transposing pages */ |
6379 | #if !VM_TAG_ACTIVE_UPDATE |
6380 | /* "wired_objq" was dealt with along with "wired_page_count" */ |
6381 | #endif /* ! VM_TAG_ACTIVE_UPDATE */ |
6382 | /* "reusable_page_count" was updated above when transposing pages */ |
6383 | /* there should be no "copy" */ |
6384 | assert(!object1->vo_copy); |
6385 | assert(!object2->vo_copy); |
6386 | /* there should be no "shadow" */ |
6387 | assert(!object1->shadow); |
6388 | assert(!object2->shadow); |
6389 | __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */ |
6390 | __TRANSPOSE_FIELD(pager); |
6391 | __TRANSPOSE_FIELD(paging_offset); |
6392 | __TRANSPOSE_FIELD(pager_control); |
6393 | /* update the memory_objects' pointers back to the VM objects */ |
6394 | if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) { |
		memory_object_control_collapse(&object1->pager_control,
		    object1);
6397 | } |
6398 | if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) { |
		memory_object_control_collapse(&object2->pager_control,
		    object2);
6401 | } |
6402 | __TRANSPOSE_FIELD(copy_strategy); |
6403 | /* "paging_in_progress" refers to the object not its contents */ |
6404 | assert(!object1->paging_in_progress); |
6405 | assert(!object2->paging_in_progress); |
6406 | assert(object1->activity_in_progress); |
6407 | assert(object2->activity_in_progress); |
6408 | /* "all_wanted" refers to the object not its contents */ |
6409 | __TRANSPOSE_FIELD(pager_created); |
6410 | __TRANSPOSE_FIELD(pager_initialized); |
6411 | __TRANSPOSE_FIELD(pager_ready); |
6412 | __TRANSPOSE_FIELD(pager_trusted); |
6413 | __TRANSPOSE_FIELD(can_persist); |
6414 | __TRANSPOSE_FIELD(internal); |
6415 | __TRANSPOSE_FIELD(private); |
6416 | __TRANSPOSE_FIELD(pageout); |
6417 | /* "alive" should be set */ |
6418 | assert(object1->alive); |
6419 | assert(object2->alive); |
6420 | /* "purgeable" should be non-purgeable */ |
6421 | assert(object1->purgable == VM_PURGABLE_DENY); |
6422 | assert(object2->purgable == VM_PURGABLE_DENY); |
6423 | /* "shadowed" refers to the the object not its contents */ |
6424 | __TRANSPOSE_FIELD(purgeable_when_ripe); |
6425 | __TRANSPOSE_FIELD(true_share); |
6426 | /* "terminating" should not be set */ |
6427 | assert(!object1->terminating); |
6428 | assert(!object2->terminating); |
6429 | /* transfer "named" reference if needed */ |
6430 | if (object1->named && !object2->named) { |
6431 | assert(object1->ref_count >= 2); |
6432 | assert(object2->ref_count >= 1); |
6433 | object1->ref_count--; |
6434 | object2->ref_count++; |
6435 | } else if (!object1->named && object2->named) { |
6436 | assert(object1->ref_count >= 1); |
6437 | assert(object2->ref_count >= 2); |
6438 | object1->ref_count++; |
6439 | object2->ref_count--; |
6440 | } |
6441 | __TRANSPOSE_FIELD(named); |
6442 | /* "shadow_severed" refers to the object not its contents */ |
6443 | __TRANSPOSE_FIELD(phys_contiguous); |
6444 | __TRANSPOSE_FIELD(nophyscache); |
6445 | __TRANSPOSE_FIELD(no_pager_reason); |
6446 | /* "cached_list.next" points to transposed object */ |
6447 | object1->cached_list.next = (queue_entry_t) object2; |
6448 | object2->cached_list.next = (queue_entry_t) object1; |
6449 | /* "cached_list.prev" should be NULL */ |
6450 | assert(object1->cached_list.prev == NULL); |
6451 | assert(object2->cached_list.prev == NULL); |
6452 | __TRANSPOSE_FIELD(last_alloc); |
6453 | __TRANSPOSE_FIELD(sequential); |
6454 | __TRANSPOSE_FIELD(pages_created); |
6455 | __TRANSPOSE_FIELD(pages_used); |
6456 | __TRANSPOSE_FIELD(scan_collisions); |
6457 | __TRANSPOSE_FIELD(cow_hint); |
6458 | __TRANSPOSE_FIELD(wimg_bits); |
6459 | __TRANSPOSE_FIELD(set_cache_attr); |
6460 | __TRANSPOSE_FIELD(code_signed); |
6461 | object1->transposed = TRUE; |
6462 | object2->transposed = TRUE; |
6463 | __TRANSPOSE_FIELD(mapping_in_progress); |
6464 | __TRANSPOSE_FIELD(volatile_empty); |
6465 | __TRANSPOSE_FIELD(volatile_fault); |
6466 | __TRANSPOSE_FIELD(all_reusable); |
6467 | assert(object1->blocked_access); |
6468 | assert(object2->blocked_access); |
6469 | __TRANSPOSE_FIELD(set_cache_attr); |
6470 | assert(!object1->object_is_shared_cache); |
6471 | assert(!object2->object_is_shared_cache); |
6472 | /* ignore purgeable_queue_type and purgeable_queue_group */ |
6473 | assert(!object1->io_tracking); |
6474 | assert(!object2->io_tracking); |
6475 | #if VM_OBJECT_ACCESS_TRACKING |
6476 | assert(!object1->access_tracking); |
6477 | assert(!object2->access_tracking); |
6478 | #endif /* VM_OBJECT_ACCESS_TRACKING */ |
6479 | __TRANSPOSE_FIELD(no_tag_update); |
6480 | #if CONFIG_SECLUDED_MEMORY |
6481 | assert(!object1->eligible_for_secluded); |
6482 | assert(!object2->eligible_for_secluded); |
6483 | assert(!object1->can_grab_secluded); |
6484 | assert(!object2->can_grab_secluded); |
6485 | #else /* CONFIG_SECLUDED_MEMORY */ |
6486 | assert(object1->__object3_unused_bits == 0); |
6487 | assert(object2->__object3_unused_bits == 0); |
6488 | #endif /* CONFIG_SECLUDED_MEMORY */ |
6489 | #if UPL_DEBUG |
6490 | /* "uplq" refers to the object not its contents (see upl_transpose()) */ |
6491 | #endif |
6492 | assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL)); |
6493 | assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL)); |
6494 | assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL)); |
6495 | assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL)); |
6496 | |
6497 | #undef __TRANSPOSE_FIELD |
6498 | |
6499 | retval = KERN_SUCCESS; |
6500 | |
6501 | done: |
6502 | /* |
6503 | * Cleanup. |
6504 | */ |
6505 | if (tmp_object != VM_OBJECT_NULL) { |
6506 | vm_object_unlock(tmp_object); |
6507 | /* |
6508 | * Re-initialize the temporary object to avoid |
6509 | * deallocating a real pager. |
6510 | */ |
6511 | _vm_object_allocate(transpose_size, tmp_object); |
6512 | vm_object_deallocate(tmp_object); |
6513 | tmp_object = VM_OBJECT_NULL; |
6514 | } |
6515 | |
6516 | if (object1_locked) { |
6517 | vm_object_unlock(object1); |
6518 | object1_locked = FALSE; |
6519 | } |
6520 | if (object2_locked) { |
6521 | vm_object_unlock(object2); |
6522 | object2_locked = FALSE; |
6523 | } |
6524 | |
6525 | vm_object_transpose_count++; |
6526 | |
6527 | return retval; |
6528 | } |
6529 | |
6530 | |
6531 | /* |
6532 | * vm_object_cluster_size |
6533 | * |
6534 | * Determine how big a cluster we should issue an I/O for... |
6535 | * |
6536 | * Inputs: *start == offset of page needed |
6537 | * *length == maximum cluster pager can handle |
6538 | * Outputs: *start == beginning offset of cluster |
6539 | * *length == length of cluster to try |
6540 | * |
6541 | * The original *start will be encompassed by the cluster |
6542 | * |
6543 | */ |
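/*
 * Illustrative sketch (an assumption, not part of the original source):
 * a caller in the fault path might use this routine roughly as follows,
 * where "fault_offset" and an already-initialized "fault_info" are
 * hypothetical:
 *
 *	vm_object_offset_t cluster_start = trunc_page_64(fault_offset);
 *	vm_size_t cluster_length = MAX_UPL_TRANSFER_BYTES;
 *	uint32_t io_streaming = 0;
 *
 *	vm_object_cluster_size(object, &cluster_start, &cluster_length,
 *	    &fault_info, &io_streaming);
 *
 *	// on return the cluster is page aligned, at least PAGE_SIZE long,
 *	// and covers the original fault_offset
 */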
6544 | extern int speculative_reads_disabled; |
6545 | |
6546 | /* |
6547 | * Try to always keep these values an even multiple of PAGE_SIZE. We use these values |
6548 | * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to |
6549 | * always be page-aligned. The derivation could involve operations (e.g. division) |
6550 | * that could give us non-page-size aligned values if we start out with values that |
6551 | * are odd multiples of PAGE_SIZE. |
6552 | */ |
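/*
 * Worked example (illustrative numbers only): with 16KB pages, an odd
 * multiple such as 3 * PAGE_SIZE (48KB) divided by 8 on the SSD path
 * below yields 6KB, which is not page aligned and gets truncated (and
 * later clamped back up to PAGE_SIZE); an even multiple such as 512KB
 * divides cleanly down to 64KB, i.e. a whole number of pages.
 */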
6553 | #if !XNU_TARGET_OS_OSX |
6554 | unsigned int preheat_max_bytes = (1024 * 512); |
6555 | #else /* !XNU_TARGET_OS_OSX */ |
6556 | unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES; |
6557 | #endif /* !XNU_TARGET_OS_OSX */ |
6558 | unsigned int preheat_min_bytes = (1024 * 32); |
6559 | |
6560 | |
6561 | __private_extern__ void |
6562 | vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, |
6563 | vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming) |
6564 | { |
6565 | vm_size_t pre_heat_size; |
6566 | vm_size_t tail_size; |
6567 | vm_size_t head_size; |
6568 | vm_size_t max_length; |
6569 | vm_size_t cluster_size; |
6570 | vm_object_offset_t object_size; |
6571 | vm_object_offset_t orig_start; |
6572 | vm_object_offset_t target_start; |
6573 | vm_object_offset_t offset; |
6574 | vm_behavior_t behavior; |
6575 | boolean_t look_behind = TRUE; |
6576 | boolean_t look_ahead = TRUE; |
6577 | boolean_t isSSD = FALSE; |
6578 | uint32_t throttle_limit; |
6579 | int sequential_run; |
6580 | int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; |
6581 | vm_size_t max_ph_size; |
6582 | vm_size_t min_ph_size; |
6583 | |
6584 | assert( !(*length & PAGE_MASK)); |
6585 | assert( !(*start & PAGE_MASK_64)); |
6586 | |
6587 | /* |
6588 | * remember maximum length of run requested |
6589 | */ |
6590 | max_length = *length; |
6591 | /* |
6592 | * we'll always return a cluster size of at least |
6593 | * 1 page, since the original fault must always |
6594 | * be processed |
6595 | */ |
6596 | *length = PAGE_SIZE; |
6597 | *io_streaming = 0; |
6598 | |
6599 | if (speculative_reads_disabled || fault_info == NULL) { |
6600 | /* |
6601 | * no cluster... just fault the page in |
6602 | */ |
6603 | return; |
6604 | } |
6605 | orig_start = *start; |
6606 | target_start = orig_start; |
6607 | cluster_size = round_page(fault_info->cluster_size); |
6608 | behavior = fault_info->behavior; |
6609 | |
6610 | vm_object_lock(object); |
6611 | |
6612 | if (object->pager == MEMORY_OBJECT_NULL) { |
6613 | goto out; /* pager is gone for this object, nothing more to do */ |
6614 | } |
6615 | vnode_pager_get_isSSD(object->pager, &isSSD); |
6616 | |
6617 | min_ph_size = round_page(preheat_min_bytes); |
6618 | max_ph_size = round_page(preheat_max_bytes); |
6619 | |
6620 | #if XNU_TARGET_OS_OSX |
6621 | if (isSSD) { |
6622 | min_ph_size /= 2; |
6623 | max_ph_size /= 8; |
6624 | |
6625 | if (min_ph_size & PAGE_MASK_64) { |
6626 | min_ph_size = trunc_page(min_ph_size); |
6627 | } |
6628 | |
6629 | if (max_ph_size & PAGE_MASK_64) { |
6630 | max_ph_size = trunc_page(max_ph_size); |
6631 | } |
6632 | } |
6633 | #endif /* XNU_TARGET_OS_OSX */ |
6634 | |
6635 | if (min_ph_size < PAGE_SIZE) { |
6636 | min_ph_size = PAGE_SIZE; |
6637 | } |
6638 | |
6639 | if (max_ph_size < PAGE_SIZE) { |
6640 | max_ph_size = PAGE_SIZE; |
6641 | } else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) { |
6642 | max_ph_size = MAX_UPL_TRANSFER_BYTES; |
6643 | } |
6644 | |
6645 | if (max_length > max_ph_size) { |
6646 | max_length = max_ph_size; |
6647 | } |
6648 | |
6649 | if (max_length <= PAGE_SIZE) { |
6650 | goto out; |
6651 | } |
6652 | |
6653 | if (object->internal) { |
6654 | object_size = object->vo_size; |
6655 | } else { |
6656 | vnode_pager_get_object_size(object->pager, &object_size); |
6657 | } |
6658 | |
6659 | object_size = round_page_64(object_size); |
6660 | |
6661 | if (orig_start >= object_size) { |
6662 | /* |
6663 | * fault occurred beyond the EOF... |
6664 | * we need to punt w/o changing the |
6665 | * starting offset |
6666 | */ |
6667 | goto out; |
6668 | } |
6669 | if (object->pages_used > object->pages_created) { |
6670 | /* |
6671 | * must have wrapped our 32 bit counters |
6672 | * so reset |
6673 | */ |
6674 | object->pages_used = object->pages_created = 0; |
6675 | } |
6676 | if ((sequential_run = object->sequential)) { |
6677 | if (sequential_run < 0) { |
6678 | sequential_behavior = VM_BEHAVIOR_RSEQNTL; |
6679 | sequential_run = 0 - sequential_run; |
6680 | } else { |
6681 | sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; |
6682 | } |
6683 | } |
6684 | switch (behavior) { |
6685 | default: |
6686 | behavior = VM_BEHAVIOR_DEFAULT; |
6687 | OS_FALLTHROUGH; |
6688 | |
6689 | case VM_BEHAVIOR_DEFAULT: |
6690 | if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) { |
6691 | goto out; |
6692 | } |
6693 | |
6694 | if (sequential_run >= (3 * PAGE_SIZE)) { |
6695 | pre_heat_size = sequential_run + PAGE_SIZE; |
6696 | |
6697 | if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) { |
6698 | look_behind = FALSE; |
6699 | } else { |
6700 | look_ahead = FALSE; |
6701 | } |
6702 | |
6703 | *io_streaming = 1; |
6704 | } else { |
6705 | if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) { |
6706 | /* |
6707 | * prime the pump |
6708 | */ |
6709 | pre_heat_size = min_ph_size; |
6710 | } else { |
6711 | /* |
6712 | * Linear growth in PH size: The maximum size is max_length... |
6713 | * this calculation will result in a size that is neither a |
6714 | * power of 2 nor a multiple of PAGE_SIZE... so round |
6715 | * it up to the nearest PAGE_SIZE boundary |
6716 | */ |
6717 | pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created; |
6718 | |
6719 | if (pre_heat_size < min_ph_size) { |
6720 | pre_heat_size = min_ph_size; |
6721 | } else { |
6722 | pre_heat_size = round_page(pre_heat_size); |
6723 | } |
6724 | } |
6725 | } |
6726 | break; |
6727 | |
6728 | case VM_BEHAVIOR_RANDOM: |
6729 | if ((pre_heat_size = cluster_size) <= PAGE_SIZE) { |
6730 | goto out; |
6731 | } |
6732 | break; |
6733 | |
6734 | case VM_BEHAVIOR_SEQUENTIAL: |
6735 | if ((pre_heat_size = cluster_size) == 0) { |
6736 | pre_heat_size = sequential_run + PAGE_SIZE; |
6737 | } |
6738 | look_behind = FALSE; |
6739 | *io_streaming = 1; |
6740 | |
6741 | break; |
6742 | |
6743 | case VM_BEHAVIOR_RSEQNTL: |
6744 | if ((pre_heat_size = cluster_size) == 0) { |
6745 | pre_heat_size = sequential_run + PAGE_SIZE; |
6746 | } |
6747 | look_ahead = FALSE; |
6748 | *io_streaming = 1; |
6749 | |
6750 | break; |
6751 | } |
6752 | throttle_limit = (uint32_t) max_length; |
6753 | assert(throttle_limit == max_length); |
6754 | |
6755 | if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) { |
6756 | if (max_length > throttle_limit) { |
6757 | max_length = throttle_limit; |
6758 | } |
6759 | } |
6760 | if (pre_heat_size > max_length) { |
6761 | pre_heat_size = max_length; |
6762 | } |
6763 | |
6764 | if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) { |
6765 | unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count; |
6766 | |
6767 | if (consider_free < vm_page_throttle_limit) { |
6768 | pre_heat_size = trunc_page(pre_heat_size / 16); |
6769 | } else if (consider_free < vm_page_free_target) { |
6770 | pre_heat_size = trunc_page(pre_heat_size / 4); |
6771 | } |
6772 | |
6773 | if (pre_heat_size < min_ph_size) { |
6774 | pre_heat_size = min_ph_size; |
6775 | } |
6776 | } |
6777 | if (look_ahead == TRUE) { |
6778 | if (look_behind == TRUE) { |
6779 | /* |
6780 | * if we get here it's due to a random access... |
6781 | * so we want to center the original fault address |
6782 | * within the cluster we will issue... make sure |
6783 | * to calculate 'head_size' as a multiple of PAGE_SIZE... |
6784 | * 'pre_heat_size' is a multiple of PAGE_SIZE but not |
6785 | * necessarily an even number of pages so we need to truncate |
6786 | * the result to a PAGE_SIZE boundary |
6787 | */ |
6788 | head_size = trunc_page(pre_heat_size / 2); |
6789 | |
6790 | if (target_start > head_size) { |
6791 | target_start -= head_size; |
6792 | } else { |
6793 | target_start = 0; |
6794 | } |
6795 | |
6796 | /* |
6797 | * 'target_start' at this point represents the beginning offset |
6798 | * of the cluster we are considering... 'orig_start' will be in |
6799 | * the center of this cluster if we didn't have to clip the start |
6800 | * due to running into the start of the file |
6801 | */ |
6802 | } |
6803 | if ((target_start + pre_heat_size) > object_size) { |
6804 | pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start)); |
6805 | } |
6806 | /* |
6807 | * at this point calculate the number of pages beyond the original fault |
6808 | * address that we want to consider... this is guaranteed not to extend beyond |
6809 | * the current EOF... |
6810 | */ |
6811 | assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start)); |
6812 | tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE; |
6813 | } else { |
6814 | if (pre_heat_size > target_start) { |
6815 | /* |
6816 | * since pre_heat_size is always smaller than 2^32, |
6817 | * if it is larger than target_start (a 64 bit value) |
6818 | * it is safe to clip target_start to 32 bits |
6819 | */ |
6820 | pre_heat_size = (vm_size_t) target_start; |
6821 | } |
6822 | tail_size = 0; |
6823 | } |
6824 | assert( !(target_start & PAGE_MASK_64)); |
6825 | assert( !(pre_heat_size & PAGE_MASK_64)); |
6826 | |
6827 | if (pre_heat_size <= PAGE_SIZE) { |
6828 | goto out; |
6829 | } |
6830 | |
6831 | if (look_behind == TRUE) { |
6832 | /* |
6833 | * take a look at the pages before the original |
6834 | * faulting offset... recalculate this in case |
6835 | * we had to clip 'pre_heat_size' above to keep |
6836 | * from running past the EOF. |
6837 | */ |
6838 | head_size = pre_heat_size - tail_size - PAGE_SIZE; |
6839 | |
6840 | for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) { |
6841 | /* |
6842 | * don't poke below the lowest offset |
6843 | */ |
6844 | if (offset < fault_info->lo_offset) { |
6845 | break; |
6846 | } |
6847 | /* |
6848 | * for external objects or internal objects w/o a pager, |
6849 | * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN |
6850 | */ |
6851 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) { |
6852 | break; |
6853 | } |
6854 | if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { |
6855 | /* |
6856 | * don't bridge resident pages |
6857 | */ |
6858 | break; |
6859 | } |
6860 | *start = offset; |
6861 | *length += PAGE_SIZE; |
6862 | } |
6863 | } |
6864 | if (look_ahead == TRUE) { |
6865 | for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) { |
6866 | /* |
6867 | * don't poke above the highest offset |
6868 | */ |
6869 | if (offset >= fault_info->hi_offset) { |
6870 | break; |
6871 | } |
6872 | assert(offset < object_size); |
6873 | |
6874 | /* |
6875 | * for external objects or internal objects w/o a pager, |
6876 | * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN |
6877 | */ |
6878 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) { |
6879 | break; |
6880 | } |
6881 | if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { |
6882 | /* |
6883 | * don't bridge resident pages |
6884 | */ |
6885 | break; |
6886 | } |
6887 | *length += PAGE_SIZE; |
6888 | } |
6889 | } |
6890 | out: |
6891 | if (*length > max_length) { |
6892 | *length = max_length; |
6893 | } |
6894 | |
6895 | vm_object_unlock(object); |
6896 | |
6897 | DTRACE_VM1(clustersize, vm_size_t, *length); |
6898 | } |
6899 | |
6900 | |
6901 | /* |
6902 | * Allow manipulation of individual page state. This is actually part of |
6903 | * the UPL regimen but takes place on the VM object rather than on a UPL |
6904 | */ |
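/*
 * Hedged usage sketch (not from the original source): a caller typically
 * sets UPL_POP_BUSY along with the state it wants to change, then clears
 * the busy bit in a second call once it is done with the physical page:
 *
 *	ppnum_t phys;
 *	int flags;
 *
 *	if (vm_object_page_op(object, offset,
 *	    UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DIRTY,
 *	    &phys, &flags) == KERN_SUCCESS) {
 *		// "phys" stays valid only while the page is kept busy
 *		vm_object_page_op(object, offset,
 *		    UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
 *	}
 *
 * The particular flag combination and error handling above are
 * assumptions for illustration only.
 */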
6905 | |
6906 | kern_return_t |
6907 | vm_object_page_op( |
6908 | vm_object_t object, |
6909 | vm_object_offset_t offset, |
6910 | int ops, |
6911 | ppnum_t *phys_entry, |
6912 | int *flags) |
6913 | { |
6914 | vm_page_t dst_page; |
6915 | |
6916 | vm_object_lock(object); |
6917 | |
6918 | if (ops & UPL_POP_PHYSICAL) { |
6919 | if (object->phys_contiguous) { |
6920 | if (phys_entry) { |
6921 | *phys_entry = (ppnum_t) |
6922 | (object->vo_shadow_offset >> PAGE_SHIFT); |
6923 | } |
6924 | vm_object_unlock(object); |
6925 | return KERN_SUCCESS; |
6926 | } else { |
6927 | vm_object_unlock(object); |
6928 | return KERN_INVALID_OBJECT; |
6929 | } |
6930 | } |
6931 | if (object->phys_contiguous) { |
6932 | vm_object_unlock(object); |
6933 | return KERN_INVALID_OBJECT; |
6934 | } |
6935 | |
6936 | while (TRUE) { |
6937 | if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) { |
6938 | vm_object_unlock(object); |
6939 | return KERN_FAILURE; |
6940 | } |
6941 | |
6942 | /* Sync up on getting the busy bit */ |
6943 | if ((dst_page->vmp_busy || dst_page->vmp_cleaning) && |
6944 | (((ops & UPL_POP_SET) && |
6945 | (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) { |
6946 | /* someone else is playing with the page, we will */ |
6947 | /* have to wait */ |
6948 | PAGE_SLEEP(object, dst_page, THREAD_UNINT); |
6949 | continue; |
6950 | } |
6951 | |
6952 | if (ops & UPL_POP_DUMP) { |
6953 | if (dst_page->vmp_pmapped == TRUE) { |
6954 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); |
6955 | } |
6956 | |
6957 | VM_PAGE_FREE(dst_page); |
6958 | break; |
6959 | } |
6960 | |
6961 | if (flags) { |
6962 | *flags = 0; |
6963 | |
6964 | /* Get the condition of flags before requested ops */ |
6965 | /* are undertaken */ |
6966 | |
6967 | if (dst_page->vmp_dirty) { |
6968 | *flags |= UPL_POP_DIRTY; |
6969 | } |
6970 | if (dst_page->vmp_free_when_done) { |
6971 | *flags |= UPL_POP_PAGEOUT; |
6972 | } |
6973 | if (dst_page->vmp_precious) { |
6974 | *flags |= UPL_POP_PRECIOUS; |
6975 | } |
6976 | if (dst_page->vmp_absent) { |
6977 | *flags |= UPL_POP_ABSENT; |
6978 | } |
6979 | if (dst_page->vmp_busy) { |
6980 | *flags |= UPL_POP_BUSY; |
6981 | } |
6982 | } |
6983 | |
6984 | /* The caller should have made a call either contingent with */ |
6985 | /* or prior to this call to set UPL_POP_BUSY */ |
6986 | if (ops & UPL_POP_SET) { |
6987 | /* The protection granted with this assert will */ |
6988 | /* not be complete. If the caller violates the */ |
6989 | /* convention and attempts to change page state */ |
6990 | /* without first setting busy we may not see it */ |
6991 | /* because the page may already be busy. However */ |
6992 | /* if such violations occur we will assert sooner */ |
6993 | /* or later. */ |
6994 | assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY)); |
6995 | if (ops & UPL_POP_DIRTY) { |
6996 | SET_PAGE_DIRTY(dst_page, FALSE); |
6997 | } |
6998 | if (ops & UPL_POP_PAGEOUT) { |
6999 | dst_page->vmp_free_when_done = TRUE; |
7000 | } |
7001 | if (ops & UPL_POP_PRECIOUS) { |
7002 | dst_page->vmp_precious = TRUE; |
7003 | } |
7004 | if (ops & UPL_POP_ABSENT) { |
7005 | dst_page->vmp_absent = TRUE; |
7006 | } |
7007 | if (ops & UPL_POP_BUSY) { |
7008 | dst_page->vmp_busy = TRUE; |
7009 | } |
7010 | } |
7011 | |
7012 | if (ops & UPL_POP_CLR) { |
7013 | assert(dst_page->vmp_busy); |
7014 | if (ops & UPL_POP_DIRTY) { |
7015 | dst_page->vmp_dirty = FALSE; |
7016 | } |
7017 | if (ops & UPL_POP_PAGEOUT) { |
7018 | dst_page->vmp_free_when_done = FALSE; |
7019 | } |
7020 | if (ops & UPL_POP_PRECIOUS) { |
7021 | dst_page->vmp_precious = FALSE; |
7022 | } |
7023 | if (ops & UPL_POP_ABSENT) { |
7024 | dst_page->vmp_absent = FALSE; |
7025 | } |
7026 | if (ops & UPL_POP_BUSY) { |
7027 | dst_page->vmp_busy = FALSE; |
7028 | PAGE_WAKEUP(dst_page); |
7029 | } |
7030 | } |
7031 | if (phys_entry) { |
7032 | /* |
7033 | * The physical page number will remain valid |
7034 | * only if the page is kept busy. |
7035 | */ |
7036 | assert(dst_page->vmp_busy); |
7037 | *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page); |
7038 | } |
7039 | |
7040 | break; |
7041 | } |
7042 | |
7043 | vm_object_unlock(object); |
7044 | return KERN_SUCCESS; |
7045 | } |
7046 | |
7047 | /* |
7048 | * vm_object_range_op offers performance enhancement over |
7049 | * vm_object_page_op for page_op functions which do not require page |
7050 | * level state to be returned from the call. Page_op was created to provide |
7051 | * a low-cost alternative to page manipulation via UPLs when only a single |
7052 | * page was involved. The range_op call establishes the ability in the _op |
7053 | * family of functions to work on multiple pages where the lack of page level |
7054 | * state handling allows the caller to avoid the overhead of the upl structures. |
7055 | */ |
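/*
 * Hedged usage sketch (not from the original source): to measure how far
 * a range is resident starting at "offset_beg", a caller could do:
 *
 *	uint32_t range = 0;
 *
 *	if (vm_object_range_op(object, offset_beg, offset_end,
 *	    UPL_ROP_PRESENT, &range) == KERN_SUCCESS) {
 *		// "range" is the number of bytes scanned before the first
 *		// non-resident page (or the full range if all are resident)
 *	}
 */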
7056 | |
7057 | kern_return_t |
7058 | vm_object_range_op( |
7059 | vm_object_t object, |
7060 | vm_object_offset_t offset_beg, |
7061 | vm_object_offset_t offset_end, |
7062 | int ops, |
7063 | uint32_t *range) |
7064 | { |
7065 | vm_object_offset_t offset; |
7066 | vm_page_t dst_page; |
7067 | |
7068 | if (offset_end - offset_beg > (uint32_t) -1) { |
7069 | /* range is too big and would overflow "*range" */ |
7070 | return KERN_INVALID_ARGUMENT; |
7071 | } |
7072 | if (object->resident_page_count == 0) { |
7073 | if (range) { |
7074 | if (ops & UPL_ROP_PRESENT) { |
7075 | *range = 0; |
7076 | } else { |
7077 | *range = (uint32_t) (offset_end - offset_beg); |
7078 | assert(*range == (offset_end - offset_beg)); |
7079 | } |
7080 | } |
7081 | return KERN_SUCCESS; |
7082 | } |
7083 | vm_object_lock(object); |
7084 | |
7085 | if (object->phys_contiguous) { |
7086 | vm_object_unlock(object); |
7087 | return KERN_INVALID_OBJECT; |
7088 | } |
7089 | |
7090 | offset = offset_beg & ~PAGE_MASK_64; |
7091 | |
7092 | while (offset < offset_end) { |
7093 | dst_page = vm_page_lookup(object, offset); |
7094 | if (dst_page != VM_PAGE_NULL) { |
7095 | if (ops & UPL_ROP_DUMP) { |
7096 | if (dst_page->vmp_busy || dst_page->vmp_cleaning) { |
7097 | /* |
7098 | * someone else is playing with the |
7099 | * page, we will have to wait |
7100 | */ |
7101 | PAGE_SLEEP(object, dst_page, THREAD_UNINT); |
7102 | /* |
7103 | * need to look the page up again since its |
7104 | * state may have changed while we slept... |
7105 | * it might even belong to a different object |
7106 | * at this point |
7107 | */ |
7108 | continue; |
7109 | } |
7110 | if (dst_page->vmp_laundry) { |
7111 | vm_pageout_steal_laundry(dst_page, FALSE); |
7112 | } |
7113 | |
7114 | if (dst_page->vmp_pmapped == TRUE) { |
7115 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); |
7116 | } |
7117 | |
7118 | VM_PAGE_FREE(dst_page); |
7119 | } else if ((ops & UPL_ROP_ABSENT) |
7120 | && (!dst_page->vmp_absent || dst_page->vmp_busy)) { |
7121 | break; |
7122 | } |
7123 | } else if (ops & UPL_ROP_PRESENT) { |
7124 | break; |
7125 | } |
7126 | |
7127 | offset += PAGE_SIZE; |
7128 | } |
7129 | vm_object_unlock(object); |
7130 | |
7131 | if (range) { |
7132 | if (offset > offset_end) { |
7133 | offset = offset_end; |
7134 | } |
7135 | if (offset > offset_beg) { |
7136 | *range = (uint32_t) (offset - offset_beg); |
7137 | assert(*range == (offset - offset_beg)); |
7138 | } else { |
7139 | *range = 0; |
7140 | } |
7141 | } |
7142 | return KERN_SUCCESS; |
7143 | } |
7144 | |
7145 | /* |
7146 | * Used to point a pager directly to a range of memory (when the pager may be associated |
7147 | * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently |
7148 | * expect that the virtual address will denote the start of a range that is physically contiguous. |
7149 | */ |
7150 | kern_return_t |
7151 | pager_map_to_phys_contiguous( |
7152 | memory_object_control_t object, |
7153 | memory_object_offset_t offset, |
7154 | addr64_t base_vaddr, |
7155 | vm_size_t size) |
7156 | { |
7157 | ppnum_t page_num; |
7158 | boolean_t clobbered_private; |
7159 | kern_return_t retval; |
7160 | vm_object_t pager_object; |
7161 | |
7162 | page_num = pmap_find_phys(kernel_pmap, base_vaddr); |
7163 | |
7164 | if (!page_num) { |
7165 | retval = KERN_FAILURE; |
7166 | goto out; |
7167 | } |
7168 | |
7169 | pager_object = memory_object_control_to_vm_object(object); |
7170 | |
7171 | if (!pager_object) { |
7172 | retval = KERN_FAILURE; |
7173 | goto out; |
7174 | } |
7175 | |
7176 | clobbered_private = pager_object->private; |
7177 | if (pager_object->private != TRUE) { |
7178 | vm_object_lock(pager_object); |
7179 | VM_OBJECT_SET_PRIVATE(pager_object, TRUE); |
7180 | vm_object_unlock(pager_object); |
7181 | } |
7182 | retval = vm_object_populate_with_private(pager_object, offset, page_num, size); |
7183 | |
7184 | if (retval != KERN_SUCCESS) { |
7185 | if (pager_object->private != clobbered_private) { |
7186 | vm_object_lock(pager_object); |
7187 | VM_OBJECT_SET_PRIVATE(pager_object, clobbered_private); |
7188 | vm_object_unlock(pager_object); |
7189 | } |
7190 | } |
7191 | |
7192 | out: |
7193 | return retval; |
7194 | } |
7195 | |
7196 | uint32_t scan_object_collision = 0; |
7197 | |
7198 | void |
7199 | vm_object_lock(vm_object_t object) |
7200 | { |
7201 | if (object == vm_pageout_scan_wants_object) { |
7202 | scan_object_collision++; |
7203 | mutex_pause(2); |
7204 | } |
7205 | DTRACE_VM(vm_object_lock_w); |
7206 | lck_rw_lock_exclusive(&object->Lock); |
7207 | } |
7208 | |
7209 | boolean_t |
7210 | vm_object_lock_avoid(vm_object_t object) |
7211 | { |
7212 | if (object == vm_pageout_scan_wants_object) { |
7213 | scan_object_collision++; |
7214 | return TRUE; |
7215 | } |
7216 | return FALSE; |
7217 | } |
7218 | |
7219 | boolean_t |
7220 | _vm_object_lock_try(vm_object_t object) |
7221 | { |
7222 | boolean_t retval; |
7223 | |
7224 | retval = lck_rw_try_lock_exclusive(&object->Lock); |
7225 | #if DEVELOPMENT || DEBUG |
7226 | if (retval == TRUE) { |
7227 | DTRACE_VM(vm_object_lock_w); |
7228 | } |
7229 | #endif |
7230 | return retval; |
7231 | } |
7232 | |
7233 | boolean_t |
7234 | vm_object_lock_try(vm_object_t object) |
7235 | { |
7236 | /* |
7237 | * Called from hibernate path so check before blocking. |
7238 | */ |
7239 | if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) { |
7240 | mutex_pause(2); |
7241 | } |
7242 | return _vm_object_lock_try(object); |
7243 | } |
7244 | |
7245 | /* |
7246 | * Lock the object exclusive. |
7247 | * |
7248 | * Returns true iff the thread had to spin or block before |
7249 | * acquiring the lock. |
7250 | */ |
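/*
 * Hedged usage sketch (not from the original source): a caller that wants
 * to track lock pressure on an object could do something like this, where
 * "my_contention_counter" is hypothetical:
 *
 *	if (vm_object_lock_check_contended(object)) {
 *		my_contention_counter++;
 *	}
 *	... object is now locked exclusive ...
 *	vm_object_unlock(object);
 */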
7251 | bool |
7252 | vm_object_lock_check_contended(vm_object_t object) |
7253 | { |
7254 | if (object == vm_pageout_scan_wants_object) { |
7255 | scan_object_collision++; |
7256 | mutex_pause(2); |
7257 | } |
7258 | DTRACE_VM(vm_object_lock_w); |
7259 | return lck_rw_lock_exclusive_check_contended(&object->Lock); |
7260 | } |
7261 | |
7262 | void |
7263 | vm_object_lock_shared(vm_object_t object) |
7264 | { |
7265 | if (vm_object_lock_avoid(object)) { |
7266 | mutex_pause(2); |
7267 | } |
7268 | DTRACE_VM(vm_object_lock_r); |
7269 | lck_rw_lock_shared(&object->Lock); |
7270 | } |
7271 | |
7272 | boolean_t |
7273 | vm_object_lock_yield_shared(vm_object_t object) |
7274 | { |
7275 | boolean_t retval = FALSE, force_yield = FALSE; |
7276 | |
7277 | vm_object_lock_assert_shared(object); |
7278 | |
7279 | force_yield = vm_object_lock_avoid(object); |
7280 | |
7281 | retval = lck_rw_lock_yield_shared(&object->Lock, force_yield); |
7282 | if (retval) { |
7283 | DTRACE_VM(vm_object_lock_yield); |
7284 | } |
7285 | |
7286 | return retval; |
7287 | } |
7288 | |
7289 | boolean_t |
7290 | vm_object_lock_try_shared(vm_object_t object) |
7291 | { |
7292 | boolean_t retval; |
7293 | |
7294 | if (vm_object_lock_avoid(object)) { |
7295 | mutex_pause(2); |
7296 | } |
7297 | retval = lck_rw_try_lock_shared(&object->Lock); |
7298 | if (retval) { |
7299 | DTRACE_VM(vm_object_lock_r); |
7300 | } |
7301 | return retval; |
7302 | } |
7303 | |
7304 | boolean_t |
7305 | vm_object_lock_upgrade(vm_object_t object) |
7306 | { |
7307 | boolean_t retval; |
7308 | |
7309 | retval = lck_rw_lock_shared_to_exclusive(&object->Lock); |
7310 | #if DEVELOPMENT || DEBUG |
7311 | if (retval == TRUE) { |
7312 | DTRACE_VM(vm_object_lock_w); |
7313 | } |
7314 | #endif |
7315 | return retval; |
7316 | } |
7317 | |
7318 | void |
7319 | vm_object_unlock(vm_object_t object) |
7320 | { |
7321 | #if DEVELOPMENT || DEBUG |
7322 | DTRACE_VM(vm_object_unlock); |
7323 | #endif |
7324 | lck_rw_done(&object->Lock); |
7325 | } |
7326 | |
7327 | |
7328 | unsigned int vm_object_change_wimg_mode_count = 0; |
7329 | |
7330 | /* |
7331 | * The object must be locked |
7332 | */ |
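/*
 * Hedged usage sketch (not from the original source); VM_WIMG_WCOMB is
 * just one possible cache mode a caller might request:
 *
 *	vm_object_lock(object);
 *	vm_object_change_wimg_mode(object, VM_WIMG_WCOMB);
 *	vm_object_unlock(object);
 */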
7333 | void |
7334 | vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode) |
7335 | { |
7336 | vm_page_t p; |
7337 | |
7338 | vm_object_lock_assert_exclusive(object); |
7339 | |
7340 | vm_object_paging_only_wait(object, THREAD_UNINT); |
7341 | |
7342 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { |
7343 | if (!p->vmp_fictitious) { |
7344 | pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode); |
7345 | } |
7346 | } |
7347 | if (wimg_mode == VM_WIMG_USE_DEFAULT) { |
7348 | object->set_cache_attr = FALSE; |
7349 | } else { |
7350 | object->set_cache_attr = TRUE; |
7351 | } |
7352 | |
7353 | object->wimg_bits = wimg_mode; |
7354 | |
7355 | vm_object_change_wimg_mode_count++; |
7356 | } |
7357 | |
7358 | #if CONFIG_FREEZE |
7359 | |
7360 | extern struct freezer_context freezer_context_global; |
7361 | |
7362 | /* |
7363 | * This routine does the "relocation" of previously |
7364 | * compressed pages belonging to this object that are |
7365 | * residing in a number of compressed segments into |
7366 | * a set of compressed segments dedicated to hold |
7367 | * compressed pages belonging to this object. |
7368 | */ |
7369 | |
7370 | extern AbsoluteTime c_freezer_last_yield_ts; |
7371 | |
7372 | #define MAX_FREE_BATCH 32 |
7373 | #define FREEZER_DUTY_CYCLE_ON_MS 5 |
7374 | #define FREEZER_DUTY_CYCLE_OFF_MS 5 |
7375 | |
7376 | static int c_freezer_should_yield(void); |
7377 | |
7378 | |
7379 | static int |
7380 | c_freezer_should_yield() |
7381 | { |
7382 | AbsoluteTime cur_time; |
7383 | uint64_t nsecs; |
7384 | |
7385 | assert(c_freezer_last_yield_ts); |
7386 | clock_get_uptime(&cur_time); |
7387 | |
7388 | SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts); |
7389 | absolutetime_to_nanoseconds(cur_time, &nsecs); |
7390 | |
7391 | if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) { |
7392 | return 1; |
7393 | } |
7394 | return 0; |
7395 | } |
7396 | |
7397 | |
7398 | void |
7399 | vm_object_compressed_freezer_done() |
7400 | { |
7401 | vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead)); |
7402 | } |
7403 | |
7404 | |
7405 | uint32_t |
7406 | vm_object_compressed_freezer_pageout( |
7407 | vm_object_t object, uint32_t dirty_budget) |
7408 | { |
7409 | vm_page_t p; |
7410 | vm_page_t local_freeq = NULL; |
7411 | int local_freed = 0; |
7412 | kern_return_t retval = KERN_SUCCESS; |
7413 | int obj_resident_page_count_snapshot = 0; |
7414 | uint32_t paged_out_count = 0; |
7415 | |
7416 | assert(object != VM_OBJECT_NULL); |
7417 | assert(object->internal); |
7418 | |
7419 | vm_object_lock(object); |
7420 | |
7421 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
7422 | if (!object->pager_initialized) { |
7423 | vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); |
7424 | |
7425 | if (!object->pager_initialized) { |
7426 | vm_object_compressor_pager_create(object); |
7427 | } |
7428 | } |
7429 | |
7430 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
7431 | vm_object_unlock(object); |
7432 | return paged_out_count; |
7433 | } |
7434 | } |
7435 | |
7436 | /* |
7437 | * We could be freezing a shared internal object that might |
7438 | * be part of some other thread's current VM operations. |
7439 | * We skip it if there's a paging-in-progress or activity-in-progress |
7440 | * because we could be here a long time with the map lock held. |
7441 | * |
7442 | * Note: We are holding the map locked while we wait. |
7443 | * This is fine in the freezer path because the task |
7444 | * is suspended and so this latency is acceptable. |
7445 | */ |
7446 | if (object->paging_in_progress || object->activity_in_progress) { |
7447 | vm_object_unlock(object); |
7448 | return paged_out_count; |
7449 | } |
7450 | |
7451 | if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { |
7452 | vm_object_offset_t curr_offset = 0; |
7453 | |
7454 | /* |
7455 | * Go through the object and make sure that any |
7456 | * previously compressed pages are relocated into |
7457 | * a compressed segment associated with our "freezer_chead". |
7458 | */ |
7459 | while (curr_offset < object->vo_size) { |
7460 | curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset); |
7461 | |
7462 | if (curr_offset == (vm_object_offset_t) -1) { |
7463 | break; |
7464 | } |
7465 | |
7466 | retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead)); |
7467 | |
7468 | if (retval != KERN_SUCCESS) { |
7469 | break; |
7470 | } |
7471 | |
7472 | curr_offset += PAGE_SIZE_64; |
7473 | } |
7474 | } |
7475 | |
7476 | /* |
7477 | * We can't hold the object lock while heading down into the compressed pager |
7478 | * layer because we might need the kernel map lock down there to allocate new |
7479 | * compressor data structures. And if this same object is mapped in the kernel |
7480 | * and there's a fault on it, then that thread will want the object lock while |
7481 | * holding the kernel map lock. |
7482 | * |
7483 | * Since we are going to drop/grab the object lock repeatedly, we must make sure |
7484 | * we won't be stuck in an infinite loop if the same page(s) keep getting |
7485 | * decompressed. So we grab a snapshot of the number of pages in the object and |
7486 | * we won't process any more than that number of pages. |
7487 | */ |
7488 | |
7489 | obj_resident_page_count_snapshot = object->resident_page_count; |
7490 | |
7491 | vm_object_activity_begin(object); |
7492 | |
7493 | while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) { |
7494 | p = (vm_page_t)vm_page_queue_first(&object->memq); |
7495 | |
7496 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0); |
7497 | |
7498 | vm_page_lockspin_queues(); |
7499 | |
7500 | if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || VMP_ERROR_GET(p) || VM_PAGE_WIRED(p)) { |
7501 | vm_page_unlock_queues(); |
7502 | |
7503 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0); |
7504 | |
7505 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
7506 | vm_page_queue_enter(&object->memq, p, vmp_listq); |
7507 | |
7508 | continue; |
7509 | } |
7510 | |
7511 | if (p->vmp_pmapped == TRUE) { |
7512 | int refmod_state, pmap_flags; |
7513 | |
7514 | if (p->vmp_dirty || p->vmp_precious) { |
7515 | pmap_flags = PMAP_OPTIONS_COMPRESSOR; |
7516 | } else { |
7517 | pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; |
7518 | } |
7519 | |
7520 | vm_page_lockconvert_queues(); |
7521 | refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL); |
7522 | if (refmod_state & VM_MEM_MODIFIED) { |
7523 | SET_PAGE_DIRTY(p, FALSE); |
7524 | } |
7525 | } |
7526 | |
7527 | if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) { |
7528 | /* |
7529 | * Clean and non-precious page. |
7530 | */ |
7531 | vm_page_unlock_queues(); |
7532 | VM_PAGE_FREE(p); |
7533 | |
7534 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0); |
7535 | continue; |
7536 | } |
7537 | |
7538 | if (p->vmp_laundry) { |
7539 | vm_pageout_steal_laundry(p, TRUE); |
7540 | } |
7541 | |
7542 | vm_page_queues_remove(p, TRUE); |
7543 | |
7544 | vm_page_unlock_queues(); |
7545 | |
7546 | |
7547 | /* |
7548 | * In case the compressor fails to compress this page, we need it at |
7549 | * the back of the object memq so that we don't keep trying to process it. |
7550 | * Make the move here while we have the object lock held. |
7551 | */ |
7552 | |
7553 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
7554 | vm_page_queue_enter(&object->memq, p, vmp_listq); |
7555 | |
7556 | /* |
7557 | * Grab an activity_in_progress here for vm_pageout_compress_page() to consume. |
7558 | * |
7559 | * Mark the page busy so no one messes with it while we have the object lock dropped. |
7560 | */ |
7561 | p->vmp_busy = TRUE; |
7562 | |
7563 | vm_object_activity_begin(object); |
7564 | |
7565 | vm_object_unlock(object); |
7566 | |
7567 | if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead), |
7568 | (freezer_context_global.freezer_ctx_compressor_scratch_buf), |
7569 | p) == KERN_SUCCESS) { |
7570 | /* |
7571 | * page has already been un-tabled from the object via 'vm_page_remove' |
7572 | */ |
7573 | p->vmp_snext = local_freeq; |
7574 | local_freeq = p; |
7575 | local_freed++; |
7576 | paged_out_count++; |
7577 | |
7578 | if (local_freed >= MAX_FREE_BATCH) { |
7579 | OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); |
7580 | |
7581 | vm_page_free_list(local_freeq, TRUE); |
7582 | |
7583 | local_freeq = NULL; |
7584 | local_freed = 0; |
7585 | } |
7586 | freezer_context_global.freezer_ctx_uncompressed_pages++; |
7587 | } |
7588 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0); |
7589 | |
7590 | if (local_freed == 0 && c_freezer_should_yield()) { |
7591 | thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); |
7592 | clock_get_uptime(&c_freezer_last_yield_ts); |
7593 | } |
7594 | |
7595 | vm_object_lock(object); |
7596 | } |
7597 | |
7598 | if (local_freeq) { |
7599 | OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); |
7600 | |
7601 | vm_page_free_list(local_freeq, TRUE); |
7602 | |
7603 | local_freeq = NULL; |
7604 | local_freed = 0; |
7605 | } |
7606 | |
7607 | vm_object_activity_end(object); |
7608 | |
7609 | vm_object_unlock(object); |
7610 | |
7611 | if (c_freezer_should_yield()) { |
7612 | thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); |
7613 | clock_get_uptime(&c_freezer_last_yield_ts); |
7614 | } |
7615 | return paged_out_count; |
7616 | } |
7617 | |
7618 | #endif /* CONFIG_FREEZE */ |
7619 | |
7620 | |
7621 | void |
7622 | vm_object_pageout( |
7623 | vm_object_t object) |
7624 | { |
7625 | vm_page_t p, next; |
7626 | struct vm_pageout_queue *iq; |
7627 | |
7628 | if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) { |
7629 | return; |
7630 | } |
7631 | |
7632 | iq = &vm_pageout_queue_internal; |
7633 | |
7634 | assert(object != VM_OBJECT_NULL ); |
7635 | |
7636 | vm_object_lock(object); |
7637 | |
7638 | if (!object->internal || |
7639 | object->terminating || |
7640 | !object->alive) { |
7641 | vm_object_unlock(object); |
7642 | return; |
7643 | } |
7644 | |
7645 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
7646 | if (!object->pager_initialized) { |
7647 | vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); |
7648 | |
7649 | if (!object->pager_initialized) { |
7650 | vm_object_compressor_pager_create(object); |
7651 | } |
7652 | } |
7653 | |
7654 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
7655 | vm_object_unlock(object); |
7656 | return; |
7657 | } |
7658 | } |
7659 | |
7660 | ReScan: |
7661 | next = (vm_page_t)vm_page_queue_first(&object->memq); |
7662 | |
7663 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) { |
7664 | p = next; |
7665 | next = (vm_page_t)vm_page_queue_next(&next->vmp_listq); |
7666 | |
7667 | assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q); |
7668 | |
7669 | if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) || |
7670 | p->vmp_cleaning || |
7671 | p->vmp_laundry || |
7672 | p->vmp_busy || |
7673 | p->vmp_absent || |
7674 | VMP_ERROR_GET(p) || |
7675 | p->vmp_fictitious || |
7676 | VM_PAGE_WIRED(p)) { |
7677 | /* |
7678 | * Page is already being cleaned or can't be cleaned. |
7679 | */ |
7680 | continue; |
7681 | } |
7682 | if (vm_compressor_low_on_space()) { |
7683 | break; |
7684 | } |
7685 | |
7686 | /* Throw to the pageout queue */ |
7687 | |
7688 | vm_page_lockspin_queues(); |
7689 | |
7690 | if (VM_PAGE_Q_THROTTLED(iq)) { |
7691 | iq->pgo_draining = TRUE; |
7692 | |
7693 | assert_wait((event_t) (&iq->pgo_laundry + 1), |
7694 | THREAD_INTERRUPTIBLE); |
7695 | vm_page_unlock_queues(); |
7696 | vm_object_unlock(object); |
7697 | |
7698 | thread_block(THREAD_CONTINUE_NULL); |
7699 | |
7700 | vm_object_lock(object); |
7701 | goto ReScan; |
7702 | } |
7703 | |
7704 | assert(!p->vmp_fictitious); |
7705 | assert(!p->vmp_busy); |
7706 | assert(!p->vmp_absent); |
7707 | assert(!p->vmp_unusual); |
7708 | assert(!VMP_ERROR_GET(p)); /* XXX there's a window here where we could have an ECC error! */ |
7709 | assert(!VM_PAGE_WIRED(p)); |
7710 | assert(!p->vmp_cleaning); |
7711 | |
7712 | if (p->vmp_pmapped == TRUE) { |
7713 | int refmod_state; |
7714 | int pmap_options; |
7715 | |
7716 | /* |
7717 | * Tell pmap the page should be accounted |
7718 | * for as "compressed" if it's been modified. |
7719 | */ |
7720 | pmap_options = |
7721 | PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; |
7722 | if (p->vmp_dirty || p->vmp_precious) { |
7723 | /* |
7724 | * We already know it's been modified, |
7725 | * so tell pmap to account for it |
7726 | * as "compressed". |
7727 | */ |
7728 | pmap_options = PMAP_OPTIONS_COMPRESSOR; |
7729 | } |
7730 | vm_page_lockconvert_queues(); |
7731 | refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), |
7732 | pmap_options, |
7733 | NULL); |
7734 | if (refmod_state & VM_MEM_MODIFIED) { |
7735 | SET_PAGE_DIRTY(p, FALSE); |
7736 | } |
7737 | } |
7738 | |
7739 | if (!p->vmp_dirty && !p->vmp_precious) { |
7740 | vm_page_unlock_queues(); |
7741 | VM_PAGE_FREE(p); |
7742 | continue; |
7743 | } |
7744 | vm_page_queues_remove(p, TRUE); |
7745 | |
7746 | vm_pageout_cluster(p); |
7747 | |
7748 | vm_page_unlock_queues(); |
7749 | } |
7750 | vm_object_unlock(object); |
7751 | } |
7752 | |
7753 | |
7754 | #if CONFIG_IOSCHED |
7755 | void |
7756 | vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio) |
7757 | { |
7758 | io_reprioritize_req_t req; |
7759 | struct vnode *devvp = NULL; |
7760 | |
7761 | if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) { |
7762 | return; |
7763 | } |
7764 | |
7765 | /* |
7766 | * Create the request for I/O reprioritization. |
7767 | * We use the noblock variant of zalloc because we're holding the object |
7768 | * lock here and we could cause a deadlock in low memory conditions. |
7769 | */ |
7770 | req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone); |
7771 | if (req == NULL) { |
7772 | return; |
7773 | } |
7774 | req->blkno = blkno; |
7775 | req->len = len; |
7776 | req->priority = prio; |
7777 | req->devvp = devvp; |
7778 | |
7779 | /* Insert request into the reprioritization list */ |
7780 | IO_REPRIORITIZE_LIST_LOCK(); |
7781 | queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); |
7782 | IO_REPRIORITIZE_LIST_UNLOCK(); |
7783 | |
7784 | /* Wakeup reprioritize thread */ |
7785 | IO_REPRIO_THREAD_WAKEUP(); |
7786 | |
7787 | return; |
7788 | } |
7789 | |
7790 | void |
7791 | vm_decmp_upl_reprioritize(upl_t upl, int prio) |
7792 | { |
7793 | int offset; |
7794 | vm_object_t object; |
7795 | io_reprioritize_req_t req; |
7796 | struct vnode *devvp = NULL; |
7797 | uint64_t blkno; |
7798 | uint32_t len; |
7799 | upl_t io_upl; |
7800 | uint64_t *io_upl_reprio_info; |
7801 | int io_upl_size; |
7802 | |
7803 | if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) { |
7804 | return; |
7805 | } |
7806 | |
7807 | /* |
7808 | * We don't want to perform any allocations with the upl lock held since that might |
7809 | * result in a deadlock. If the system is low on memory, the pageout thread would |
7810 | * try to pageout stuff and might wait on this lock. If we are waiting for the memory to |
7811 | * be freed up by the pageout thread, it would be a deadlock. |
7812 | */ |
7813 | |
7814 | |
7815 | /* First step is just to get the size of the upl to find out how big the reprio info is */ |
7816 | if (!upl_try_lock(upl)) { |
7817 | return; |
7818 | } |
7819 | |
7820 | if (upl->decmp_io_upl == NULL) { |
7821 | /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */ |
7822 | upl_unlock(upl); |
7823 | return; |
7824 | } |
7825 | |
7826 | io_upl = upl->decmp_io_upl; |
7827 | assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0); |
7828 | assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size), |
7829 | "upl %p offset 0x%llx size 0x%x\n", |
7830 | io_upl, io_upl->u_offset, io_upl->u_size); |
7831 | io_upl_size = io_upl->u_size; |
7832 | upl_unlock(upl); |
7833 | |
7834 | /* Now perform the allocation */ |
7835 | io_upl_reprio_info = kalloc_data(sizeof(uint64_t) * atop(io_upl_size), Z_WAITOK); |
7836 | if (io_upl_reprio_info == NULL) { |
7837 | return; |
7838 | } |
7839 | |
7840 | /* Now again take the lock, recheck the state and grab out the required info */ |
7841 | if (!upl_try_lock(upl)) { |
7842 | goto out; |
7843 | } |
7844 | |
7845 | if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) { |
7846 | /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */ |
7847 | upl_unlock(upl); |
7848 | goto out; |
7849 | } |
7850 | memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, |
7851 | sizeof(uint64_t) * atop(io_upl_size)); |
7852 | |
7853 | /* Get the VM object for this UPL */ |
7854 | if (io_upl->flags & UPL_SHADOWED) { |
7855 | object = io_upl->map_object->shadow; |
7856 | } else { |
7857 | object = io_upl->map_object; |
7858 | } |
7859 | |
7860 | /* Get the dev vnode ptr for this object */ |
7861 | if (!object || !object->pager || |
7862 | vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) { |
7863 | upl_unlock(upl); |
7864 | goto out; |
7865 | } |
7866 | |
7867 | upl_unlock(upl); |
7868 | |
7869 | /* Now we have all the information needed to do the expedite */ |
7870 | |
7871 | offset = 0; |
7872 | while (offset < io_upl_size) { |
7873 | blkno = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK; |
7874 | len = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK; |
7875 | |
7876 | /* |
7877 | * This implementation may cause some spurious expedites due to the |
7878 | * fact that we don't clean up the blkno & len from the upl_reprio_info |
7879 | * even after the I/O is complete. |
7880 | */ |
7881 | |
7882 | if (blkno != 0 && len != 0) { |
7883 | /* Create the request for I/O reprioritization */ |
7884 | req = zalloc_flags(io_reprioritize_req_zone, |
7885 | Z_WAITOK | Z_NOFAIL); |
7886 | req->blkno = blkno; |
7887 | req->len = len; |
7888 | req->priority = prio; |
7889 | req->devvp = devvp; |
7890 | |
7891 | /* Insert request into the reprioritization list */ |
7892 | IO_REPRIORITIZE_LIST_LOCK(); |
7893 | queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); |
7894 | IO_REPRIORITIZE_LIST_UNLOCK(); |
7895 | |
7896 | offset += len; |
7897 | } else { |
7898 | offset += PAGE_SIZE; |
7899 | } |
7900 | } |
7901 | |
7902 | /* Wakeup reprioritize thread */ |
7903 | IO_REPRIO_THREAD_WAKEUP(); |
7904 | |
7905 | out: |
7906 | kfree_data(io_upl_reprio_info, sizeof(uint64_t) * atop(io_upl_size)); |
7907 | } |
7908 | |
7909 | void |
7910 | vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m) |
7911 | { |
7912 | upl_t upl; |
7913 | upl_page_info_t *pl; |
7914 | unsigned int i, num_pages; |
7915 | int cur_tier; |
7916 | |
7917 | cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO); |
7918 | |
7919 | /* |
7920 | * Scan through all UPLs associated with the object to find the |
7921 | * UPL containing the contended page. |
7922 | */ |
7923 | queue_iterate(&o->uplq, upl, upl_t, uplq) { |
7924 | if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) { |
7925 | continue; |
7926 | } |
7927 | pl = UPL_GET_INTERNAL_PAGE_LIST(upl); |
7928 | assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size), |
7929 | "upl %p offset 0x%llx size 0x%x\n", |
7930 | upl, upl->u_offset, upl->u_size); |
7931 | num_pages = (upl->u_size / PAGE_SIZE); |
7932 | |
7933 | /* |
7934 | * For each page in the UPL page list, see if it matches the contended |
7935 | * page and was issued as a low prio I/O. |
7936 | */ |
7937 | for (i = 0; i < num_pages; i++) { |
7938 | if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) { |
7939 | if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) { |
7940 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), |
7941 | VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0); |
7942 | vm_decmp_upl_reprioritize(upl, cur_tier); |
7943 | break; |
7944 | } |
7945 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), |
7946 | upl->upl_reprio_info[i], upl->upl_priority, 0); |
7947 | if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) { |
7948 | vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier); |
7949 | } |
7950 | break; |
7951 | } |
7952 | } |
7953 | /* Check if we found any hits */ |
7954 | if (i != num_pages) { |
7955 | break; |
7956 | } |
7957 | } |
7958 | |
7959 | return; |
7960 | } |
7961 | |
7962 | wait_result_t |
7963 | vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible) |
7964 | { |
7965 | wait_result_t ret; |
7966 | |
7967 | KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0); |
7968 | |
7969 | if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) { |
7970 | /* |
7971 | * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary. |
7972 | */ |
7973 | vm_page_handle_prio_inversion(o, m); |
7974 | } |
7975 | m->vmp_wanted = TRUE; |
7976 | ret = thread_sleep_vm_object(o, m, interruptible); |
7977 | KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0); |
7978 | return ret; |
7979 | } |
7980 | |
7981 | static void |
7982 | io_reprioritize_thread(void *param __unused, wait_result_t wr __unused) |
7983 | { |
7984 | io_reprioritize_req_t req = NULL; |
7985 | |
7986 | while (1) { |
7987 | IO_REPRIORITIZE_LIST_LOCK(); |
7988 | if (queue_empty(&io_reprioritize_list)) { |
7989 | IO_REPRIORITIZE_LIST_UNLOCK(); |
7990 | break; |
7991 | } |
7992 | |
7993 | queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); |
7994 | IO_REPRIORITIZE_LIST_UNLOCK(); |
7995 | |
7996 | vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority); |
7997 | zfree(io_reprioritize_req_zone, req); |
7998 | } |
7999 | |
8000 | IO_REPRIO_THREAD_CONTINUATION(); |
8001 | } |
8002 | #endif |
8003 | |
8004 | #if VM_OBJECT_ACCESS_TRACKING |
8005 | void |
8006 | vm_object_access_tracking( |
8007 | vm_object_t object, |
8008 | int *access_tracking_p, |
8009 | uint32_t *access_tracking_reads_p, |
8010 | uint32_t *access_tracking_writes_p) |
8011 | { |
8012 | int access_tracking; |
8013 | |
8014 | access_tracking = !!*access_tracking_p; |
8015 | |
8016 | vm_object_lock(object); |
8017 | *access_tracking_p = object->access_tracking; |
8018 | if (access_tracking_reads_p) { |
8019 | *access_tracking_reads_p = object->access_tracking_reads; |
8020 | } |
8021 | if (access_tracking_writes_p) { |
8022 | *access_tracking_writes_p = object->access_tracking_writes; |
8023 | } |
8024 | object->access_tracking = access_tracking; |
8025 | object->access_tracking_reads = 0; |
8026 | object->access_tracking_writes = 0; |
8027 | vm_object_unlock(object); |
8028 | |
8029 | if (access_tracking) { |
8030 | vm_object_pmap_protect_options(object, |
8031 | 0, |
8032 | object->vo_size, |
8033 | PMAP_NULL, |
8034 | PAGE_SIZE, |
8035 | 0, |
8036 | VM_PROT_NONE, |
8037 | 0); |
8038 | } |
8039 | } |
8040 | #endif /* VM_OBJECT_ACCESS_TRACKING */ |
8041 | |
8042 | void |
8043 | vm_object_ledger_tag_ledgers( |
8044 | vm_object_t object, |
8045 | int *ledger_idx_volatile, |
8046 | int *ledger_idx_nonvolatile, |
8047 | int *ledger_idx_volatile_compressed, |
8048 | int *ledger_idx_nonvolatile_compressed, |
8049 | boolean_t *do_footprint) |
8050 | { |
8051 | assert(object->shadow == VM_OBJECT_NULL); |
8052 | |
8053 | *do_footprint = !object->vo_no_footprint; |
8054 | |
8055 | switch (object->vo_ledger_tag) { |
8056 | case VM_LEDGER_TAG_NONE: |
8057 | /* |
8058 | * Regular purgeable memory: |
8059 | * counts in footprint only when nonvolatile. |
8060 | */ |
8061 | *do_footprint = TRUE; |
8062 | assert(object->purgable != VM_PURGABLE_DENY); |
8063 | *ledger_idx_volatile = task_ledgers.purgeable_volatile; |
8064 | *ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile; |
8065 | *ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed; |
8066 | *ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed; |
8067 | break; |
8068 | case VM_LEDGER_TAG_DEFAULT: |
8069 | /* |
8070 | * "default" tagged memory: |
8071 | * counts in footprint only when nonvolatile and not marked |
8072 | * as "no_footprint". |
8073 | */ |
8074 | *ledger_idx_volatile = task_ledgers.tagged_nofootprint; |
8075 | *ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed; |
8076 | if (*do_footprint) { |
8077 | *ledger_idx_nonvolatile = task_ledgers.tagged_footprint; |
8078 | *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed; |
8079 | } else { |
8080 | *ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint; |
8081 | *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed; |
8082 | } |
8083 | break; |
8084 | case VM_LEDGER_TAG_NETWORK: |
8085 | /* |
8086 | * "network" tagged memory: |
8087 | * never counts in footprint. |
8088 | */ |
8089 | *do_footprint = FALSE; |
8090 | *ledger_idx_volatile = task_ledgers.network_volatile; |
8091 | *ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed; |
8092 | *ledger_idx_nonvolatile = task_ledgers.network_nonvolatile; |
8093 | *ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed; |
8094 | break; |
8095 | case VM_LEDGER_TAG_MEDIA: |
8096 | /* |
8097 | * "media" tagged memory: |
8098 | * counts in footprint only when nonvolatile and not marked |
8099 | * as "no footprint". |
8100 | */ |
8101 | *ledger_idx_volatile = task_ledgers.media_nofootprint; |
8102 | *ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed; |
8103 | if (*do_footprint) { |
8104 | *ledger_idx_nonvolatile = task_ledgers.media_footprint; |
8105 | *ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed; |
8106 | } else { |
8107 | *ledger_idx_nonvolatile = task_ledgers.media_nofootprint; |
8108 | *ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed; |
8109 | } |
8110 | break; |
8111 | case VM_LEDGER_TAG_GRAPHICS: |
8112 | /* |
8113 | * "graphics" tagged memory: |
8114 | * counts in footprint only when nonvolatile and not marked |
8115 | * as "no footprint". |
8116 | */ |
8117 | *ledger_idx_volatile = task_ledgers.graphics_nofootprint; |
8118 | *ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed; |
8119 | if (*do_footprint) { |
8120 | *ledger_idx_nonvolatile = task_ledgers.graphics_footprint; |
8121 | *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed; |
8122 | } else { |
8123 | *ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint; |
8124 | *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed; |
8125 | } |
8126 | break; |
8127 | case VM_LEDGER_TAG_NEURAL: |
8128 | /* |
8129 | * "neural" tagged memory: |
8130 | * counts in footprint only when nonvolatile and not marked |
8131 | * as "no footprint". |
8132 | */ |
8133 | *ledger_idx_volatile = task_ledgers.neural_nofootprint; |
8134 | *ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed; |
8135 | if (*do_footprint) { |
8136 | *ledger_idx_nonvolatile = task_ledgers.neural_footprint; |
8137 | *ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed; |
8138 | } else { |
8139 | *ledger_idx_nonvolatile = task_ledgers.neural_nofootprint; |
8140 | *ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed; |
8141 | } |
8142 | break; |
8143 | default: |
8144 | panic("%s: object %p has unsupported ledger_tag %d", |
8145 | __FUNCTION__, object, object->vo_ledger_tag); |
8146 | } |
8147 | } |
8148 | |
8149 | kern_return_t |
8150 | vm_object_ownership_change( |
8151 | vm_object_t object, |
8152 | int new_ledger_tag, |
8153 | task_t new_owner, |
8154 | int new_ledger_flags, |
8155 | boolean_t old_task_objq_locked) |
8156 | { |
8157 | int old_ledger_tag; |
8158 | task_t old_owner; |
8159 | int resident_count, wired_count; |
8160 | unsigned int compressed_count; |
8161 | int ledger_idx_volatile; |
8162 | int ledger_idx_nonvolatile; |
8163 | int ledger_idx_volatile_compressed; |
8164 | int ledger_idx_nonvolatile_compressed; |
8165 | int ledger_idx; |
8166 | int ledger_idx_compressed; |
8167 | boolean_t do_footprint, old_no_footprint, new_no_footprint; |
8168 | boolean_t new_task_objq_locked; |
8169 | |
8170 | vm_object_lock_assert_exclusive(object); |
8171 | |
8172 | if (!object->internal) { |
8173 | return KERN_INVALID_ARGUMENT; |
8174 | } |
8175 | if (new_owner == VM_OBJECT_OWNER_UNCHANGED) { |
8176 | /* leave owner unchanged */ |
8177 | new_owner = VM_OBJECT_OWNER(object); |
8178 | } |
8179 | if (new_ledger_tag == VM_LEDGER_TAG_UNCHANGED) { |
8180 | /* leave ledger_tag unchanged */ |
8181 | new_ledger_tag = object->vo_ledger_tag; |
8182 | } |
8183 | if (new_ledger_tag == VM_LEDGER_TAG_NONE && |
8184 | object->purgable == VM_PURGABLE_DENY) { |
8185 | /* non-purgeable memory must have a valid non-zero ledger tag */ |
8186 | return KERN_INVALID_ARGUMENT; |
8187 | } |
8188 | if (new_ledger_tag < 0 || |
8189 | new_ledger_tag > VM_LEDGER_TAG_MAX) { |
8190 | return KERN_INVALID_ARGUMENT; |
8191 | } |
8192 | if (new_ledger_flags & ~VM_LEDGER_FLAGS) { |
8193 | return KERN_INVALID_ARGUMENT; |
8194 | } |
8195 | if (object->vo_ledger_tag == VM_LEDGER_TAG_NONE && |
8196 | object->purgable == VM_PURGABLE_DENY) { |
8197 | /* |
8198 | * This VM object is neither ledger-tagged nor purgeable. |
8199 | * We can convert it to "ledger tag" ownership iff it |
8200 | * has not been used at all yet (no resident pages and |
8201 | * no pager) and it's going to be assigned to a valid task. |
8202 | */ |
8203 | if (object->resident_page_count != 0 || |
8204 | object->pager != NULL || |
8205 | object->pager_created || |
8206 | object->ref_count != 1 || |
8207 | object->vo_owner != TASK_NULL || |
8208 | object->copy_strategy != MEMORY_OBJECT_COPY_NONE || |
8209 | new_owner == TASK_NULL) { |
8210 | return KERN_FAILURE; |
8211 | } |
8212 | } |
8213 | |
8214 | if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) { |
8215 | new_no_footprint = TRUE; |
8216 | } else { |
8217 | new_no_footprint = FALSE; |
8218 | } |
8219 | #if __arm64__ |
8220 | if (!new_no_footprint && |
8221 | object->purgable != VM_PURGABLE_DENY && |
8222 | new_owner != TASK_NULL && |
8223 | new_owner != VM_OBJECT_OWNER_DISOWNED && |
8224 | new_owner->task_legacy_footprint) { |
8225 | /* |
8226 | * This task has been granted "legacy footprint" and should |
8227 | * not be charged for its IOKit purgeable memory. Since we |
8228 | * might now change the accounting of such memory to the |
8229 | * "graphics" ledger, for example, give it the "no footprint" |
8230 | * option. |
8231 | */ |
8232 | new_no_footprint = TRUE; |
8233 | } |
8234 | #endif /* __arm64__ */ |
8235 | assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); |
8236 | assert(object->shadow == VM_OBJECT_NULL); |
8237 | assert(object->vo_copy == VM_OBJECT_NULL); |
8238 | |
8239 | old_ledger_tag = object->vo_ledger_tag; |
8240 | old_no_footprint = object->vo_no_footprint; |
8241 | old_owner = VM_OBJECT_OWNER(object); |
8242 | |
8243 | if (__improbable(vm_debug_events)) { |
8244 | DTRACE_VM8(object_ownership_change, |
8245 | vm_object_t, object, |
8246 | task_t, old_owner, |
8247 | int, old_ledger_tag, |
8248 | int, old_no_footprint, |
8249 | task_t, new_owner, |
8250 | int, new_ledger_tag, |
8251 | int, new_no_footprint, |
8252 | int, VM_OBJECT_ID(object)); |
8253 | } |
8254 | |
8255 | assert(object->internal); |
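/*
* Snapshot the page counts that will be moved between the old and new
* owners' ledgers below.  Wired pages are tracked separately because
* they are always charged as nonvolatile.
*/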
8256 | resident_count = object->resident_page_count - object->wired_page_count; |
8257 | wired_count = object->wired_page_count; |
compressed_count = vm_compressor_pager_get_count(object->pager);
8259 | |
8260 | /* |
8261 | * Deal with the old owner and/or ledger tag, if needed. |
8262 | */ |
8263 | if (old_owner != TASK_NULL && |
8264 | ((old_owner != new_owner) /* new owner ... */ |
8265 | || /* ... or ... */ |
8266 | (old_no_footprint != new_no_footprint) /* new "no_footprint" */ |
8267 | || /* ... or ... */ |
8268 | old_ledger_tag != new_ledger_tag)) { /* ... new ledger */ |
8269 | /* |
8270 | * Take this object off of the old owner's ledgers. |
8271 | */ |
vm_object_ledger_tag_ledgers(object,
&ledger_idx_volatile,
&ledger_idx_nonvolatile,
&ledger_idx_volatile_compressed,
&ledger_idx_nonvolatile_compressed,
&do_footprint);
8278 | if (object->purgable == VM_PURGABLE_VOLATILE || |
8279 | object->purgable == VM_PURGABLE_EMPTY) { |
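/* volatile and "empty" (already purged) objects are charged to the volatile entries */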
8280 | ledger_idx = ledger_idx_volatile; |
8281 | ledger_idx_compressed = ledger_idx_volatile_compressed; |
8282 | } else { |
8283 | ledger_idx = ledger_idx_nonvolatile; |
8284 | ledger_idx_compressed = ledger_idx_nonvolatile_compressed; |
8285 | } |
8286 | if (resident_count) { |
8287 | /* |
* Adjust the appropriate old owner's ledgers by the
* number of resident pages.
*/
ledger_debit(old_owner->ledger,
ledger_idx,
ptoa_64(resident_count));
/* adjust old owner's footprint */
if (do_footprint &&
object->purgable != VM_PURGABLE_VOLATILE &&
object->purgable != VM_PURGABLE_EMPTY) {
ledger_debit(old_owner->ledger,
task_ledgers.phys_footprint,
ptoa_64(resident_count));
8301 | } |
8302 | } |
8303 | if (wired_count) { |
8304 | /* wired pages are always nonvolatile */ |
ledger_debit(old_owner->ledger,
ledger_idx_nonvolatile,
ptoa_64(wired_count));
if (do_footprint) {
ledger_debit(old_owner->ledger,
task_ledgers.phys_footprint,
ptoa_64(wired_count));
8312 | } |
8313 | } |
8314 | if (compressed_count) { |
8315 | /* |
8316 | * Adjust the appropriate old owner's ledgers |
8317 | * by the number of compressed pages. |
8318 | */ |
ledger_debit(old_owner->ledger,
ledger_idx_compressed,
ptoa_64(compressed_count));
if (do_footprint &&
object->purgable != VM_PURGABLE_VOLATILE &&
object->purgable != VM_PURGABLE_EMPTY) {
ledger_debit(old_owner->ledger,
task_ledgers.phys_footprint,
ptoa_64(compressed_count));
8328 | } |
8329 | } |
8330 | if (old_owner != new_owner) { |
8331 | /* remove object from old_owner's list of owned objects */ |
8332 | DTRACE_VM2(object_owner_remove, |
8333 | vm_object_t, object, |
8334 | task_t, old_owner); |
8335 | if (!old_task_objq_locked) { |
8336 | task_objq_lock(old_owner); |
8337 | } |
8338 | old_owner->task_owned_objects--; |
8339 | queue_remove(&old_owner->task_objq, object, |
8340 | vm_object_t, task_objq); |
8341 | switch (object->purgable) { |
8342 | case VM_PURGABLE_NONVOLATILE: |
8343 | case VM_PURGABLE_EMPTY: |
vm_purgeable_nonvolatile_owner_update(old_owner,
-1);
break;
case VM_PURGABLE_VOLATILE:
vm_purgeable_volatile_owner_update(old_owner,
-1);
8350 | break; |
8351 | default: |
8352 | break; |
8353 | } |
8354 | if (!old_task_objq_locked) { |
8355 | task_objq_unlock(old_owner); |
8356 | } |
8357 | } |
8358 | } |
8359 | |
8360 | /* |
8361 | * Switch to new ledger tag and/or owner. |
8362 | */ |
8363 | |
8364 | new_task_objq_locked = FALSE; |
8365 | if (new_owner != old_owner && |
8366 | new_owner != TASK_NULL && |
8367 | new_owner != VM_OBJECT_OWNER_DISOWNED) { |
8368 | /* |
8369 | * If the new owner is not accepting new objects ("disowning"), |
8370 | * the object becomes "disowned" and will be added to |
8371 | * the kernel's task_objq. |
8372 | * |
8373 | * Check first without locking, to avoid blocking while the |
8374 | * task is disowning its objects. |
8375 | */ |
8376 | if (new_owner->task_objects_disowning) { |
8377 | new_owner = VM_OBJECT_OWNER_DISOWNED; |
8378 | } else { |
8379 | task_objq_lock(new_owner); |
8380 | /* check again now that we have the lock */ |
8381 | if (new_owner->task_objects_disowning) { |
8382 | new_owner = VM_OBJECT_OWNER_DISOWNED; |
8383 | task_objq_unlock(new_owner); |
8384 | } else { |
8385 | new_task_objq_locked = TRUE; |
8386 | } |
8387 | } |
8388 | } |
8389 | |
8390 | object->vo_ledger_tag = new_ledger_tag; |
8391 | object->vo_owner = new_owner; |
8392 | object->vo_no_footprint = new_no_footprint; |
8393 | |
8394 | if (new_owner == VM_OBJECT_OWNER_DISOWNED) { |
8395 | /* |
8396 | * Disowned objects are added to the kernel's task_objq but |
8397 | * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to |
8398 | * differentiate them from objects intentionally owned by |
8399 | * the kernel. |
8400 | */ |
8401 | assert(old_owner != kernel_task); |
8402 | new_owner = kernel_task; |
8403 | assert(!new_task_objq_locked); |
8404 | task_objq_lock(new_owner); |
8405 | new_task_objq_locked = TRUE; |
8406 | } |
8407 | |
8408 | /* |
8409 | * Deal with the new owner and/or ledger tag, if needed. |
8410 | */ |
8411 | if (new_owner != TASK_NULL && |
8412 | ((new_owner != old_owner) /* new owner ... */ |
8413 | || /* ... or ... */ |
8414 | (new_no_footprint != old_no_footprint) /* ... new "no_footprint" */ |
8415 | || /* ... or ... */ |
8416 | new_ledger_tag != old_ledger_tag)) { /* ... new ledger */ |
8417 | /* |
8418 | * Add this object to the new owner's ledgers. |
8419 | */ |
vm_object_ledger_tag_ledgers(object,
&ledger_idx_volatile,
&ledger_idx_nonvolatile,
&ledger_idx_volatile_compressed,
&ledger_idx_nonvolatile_compressed,
&do_footprint);
8426 | if (object->purgable == VM_PURGABLE_VOLATILE || |
8427 | object->purgable == VM_PURGABLE_EMPTY) { |
8428 | ledger_idx = ledger_idx_volatile; |
8429 | ledger_idx_compressed = ledger_idx_volatile_compressed; |
8430 | } else { |
8431 | ledger_idx = ledger_idx_nonvolatile; |
8432 | ledger_idx_compressed = ledger_idx_nonvolatile_compressed; |
8433 | } |
8434 | if (resident_count) { |
8435 | /* |
* Adjust the appropriate new owner's ledgers by the
* number of resident pages.
*/
ledger_credit(new_owner->ledger,
ledger_idx,
ptoa_64(resident_count));
/* adjust new owner's footprint */
if (do_footprint &&
object->purgable != VM_PURGABLE_VOLATILE &&
object->purgable != VM_PURGABLE_EMPTY) {
ledger_credit(new_owner->ledger,
task_ledgers.phys_footprint,
ptoa_64(resident_count));
8449 | } |
8450 | } |
8451 | if (wired_count) { |
8452 | /* wired pages are always nonvolatile */ |
ledger_credit(new_owner->ledger,
ledger_idx_nonvolatile,
ptoa_64(wired_count));
if (do_footprint) {
ledger_credit(new_owner->ledger,
task_ledgers.phys_footprint,
ptoa_64(wired_count));
8460 | } |
8461 | } |
8462 | if (compressed_count) { |
8463 | /* |
8464 | * Adjust the new owner's ledgers by the number of |
8465 | * compressed pages. |
8466 | */ |
ledger_credit(new_owner->ledger,
ledger_idx_compressed,
ptoa_64(compressed_count));
if (do_footprint &&
object->purgable != VM_PURGABLE_VOLATILE &&
object->purgable != VM_PURGABLE_EMPTY) {
ledger_credit(new_owner->ledger,
task_ledgers.phys_footprint,
ptoa_64(compressed_count));
8476 | } |
8477 | } |
8478 | if (new_owner != old_owner) { |
8479 | /* add object to new_owner's list of owned objects */ |
8480 | DTRACE_VM2(object_owner_add, |
8481 | vm_object_t, object, |
8482 | task_t, new_owner); |
8483 | assert(new_task_objq_locked); |
8484 | new_owner->task_owned_objects++; |
8485 | queue_enter(&new_owner->task_objq, object, |
8486 | vm_object_t, task_objq); |
8487 | switch (object->purgable) { |
8488 | case VM_PURGABLE_NONVOLATILE: |
8489 | case VM_PURGABLE_EMPTY: |
vm_purgeable_nonvolatile_owner_update(new_owner,
+1);
break;
case VM_PURGABLE_VOLATILE:
vm_purgeable_volatile_owner_update(new_owner,
+1);
8496 | break; |
8497 | default: |
8498 | break; |
8499 | } |
8500 | } |
8501 | } |
8502 | |
8503 | if (new_task_objq_locked) { |
8504 | task_objq_unlock(new_owner); |
8505 | } |
8506 | |
8507 | return KERN_SUCCESS; |
8508 | } |
8509 | |
8510 | void |
8511 | vm_owned_objects_disown( |
8512 | task_t task) |
8513 | { |
8514 | vm_object_t next_object; |
8515 | vm_object_t object; |
8516 | int collisions; |
8517 | kern_return_t kr; |
8518 | |
8519 | if (task == NULL) { |
8520 | return; |
8521 | } |
8522 | |
8523 | collisions = 0; |
8524 | |
8525 | again: |
8526 | if (task->task_objects_disowned) { |
8527 | /* task has already disowned its owned objects */ |
8528 | assert(task->task_volatile_objects == 0); |
8529 | assert(task->task_nonvolatile_objects == 0); |
8530 | assert(task->task_owned_objects == 0); |
8531 | return; |
8532 | } |
8533 | |
8534 | task_objq_lock(task); |
8535 | |
8536 | task->task_objects_disowning = TRUE; |
8537 | |
8538 | for (object = (vm_object_t) queue_first(&task->task_objq); |
8539 | !queue_end(&task->task_objq, (queue_entry_t) object); |
8540 | object = next_object) { |
8541 | if (task->task_nonvolatile_objects == 0 && |
8542 | task->task_volatile_objects == 0 && |
8543 | task->task_owned_objects == 0) { |
8544 | /* no more objects owned by "task" */ |
8545 | break; |
8546 | } |
8547 | |
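/*
* Grab the next object now: the ownership change below takes "object"
* off this task's task_objq.
*/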
8548 | next_object = (vm_object_t) queue_next(&object->task_objq); |
8549 | |
8550 | #if DEBUG |
8551 | assert(object->vo_purgeable_volatilizer == NULL); |
8552 | #endif /* DEBUG */ |
8553 | assert(object->vo_owner == task); |
8554 | if (!vm_object_lock_try(object)) { |
8555 | task_objq_unlock(task); |
8556 | mutex_pause(collisions++); |
8557 | goto again; |
8558 | } |
8559 | /* transfer ownership to the kernel */ |
8560 | assert(VM_OBJECT_OWNER(object) != kernel_task); |
8561 | kr = vm_object_ownership_change( |
8562 | object, |
object->vo_ledger_tag, /* unchanged */
VM_OBJECT_OWNER_DISOWNED, /* new owner */
0, /* new_ledger_flags */
8566 | TRUE); /* old_owner->task_objq locked */ |
8567 | assert(kr == KERN_SUCCESS); |
8568 | assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED); |
8569 | vm_object_unlock(object); |
8570 | } |
8571 | |
8572 | if (__improbable(task->task_owned_objects != 0)) { |
8573 | panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p" , |
8574 | __FUNCTION__, |
8575 | task, |
8576 | task->task_volatile_objects, |
8577 | task->task_nonvolatile_objects, |
8578 | task->task_owned_objects, |
8579 | &task->task_objq, |
8580 | queue_first(&task->task_objq), |
8581 | queue_last(&task->task_objq)); |
8582 | } |
8583 | |
8584 | /* there shouldn't be any objects owned by task now */ |
8585 | assert(task->task_volatile_objects == 0); |
8586 | assert(task->task_nonvolatile_objects == 0); |
8587 | assert(task->task_owned_objects == 0); |
8588 | assert(task->task_objects_disowning); |
8589 | |
/* and we don't need to try to disown again */
8591 | task->task_objects_disowned = TRUE; |
8592 | |
8593 | task_objq_unlock(task); |
8594 | } |
8595 | |