1 | /* |
2 | * Copyright (c) 2000-2007 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | /* |
57 | */ |
58 | /* |
59 | * File: vm/vm_object.c |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
61 | * |
62 | * Virtual memory object module. |
63 | */ |
64 | |
65 | #include <debug.h> |
66 | #include <mach_pagemap.h> |
67 | #include <task_swapper.h> |
68 | |
69 | #include <mach/mach_types.h> |
70 | #include <mach/memory_object.h> |
71 | #include <mach/memory_object_default.h> |
72 | #include <mach/memory_object_control_server.h> |
73 | #include <mach/vm_param.h> |
74 | |
75 | #include <mach/sdt.h> |
76 | |
77 | #include <ipc/ipc_types.h> |
78 | #include <ipc/ipc_port.h> |
79 | |
80 | #include <kern/kern_types.h> |
81 | #include <kern/assert.h> |
82 | #include <kern/queue.h> |
83 | #include <kern/xpr.h> |
84 | #include <kern/kalloc.h> |
85 | #include <kern/zalloc.h> |
86 | #include <kern/host.h> |
87 | #include <kern/host_statistics.h> |
88 | #include <kern/processor.h> |
89 | #include <kern/misc_protos.h> |
90 | #include <kern/policy_internal.h> |
91 | |
92 | #include <vm/memory_object.h> |
93 | #include <vm/vm_compressor_pager.h> |
94 | #include <vm/vm_fault.h> |
95 | #include <vm/vm_map.h> |
96 | #include <vm/vm_object.h> |
97 | #include <vm/vm_page.h> |
98 | #include <vm/vm_pageout.h> |
99 | #include <vm/vm_protos.h> |
100 | #include <vm/vm_purgeable_internal.h> |
101 | |
102 | #include <vm/vm_compressor.h> |
103 | |
104 | #if CONFIG_PHANTOM_CACHE |
105 | #include <vm/vm_phantom_cache.h> |
106 | #endif |
107 | |
108 | #if VM_OBJECT_ACCESS_TRACKING |
109 | uint64_t vm_object_access_tracking_reads = 0; |
110 | uint64_t vm_object_access_tracking_writes = 0; |
111 | #endif /* VM_OBJECT_ACCESS_TRACKING */ |
112 | |
113 | boolean_t vm_object_collapse_compressor_allowed = TRUE; |
114 | |
115 | struct vm_counters vm_counters; |
116 | |
117 | #if VM_OBJECT_TRACKING |
118 | boolean_t vm_object_tracking_inited = FALSE; |
119 | btlog_t *vm_object_tracking_btlog; |
120 | |
121 | void |
122 | vm_object_tracking_init(void) |
123 | { |
124 | int vm_object_tracking; |
125 | |
126 | vm_object_tracking = 1; |
127 | PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking, |
128 | sizeof (vm_object_tracking)); |
129 | |
130 | if (vm_object_tracking) { |
131 | vm_object_tracking_btlog = btlog_create( |
132 | VM_OBJECT_TRACKING_NUM_RECORDS, |
133 | VM_OBJECT_TRACKING_BTDEPTH, |
134 | TRUE /* caller_will_remove_entries_for_element? */); |
135 | assert(vm_object_tracking_btlog); |
136 | vm_object_tracking_inited = TRUE; |
137 | } |
138 | } |
139 | #endif /* VM_OBJECT_TRACKING */ |
140 | |
141 | /* |
142 | * Virtual memory objects maintain the actual data |
143 | * associated with allocated virtual memory. A given |
144 | * page of memory exists within exactly one object. |
145 | * |
146 | * An object is only deallocated when all "references" |
147 | * are given up. |
148 | * |
149 | * Associated with each object is a list of all resident |
150 | * memory pages belonging to that object; this list is |
151 | * maintained by the "vm_page" module, but locked by the object's |
152 | * lock. |
153 | * |
154 | * Each object also records the memory object reference |
155 | * that is used by the kernel to request and write |
156 | * back data (the memory object, field "pager"), etc... |
157 | * |
158 | * Virtual memory objects are allocated to provide |
159 | * zero-filled memory (vm_allocate) or map a user-defined |
160 | * memory object into a virtual address space (vm_map). |
161 | * |
162 | * Virtual memory objects that refer to a user-defined |
163 | * memory object are called "permanent", because all changes |
164 | * made in virtual memory are reflected back to the |
165 | * memory manager, which may then store them permanently. |
166 | * Other virtual memory objects are called "temporary", |
167 | * meaning that changes need be written back only when |
168 | * necessary to reclaim pages, and that storage associated |
169 | * with the object can be discarded once it is no longer |
170 | * mapped. |
171 | * |
172 | * A permanent memory object may be mapped into more |
173 | * than one virtual address space. Moreover, two threads |
174 | * may attempt to make the first mapping of a memory |
175 | * object concurrently. Only one thread is allowed to |
176 | * complete this mapping; all others wait for the |
177 | * "pager_initialized" field to be asserted, indicating |
178 | * that the first thread has initialized all of the |
179 | * necessary fields in the virtual memory object structure. |
180 | * |
181 | * The kernel relies on a *default memory manager* to |
182 | * provide backing storage for the zero-filled virtual |
183 | * memory objects. The pager memory objects associated |
184 | * with these temporary virtual memory objects are only |
185 | * requested from the default memory manager when it |
186 | * becomes necessary. Virtual memory objects |
187 | * that depend on the default memory manager are called |
188 | * "internal". The "pager_created" field is provided to |
189 | * indicate whether these ports have ever been allocated. |
190 | * |
191 | * The kernel may also create virtual memory objects to |
192 | * hold changed pages after a copy-on-write operation. |
193 | * In this case, the virtual memory object (and its |
194 | * backing storage -- its memory object) only contain |
195 | * those pages that have been changed. The "shadow" |
196 | * field refers to the virtual memory object that contains |
197 | * the remainder of the contents. The "shadow_offset" |
198 | * field indicates where in the "shadow" these contents begin. |
199 | * The "copy" field refers to a virtual memory object |
200 | * to which changed pages must be copied before changing |
201 | * this object, in order to implement another form |
202 | * of copy-on-write optimization. |
203 | * |
204 | * The virtual memory object structure also records |
205 | * the attributes associated with its memory object. |
206 | * The "pager_ready", "can_persist" and "copy_strategy" |
207 | * fields represent those attributes. The "cached_list" |
208 | * field is used in the implementation of the persistence |
209 | * attribute. |
210 | * |
211 | * ZZZ Continue this comment. |
212 | */ |
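/*
 * Illustrative sketch (not part of this file's implementation): a
 * resident-page lookup conceptually walks the shadow chain, adding
 * "vo_shadow_offset" at each hop to translate the offset into the
 * shadow's coordinate space.  The field names below are real, but the
 * loop is a simplification of what vm_fault.c actually does (which
 * must also honor object locks, "copy" objects and the
 * paging_in_progress accounting):
 *
 *	vm_object_t        cur = object;
 *	vm_object_offset_t off = offset;
 *	vm_page_t          m;
 *
 *	while (cur != VM_OBJECT_NULL) {
 *		m = vm_page_lookup(cur, off);
 *		if (m != VM_PAGE_NULL)
 *			return m;                       // resident here
 *		off += cur->vo_shadow_offset;           // translate into shadow
 *		cur = cur->shadow;
 *	}
 *	// not resident anywhere: zero-fill or ask the pager
 */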
213 | |
214 | /* Forward declarations for internal functions. */ |
215 | static kern_return_t vm_object_terminate( |
216 | vm_object_t object); |
217 | |
218 | static kern_return_t vm_object_copy_call( |
219 | vm_object_t src_object, |
220 | vm_object_offset_t src_offset, |
221 | vm_object_size_t size, |
222 | vm_object_t *_result_object); |
223 | |
224 | static void vm_object_do_collapse( |
225 | vm_object_t object, |
226 | vm_object_t backing_object); |
227 | |
228 | static void vm_object_do_bypass( |
229 | vm_object_t object, |
230 | vm_object_t backing_object); |
231 | |
232 | static void vm_object_release_pager( |
233 | memory_object_t pager); |
234 | |
235 | zone_t vm_object_zone; /* vm backing store zone */ |
236 | |
237 | /* |
238 | * All wired-down kernel memory belongs to a single virtual |
239 | * memory object (kernel_object) to avoid wasting data structures. |
240 | */ |
241 | static struct vm_object kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); |
242 | vm_object_t kernel_object; |
243 | |
244 | static struct vm_object compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); |
245 | vm_object_t compressor_object = &compressor_object_store; |
246 | |
247 | /* |
248 | * The submap object is used as a placeholder for vm_map_submap |
249 | * operations. The object is declared in vm_map.c because it |
250 | * is exported by the vm_map module. The storage is declared |
251 | * here because it must be initialized here. |
252 | */ |
253 | static struct vm_object vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); |
254 | |
255 | /* |
256 | * Virtual memory objects are initialized from |
257 | * a template (see vm_object_allocate). |
258 | * |
259 | * When adding a new field to the virtual memory |
260 | * object structure, be sure to add initialization |
261 | * (see _vm_object_allocate()). |
262 | */ |
263 | static struct vm_object vm_object_template; |
264 | |
265 | unsigned int vm_page_purged_wired = 0; |
266 | unsigned int vm_page_purged_busy = 0; |
267 | unsigned int vm_page_purged_others = 0; |
268 | |
269 | static queue_head_t vm_object_cached_list; |
270 | static uint32_t vm_object_cache_pages_freed = 0; |
271 | static uint32_t vm_object_cache_pages_moved = 0; |
272 | static uint32_t vm_object_cache_pages_skipped = 0; |
273 | static uint32_t vm_object_cache_adds = 0; |
274 | static uint32_t vm_object_cached_count = 0; |
275 | static lck_mtx_t vm_object_cached_lock_data; |
276 | static lck_mtx_ext_t vm_object_cached_lock_data_ext; |
277 | |
278 | static uint32_t vm_object_page_grab_failed = 0; |
279 | static uint32_t vm_object_page_grab_skipped = 0; |
280 | static uint32_t vm_object_page_grab_returned = 0; |
281 | static uint32_t vm_object_page_grab_pmapped = 0; |
282 | static uint32_t vm_object_page_grab_reactivations = 0; |
283 | |
284 | #define vm_object_cache_lock_spin() \ |
285 | lck_mtx_lock_spin(&vm_object_cached_lock_data) |
286 | #define vm_object_cache_unlock() \ |
287 | lck_mtx_unlock(&vm_object_cached_lock_data) |
288 | |
289 | static void vm_object_cache_remove_locked(vm_object_t); |
290 | |
291 | |
292 | static void vm_object_reap(vm_object_t object); |
293 | static void vm_object_reap_async(vm_object_t object); |
294 | static void vm_object_reaper_thread(void); |
295 | |
296 | static lck_mtx_t vm_object_reaper_lock_data; |
297 | static lck_mtx_ext_t vm_object_reaper_lock_data_ext; |
298 | |
299 | static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */ |
300 | unsigned int vm_object_reap_count = 0; |
301 | unsigned int vm_object_reap_count_async = 0; |
302 | |
303 | #define vm_object_reaper_lock() \ |
304 | lck_mtx_lock(&vm_object_reaper_lock_data) |
305 | #define vm_object_reaper_lock_spin() \ |
306 | lck_mtx_lock_spin(&vm_object_reaper_lock_data) |
307 | #define vm_object_reaper_unlock() \ |
308 | lck_mtx_unlock(&vm_object_reaper_lock_data) |
309 | |
310 | #if CONFIG_IOSCHED |
311 | /* I/O Re-prioritization request list */ |
312 | queue_head_t io_reprioritize_list; |
313 | lck_spin_t io_reprioritize_list_lock; |
314 | |
315 | #define IO_REPRIORITIZE_LIST_LOCK() \ |
316 | lck_spin_lock(&io_reprioritize_list_lock) |
317 | #define IO_REPRIORITIZE_LIST_UNLOCK() \ |
318 | lck_spin_unlock(&io_reprioritize_list_lock) |
319 | |
320 | #define MAX_IO_REPRIORITIZE_REQS 8192 |
321 | zone_t io_reprioritize_req_zone; |
322 | |
323 | /* I/O Re-prioritization thread */ |
324 | int io_reprioritize_wakeup = 0; |
325 | static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused); |
326 | |
327 | #define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup) |
328 | #define IO_REPRIO_THREAD_CONTINUATION() \ |
329 | { \ |
330 | assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \ |
331 | thread_block(io_reprioritize_thread); \ |
332 | } |
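/*
 * Wakeup/continuation sketch (illustrative): a producer enqueues a
 * request on io_reprioritize_list under IO_REPRIORITIZE_LIST_LOCK()
 * and then calls IO_REPRIO_THREAD_WAKEUP(); io_reprioritize_thread()
 * drains the list and parks itself again with
 * IO_REPRIO_THREAD_CONTINUATION(), which waits on
 * &io_reprioritize_wakeup until the next wakeup.  The request type and
 * queue-chain field named below are placeholders:
 *
 *	IO_REPRIORITIZE_LIST_LOCK();
 *	queue_enter(&io_reprioritize_list, req,
 *	            io_reprioritize_req_t, io_reprioritize_list);
 *	IO_REPRIORITIZE_LIST_UNLOCK();
 *	IO_REPRIO_THREAD_WAKEUP();
 */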
333 | |
334 | void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int); |
335 | void vm_page_handle_prio_inversion(vm_object_t, vm_page_t); |
336 | void vm_decmp_upl_reprioritize(upl_t, int); |
337 | #endif |
338 | |
339 | #if 0 |
340 | #undef KERNEL_DEBUG |
341 | #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT |
342 | #endif |
343 | |
344 | |
345 | /* |
346 | * vm_object_allocate: |
347 | * |
348 | * Returns a new object with the given size. |
349 | */ |
350 | |
351 | __private_extern__ void |
352 | _vm_object_allocate( |
353 | vm_object_size_t size, |
354 | vm_object_t object) |
355 | { |
356 | XPR(XPR_VM_OBJECT, |
357 | "vm_object_allocate, object 0x%X size 0x%X\n", |
358 | object, size, 0,0,0); |
359 | |
360 | *object = vm_object_template; |
361 | vm_page_queue_init(&object->memq); |
362 | #if UPL_DEBUG || CONFIG_IOSCHED |
363 | queue_init(&object->uplq); |
364 | #endif |
365 | vm_object_lock_init(object); |
366 | object->vo_size = size; |
367 | |
368 | #if VM_OBJECT_TRACKING_OP_CREATED |
369 | if (vm_object_tracking_inited) { |
370 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; |
371 | int numsaved = 0; |
372 | |
373 | numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH); |
374 | btlog_add_entry(vm_object_tracking_btlog, |
375 | object, |
376 | VM_OBJECT_TRACKING_OP_CREATED, |
377 | bt, |
378 | numsaved); |
379 | } |
380 | #endif /* VM_OBJECT_TRACKING_OP_CREATED */ |
381 | } |
382 | |
383 | __private_extern__ vm_object_t |
384 | vm_object_allocate( |
385 | vm_object_size_t size) |
386 | { |
387 | vm_object_t object; |
388 | |
389 | object = (vm_object_t) zalloc(vm_object_zone); |
390 | |
391 | // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */ |
392 | |
393 | if (object != VM_OBJECT_NULL) |
394 | _vm_object_allocate(size, object); |
395 | |
396 | return object; |
397 | } |
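/*
 * Usage sketch (illustrative only): callers get one reference from
 * vm_object_allocate() and balance it with vm_object_deallocate()
 * once the object is no longer needed.  "length" is a placeholder for
 * whatever byte count the caller wants backed:
 *
 *	vm_object_t object;
 *
 *	object = vm_object_allocate(round_page(length));
 *	if (object == VM_OBJECT_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	...
 *	vm_object_deallocate(object);	// drop the allocation reference
 */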
398 | |
399 | |
400 | lck_grp_t vm_object_lck_grp; |
401 | lck_grp_t vm_object_cache_lck_grp; |
402 | lck_grp_attr_t vm_object_lck_grp_attr; |
403 | lck_attr_t vm_object_lck_attr; |
404 | lck_attr_t kernel_object_lck_attr; |
405 | lck_attr_t compressor_object_lck_attr; |
406 | |
407 | extern void vm_named_entry_init(void); |
408 | |
409 | int workaround_41447923 = 0; |
410 | |
411 | /* |
412 | * vm_object_bootstrap: |
413 | * |
414 | * Initialize the VM objects module. |
415 | */ |
416 | __private_extern__ void |
417 | vm_object_bootstrap(void) |
418 | { |
419 | vm_size_t vm_object_size; |
420 | |
421 | assert(sizeof (mo_ipc_object_bits_t) == sizeof (ipc_object_bits_t)); |
422 | |
423 | vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1); |
424 | |
425 | vm_object_zone = zinit(vm_object_size, |
426 | round_page(512*1024), |
427 | round_page(12*1024), |
428 | "vm objects"); |
429 | zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */ |
430 | zone_change(vm_object_zone, Z_NOENCRYPT, TRUE); |
431 | zone_change(vm_object_zone, Z_ALIGNMENT_REQUIRED, TRUE); |
432 | |
433 | vm_object_init_lck_grp(); |
434 | |
435 | queue_init(&vm_object_cached_list); |
436 | |
437 | lck_mtx_init_ext(&vm_object_cached_lock_data, |
438 | &vm_object_cached_lock_data_ext, |
439 | &vm_object_cache_lck_grp, |
440 | &vm_object_lck_attr); |
441 | |
442 | queue_init(&vm_object_reaper_queue); |
443 | |
444 | lck_mtx_init_ext(&vm_object_reaper_lock_data, |
445 | &vm_object_reaper_lock_data_ext, |
446 | &vm_object_lck_grp, |
447 | &vm_object_lck_attr); |
448 | |
449 | |
450 | /* |
451 | * Fill in a template object, for quick initialization |
452 | */ |
453 | |
454 | /* memq; Lock; init after allocation */ |
455 | |
456 | vm_object_template.memq.prev = 0; |
457 | vm_object_template.memq.next = 0; |
458 | #if 0 |
459 | /* |
460 | * We can't call vm_object_lock_init() here because that will |
461 | * allocate some memory and VM is not fully initialized yet. |
462 | * The lock will be initialized for each allocated object in |
463 | * _vm_object_allocate(), so we don't need to initialize it in |
464 | * the vm_object_template. |
465 | */ |
466 | vm_object_lock_init(&vm_object_template); |
467 | #endif |
468 | #if DEVELOPMENT || DEBUG |
469 | vm_object_template.Lock_owner = 0; |
470 | #endif |
471 | vm_object_template.vo_size = 0; |
472 | vm_object_template.memq_hint = VM_PAGE_NULL; |
473 | vm_object_template.ref_count = 1; |
474 | #if TASK_SWAPPER |
475 | vm_object_template.res_count = 1; |
476 | #endif /* TASK_SWAPPER */ |
477 | vm_object_template.resident_page_count = 0; |
478 | vm_object_template.wired_page_count = 0; |
479 | vm_object_template.reusable_page_count = 0; |
480 | vm_object_template.copy = VM_OBJECT_NULL; |
481 | vm_object_template.shadow = VM_OBJECT_NULL; |
482 | vm_object_template.vo_shadow_offset = (vm_object_offset_t) 0; |
483 | vm_object_template.pager = MEMORY_OBJECT_NULL; |
484 | vm_object_template.paging_offset = 0; |
485 | vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL; |
486 | vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC; |
487 | vm_object_template.paging_in_progress = 0; |
488 | #if __LP64__ |
489 | vm_object_template.__object1_unused_bits = 0; |
490 | #endif /* __LP64__ */ |
491 | vm_object_template.activity_in_progress = 0; |
492 | |
493 | /* Begin bitfields */ |
494 | vm_object_template.all_wanted = 0; /* all bits FALSE */ |
495 | vm_object_template.pager_created = FALSE; |
496 | vm_object_template.pager_initialized = FALSE; |
497 | vm_object_template.pager_ready = FALSE; |
498 | vm_object_template.pager_trusted = FALSE; |
499 | vm_object_template.can_persist = FALSE; |
500 | vm_object_template.internal = TRUE; |
501 | vm_object_template.private = FALSE; |
502 | vm_object_template.pageout = FALSE; |
503 | vm_object_template.alive = TRUE; |
504 | vm_object_template.purgable = VM_PURGABLE_DENY; |
505 | vm_object_template.purgeable_when_ripe = FALSE; |
506 | vm_object_template.purgeable_only_by_kernel = FALSE; |
507 | vm_object_template.shadowed = FALSE; |
508 | vm_object_template.true_share = FALSE; |
509 | vm_object_template.terminating = FALSE; |
510 | vm_object_template.named = FALSE; |
511 | vm_object_template.shadow_severed = FALSE; |
512 | vm_object_template.phys_contiguous = FALSE; |
513 | vm_object_template.nophyscache = FALSE; |
514 | /* End bitfields */ |
515 | |
516 | vm_object_template.cached_list.prev = NULL; |
517 | vm_object_template.cached_list.next = NULL; |
518 | |
519 | vm_object_template.last_alloc = (vm_object_offset_t) 0; |
520 | vm_object_template.sequential = (vm_object_offset_t) 0; |
521 | vm_object_template.pages_created = 0; |
522 | vm_object_template.pages_used = 0; |
523 | vm_object_template.scan_collisions = 0; |
524 | #if CONFIG_PHANTOM_CACHE |
525 | vm_object_template.phantom_object_id = 0; |
526 | #endif |
527 | vm_object_template.cow_hint = ~(vm_offset_t)0; |
528 | |
529 | /* cache bitfields */ |
530 | vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT; |
531 | vm_object_template.set_cache_attr = FALSE; |
532 | vm_object_template.object_is_shared_cache = FALSE; |
533 | vm_object_template.code_signed = FALSE; |
534 | vm_object_template.transposed = FALSE; |
535 | vm_object_template.mapping_in_progress = FALSE; |
536 | vm_object_template.phantom_isssd = FALSE; |
537 | vm_object_template.volatile_empty = FALSE; |
538 | vm_object_template.volatile_fault = FALSE; |
539 | vm_object_template.all_reusable = FALSE; |
540 | vm_object_template.blocked_access = FALSE; |
541 | vm_object_template.vo_ledger_tag = VM_OBJECT_LEDGER_TAG_NONE; |
542 | vm_object_template.__object2_unused_bits = 0; |
543 | #if CONFIG_IOSCHED || UPL_DEBUG |
544 | vm_object_template.uplq.prev = NULL; |
545 | vm_object_template.uplq.next = NULL; |
546 | #endif /* UPL_DEBUG */ |
547 | #ifdef VM_PIP_DEBUG |
548 | bzero(&vm_object_template.pip_holders, |
549 | sizeof (vm_object_template.pip_holders)); |
550 | #endif /* VM_PIP_DEBUG */ |
551 | |
552 | vm_object_template.objq.next = NULL; |
553 | vm_object_template.objq.prev = NULL; |
554 | vm_object_template.task_objq.next = NULL; |
555 | vm_object_template.task_objq.prev = NULL; |
556 | |
557 | vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX; |
558 | vm_object_template.purgeable_queue_group = 0; |
559 | |
560 | vm_object_template.vo_cache_ts = 0; |
561 | |
562 | vm_object_template.wire_tag = VM_KERN_MEMORY_NONE; |
563 | #if ! VM_TAG_ACTIVE_UPDATE |
564 | vm_object_template.wired_objq.next = NULL; |
565 | vm_object_template.wired_objq.prev = NULL; |
566 | #endif /* ! VM_TAG_ACTIVE_UPDATE */ |
567 | |
568 | vm_object_template.io_tracking = FALSE; |
569 | |
570 | #if CONFIG_SECLUDED_MEMORY |
571 | vm_object_template.eligible_for_secluded = FALSE; |
572 | vm_object_template.can_grab_secluded = FALSE; |
573 | #else /* CONFIG_SECLUDED_MEMORY */ |
574 | vm_object_template.__object3_unused_bits = 0; |
575 | #endif /* CONFIG_SECLUDED_MEMORY */ |
576 | |
577 | #if VM_OBJECT_ACCESS_TRACKING |
578 | vm_object_template.access_tracking = FALSE; |
579 | vm_object_template.access_tracking_reads = 0; |
580 | vm_object_template.access_tracking_writes = 0; |
581 | #endif /* VM_OBJECT_ACCESS_TRACKING */ |
582 | |
583 | #if DEBUG |
584 | bzero(&vm_object_template.purgeable_owner_bt[0], |
585 | sizeof (vm_object_template.purgeable_owner_bt)); |
586 | vm_object_template.vo_purgeable_volatilizer = NULL; |
587 | bzero(&vm_object_template.purgeable_volatilizer_bt[0], |
588 | sizeof (vm_object_template.purgeable_volatilizer_bt)); |
589 | #endif /* DEBUG */ |
590 | |
591 | /* |
592 | * Initialize the "kernel object" |
593 | */ |
594 | |
595 | kernel_object = &kernel_object_store; |
596 | |
597 | /* |
598 | * Note that in the following size specifications, we need to add 1 because |
599 | * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size. |
600 | */ |
601 | |
602 | _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, |
603 | kernel_object); |
604 | |
605 | _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, |
606 | compressor_object); |
607 | kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
608 | compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
609 | kernel_object->no_tag_update = TRUE; |
610 | |
611 | /* |
612 | * Initialize the "submap object". Make it as large as the |
613 | * kernel object so that no limit is imposed on submap sizes. |
614 | */ |
615 | |
616 | vm_submap_object = &vm_submap_object_store; |
617 | _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, |
618 | vm_submap_object); |
619 | vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
620 | |
621 | /* |
622 | * Create an "extra" reference to this object so that we never |
623 | * try to deallocate it; zfree doesn't like to be called with |
624 | * non-zone memory. |
625 | */ |
626 | vm_object_reference(vm_submap_object); |
627 | |
628 | vm_named_entry_init(); |
629 | |
630 | PE_parse_boot_argn("workaround_41447923", &workaround_41447923, |
631 | sizeof (workaround_41447923)); |
632 | } |
633 | |
634 | #if CONFIG_IOSCHED |
635 | void |
636 | vm_io_reprioritize_init(void) |
637 | { |
638 | kern_return_t result; |
639 | thread_t thread = THREAD_NULL; |
640 | |
641 | /* Initialize the I/O reprioritization subsystem */ |
642 | lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr); |
643 | queue_init(&io_reprioritize_list); |
644 | |
645 | io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req), |
646 | MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req), |
647 | 4096, "io_reprioritize_req"); |
648 | zone_change(io_reprioritize_req_zone, Z_COLLECT, FALSE); |
649 | |
650 | result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread); |
651 | if (result == KERN_SUCCESS) { |
652 | thread_deallocate(thread); |
653 | } else { |
654 | panic("Could not create io_reprioritize_thread"); |
655 | } |
656 | } |
657 | #endif |
658 | |
659 | void |
660 | vm_object_reaper_init(void) |
661 | { |
662 | kern_return_t kr; |
663 | thread_t thread; |
664 | |
665 | kr = kernel_thread_start_priority( |
666 | (thread_continue_t) vm_object_reaper_thread, |
667 | NULL, |
668 | BASEPRI_VM, |
669 | &thread); |
670 | if (kr != KERN_SUCCESS) { |
671 | panic("failed to launch vm_object_reaper_thread kr=0x%x", kr); |
672 | } |
673 | thread_deallocate(thread); |
674 | } |
675 | |
676 | __private_extern__ void |
677 | vm_object_init(void) |
678 | { |
679 | /* |
680 | * Finish initializing the kernel object. |
681 | */ |
682 | } |
683 | |
684 | |
685 | __private_extern__ void |
686 | vm_object_init_lck_grp(void) |
687 | { |
688 | /* |
689 | * initialize the vm_object lock world |
690 | */ |
691 | lck_grp_attr_setdefault(&vm_object_lck_grp_attr); |
692 | lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr); |
693 | lck_grp_init(&vm_object_cache_lck_grp, "vm_object_cache", &vm_object_lck_grp_attr); |
694 | lck_attr_setdefault(&vm_object_lck_attr); |
695 | lck_attr_setdefault(&kernel_object_lck_attr); |
696 | lck_attr_cleardebug(&kernel_object_lck_attr); |
697 | lck_attr_setdefault(&compressor_object_lck_attr); |
698 | lck_attr_cleardebug(&compressor_object_lck_attr); |
699 | } |
700 | |
701 | |
702 | /* |
703 | * vm_object_deallocate: |
704 | * |
705 | * Release a reference to the specified object, |
706 | * gained either through a vm_object_allocate |
707 | * or a vm_object_reference call. When all references |
708 | * are gone, storage associated with this object |
709 | * may be relinquished. |
710 | * |
711 | * No object may be locked. |
712 | */ |
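/*
 * Caller-side sketch (illustrative): a reference obtained through
 * vm_object_reference() or vm_object_allocate() is released with
 * vm_object_deallocate(), called with no object locks held:
 *
 *	vm_object_reference(object);	// keep the object alive
 *	... use the object, locking/unlocking it as needed ...
 *	vm_object_deallocate(object);	// may terminate it if last ref
 */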
713 | unsigned long vm_object_deallocate_shared_successes = 0; |
714 | unsigned long vm_object_deallocate_shared_failures = 0; |
715 | unsigned long vm_object_deallocate_shared_swap_failures = 0; |
716 | |
717 | __private_extern__ void |
718 | vm_object_deallocate( |
719 | vm_object_t object) |
720 | { |
721 | vm_object_t shadow = VM_OBJECT_NULL; |
722 | |
723 | // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */ |
724 | // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */ |
725 | |
726 | if (object == VM_OBJECT_NULL) |
727 | return; |
728 | |
729 | if (object == kernel_object || object == compressor_object) { |
730 | vm_object_lock_shared(object); |
731 | |
732 | OSAddAtomic(-1, &object->ref_count); |
733 | |
734 | if (object->ref_count == 0) { |
735 | if (object == kernel_object) |
736 | panic("vm_object_deallocate: losing kernel_object\n" ); |
737 | else |
738 | panic("vm_object_deallocate: losing compressor_object\n" ); |
739 | } |
740 | vm_object_unlock(object); |
741 | return; |
742 | } |
743 | |
744 | if (object->ref_count == 2 && |
745 | object->named) { |
746 | /* |
747 | * This "named" object's reference count is about to |
748 | * drop from 2 to 1: |
749 | * we'll need to call memory_object_last_unmap(). |
750 | */ |
751 | } else if (object->ref_count == 2 && |
752 | object->internal && |
753 | object->shadow != VM_OBJECT_NULL) { |
754 | /* |
755 | * This internal object's reference count is about to |
756 | * drop from 2 to 1 and it has a shadow object: |
757 | * we'll want to try and collapse this object with its |
758 | * shadow. |
759 | */ |
760 | } else if (object->ref_count >= 2) { |
761 | UInt32 original_ref_count; |
762 | volatile UInt32 *ref_count_p; |
763 | Boolean atomic_swap; |
764 | |
765 | /* |
766 | * The object currently looks like it is not being |
767 | * kept alive solely by the reference we're about to release. |
768 | * Let's try and release our reference without taking |
769 | * all the locks we would need if we had to terminate the |
770 | * object (cache lock + exclusive object lock). |
771 | * Lock the object "shared" to make sure we don't race with |
772 | * anyone holding it "exclusive". |
773 | */ |
774 | vm_object_lock_shared(object); |
775 | ref_count_p = (volatile UInt32 *) &object->ref_count; |
776 | original_ref_count = object->ref_count; |
777 | /* |
778 | * Test again as "ref_count" could have changed. |
779 | * "named" shouldn't change. |
780 | */ |
781 | if (original_ref_count == 2 && |
782 | object->named) { |
783 | /* need to take slow path for m_o_last_unmap() */ |
784 | atomic_swap = FALSE; |
785 | } else if (original_ref_count == 2 && |
786 | object->internal && |
787 | object->shadow != VM_OBJECT_NULL) { |
788 | /* need to take slow path for vm_object_collapse() */ |
789 | atomic_swap = FALSE; |
790 | } else if (original_ref_count < 2) { |
791 | /* need to take slow path for vm_object_terminate() */ |
792 | atomic_swap = FALSE; |
793 | } else { |
794 | /* try an atomic update with the shared lock */ |
795 | atomic_swap = OSCompareAndSwap( |
796 | original_ref_count, |
797 | original_ref_count - 1, |
798 | (UInt32 *) &object->ref_count); |
799 | if (atomic_swap == FALSE) { |
800 | vm_object_deallocate_shared_swap_failures++; |
801 | /* fall back to the slow path... */ |
802 | } |
803 | } |
804 | |
805 | vm_object_unlock(object); |
806 | |
807 | if (atomic_swap) { |
808 | /* |
809 | * ref_count was updated atomically ! |
810 | */ |
811 | vm_object_deallocate_shared_successes++; |
812 | return; |
813 | } |
814 | |
815 | /* |
816 | * Someone else updated the ref_count at the same |
817 | * time and we lost the race. Fall back to the usual |
818 | * slow but safe path... |
819 | */ |
820 | vm_object_deallocate_shared_failures++; |
821 | } |
822 | |
823 | while (object != VM_OBJECT_NULL) { |
824 | |
825 | vm_object_lock(object); |
826 | |
827 | assert(object->ref_count > 0); |
828 | |
829 | /* |
830 | * If the object has a named reference, and only |
831 | * that reference would remain, inform the pager |
832 | * about the last "mapping" reference going away. |
833 | */ |
834 | if ((object->ref_count == 2) && (object->named)) { |
835 | memory_object_t pager = object->pager; |
836 | |
837 | /* Notify the Pager that there are no */ |
838 | /* more mappers for this object */ |
839 | |
840 | if (pager != MEMORY_OBJECT_NULL) { |
841 | vm_object_mapping_wait(object, THREAD_UNINT); |
842 | vm_object_mapping_begin(object); |
843 | vm_object_unlock(object); |
844 | |
845 | memory_object_last_unmap(pager); |
846 | |
847 | vm_object_lock(object); |
848 | vm_object_mapping_end(object); |
849 | } |
850 | assert(object->ref_count > 0); |
851 | } |
852 | |
853 | /* |
854 | * Lose the reference. If other references |
855 | * remain, then we are done, unless we need |
856 | * to retry a cache trim. |
857 | * If it is the last reference, then keep it |
858 | * until any pending initialization is completed. |
859 | */ |
860 | |
861 | /* if the object is terminating, it cannot go into */ |
862 | /* the cache and we obviously should not call */ |
863 | /* terminate again. */ |
864 | |
865 | if ((object->ref_count > 1) || object->terminating) { |
866 | vm_object_lock_assert_exclusive(object); |
867 | object->ref_count--; |
868 | vm_object_res_deallocate(object); |
869 | |
870 | if (object->ref_count == 1 && |
871 | object->shadow != VM_OBJECT_NULL) { |
872 | /* |
873 | * There's only one reference left on this |
874 | * VM object. We can't tell if it's a valid |
875 | * one (from a mapping for example) or if this |
876 | * object is just part of a possibly stale and |
877 | * useless shadow chain. |
878 | * We would like to try and collapse it into |
879 | * its parent, but we don't have any pointers |
880 | * back to this parent object. |
881 | * But we can try and collapse this object with |
882 | * its own shadows, in case these are useless |
883 | * too... |
884 | * We can't bypass this object though, since we |
885 | * don't know if this last reference on it is |
886 | * meaningful or not. |
887 | */ |
888 | vm_object_collapse(object, 0, FALSE); |
889 | } |
890 | vm_object_unlock(object); |
891 | return; |
892 | } |
893 | |
894 | /* |
895 | * We have to wait for initialization |
896 | * before destroying or caching the object. |
897 | */ |
898 | |
899 | if (object->pager_created && ! object->pager_initialized) { |
900 | assert(! object->can_persist); |
901 | vm_object_assert_wait(object, |
902 | VM_OBJECT_EVENT_INITIALIZED, |
903 | THREAD_UNINT); |
904 | vm_object_unlock(object); |
905 | |
906 | thread_block(THREAD_CONTINUE_NULL); |
907 | continue; |
908 | } |
909 | |
910 | XPR(XPR_VM_OBJECT, |
911 | "vm_o_deallocate: 0x%X res %d paging_ops %d thread 0x%p ref %d\n", |
912 | object, object->resident_page_count, |
913 | object->paging_in_progress, |
914 | (void *)current_thread(),object->ref_count); |
915 | |
916 | VM_OBJ_RES_DECR(object); /* XXX ? */ |
917 | /* |
918 | * Terminate this object. If it had a shadow, |
919 | * then deallocate it; otherwise, if we need |
920 | * to retry a cache trim, do so now; otherwise, |
921 | * we are done. "pageout" objects have a shadow, |
922 | * but maintain a "paging reference" rather than |
923 | * a normal reference. |
924 | */ |
925 | shadow = object->pageout?VM_OBJECT_NULL:object->shadow; |
926 | |
927 | if (vm_object_terminate(object) != KERN_SUCCESS) { |
928 | return; |
929 | } |
930 | if (shadow != VM_OBJECT_NULL) { |
931 | object = shadow; |
932 | continue; |
933 | } |
934 | return; |
935 | } |
936 | } |
937 | |
938 | |
939 | |
940 | vm_page_t |
941 | vm_object_page_grab( |
942 | vm_object_t object) |
943 | { |
944 | vm_page_t p, next_p; |
945 | int p_limit = 0; |
946 | int p_skipped = 0; |
947 | |
948 | vm_object_lock_assert_exclusive(object); |
949 | |
950 | next_p = (vm_page_t)vm_page_queue_first(&object->memq); |
951 | p_limit = MIN(50, object->resident_page_count); |
952 | |
953 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) { |
954 | |
955 | p = next_p; |
956 | next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq); |
957 | |
958 | if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) |
959 | goto move_page_in_obj; |
960 | |
961 | if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) { |
962 | vm_page_lockspin_queues(); |
963 | |
964 | if (p->vmp_pmapped) { |
965 | int refmod_state; |
966 | |
967 | vm_object_page_grab_pmapped++; |
968 | |
969 | if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) { |
970 | |
971 | refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p)); |
972 | |
973 | if (refmod_state & VM_MEM_REFERENCED) |
974 | p->vmp_reference = TRUE; |
975 | if (refmod_state & VM_MEM_MODIFIED) { |
976 | SET_PAGE_DIRTY(p, FALSE); |
977 | } |
978 | } |
979 | if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) { |
980 | |
981 | refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); |
982 | |
983 | if (refmod_state & VM_MEM_REFERENCED) |
984 | p->vmp_reference = TRUE; |
985 | if (refmod_state & VM_MEM_MODIFIED) { |
986 | SET_PAGE_DIRTY(p, FALSE); |
987 | } |
988 | |
989 | if (p->vmp_dirty == FALSE) |
990 | goto take_page; |
991 | } |
992 | } |
993 | if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) { |
994 | vm_page_activate(p); |
995 | |
996 | VM_STAT_INCR(reactivations); |
997 | vm_object_page_grab_reactivations++; |
998 | } |
999 | vm_page_unlock_queues(); |
1000 | move_page_in_obj: |
1001 | vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); |
1002 | vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); |
1003 | |
1004 | p_skipped++; |
1005 | continue; |
1006 | } |
1007 | vm_page_lockspin_queues(); |
1008 | take_page: |
1009 | vm_page_free_prepare_queues(p); |
1010 | vm_object_page_grab_returned++; |
1011 | vm_object_page_grab_skipped += p_skipped; |
1012 | |
1013 | vm_page_unlock_queues(); |
1014 | |
1015 | vm_page_free_prepare_object(p, TRUE); |
1016 | |
1017 | return (p); |
1018 | } |
1019 | vm_object_page_grab_skipped += p_skipped; |
1020 | vm_object_page_grab_failed++; |
1021 | |
1022 | return (NULL); |
1023 | } |
1024 | |
1025 | |
1026 | |
1027 | #define EVICT_PREPARE_LIMIT 64 |
1028 | #define EVICT_AGE 10 |
1029 | |
1030 | static clock_sec_t vm_object_cache_aging_ts = 0; |
1031 | |
1032 | static void |
1033 | vm_object_cache_remove_locked( |
1034 | vm_object_t object) |
1035 | { |
1036 | assert(object->purgable == VM_PURGABLE_DENY); |
1037 | |
1038 | queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list); |
1039 | object->cached_list.next = NULL; |
1040 | object->cached_list.prev = NULL; |
1041 | |
1042 | vm_object_cached_count--; |
1043 | } |
1044 | |
1045 | void |
1046 | vm_object_cache_remove( |
1047 | vm_object_t object) |
1048 | { |
1049 | vm_object_cache_lock_spin(); |
1050 | |
1051 | if (object->cached_list.next && |
1052 | object->cached_list.prev) |
1053 | vm_object_cache_remove_locked(object); |
1054 | |
1055 | vm_object_cache_unlock(); |
1056 | } |
1057 | |
1058 | void |
1059 | vm_object_cache_add( |
1060 | vm_object_t object) |
1061 | { |
1062 | clock_sec_t sec; |
1063 | clock_nsec_t nsec; |
1064 | |
1065 | assert(object->purgable == VM_PURGABLE_DENY); |
1066 | |
1067 | if (object->resident_page_count == 0) |
1068 | return; |
1069 | clock_get_system_nanotime(&sec, &nsec); |
1070 | |
1071 | vm_object_cache_lock_spin(); |
1072 | |
1073 | if (object->cached_list.next == NULL && |
1074 | object->cached_list.prev == NULL) { |
1075 | queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list); |
1076 | object->vo_cache_ts = sec + EVICT_AGE; |
1077 | object->vo_cache_pages_to_scan = object->resident_page_count; |
1078 | |
1079 | vm_object_cached_count++; |
1080 | vm_object_cache_adds++; |
1081 | } |
1082 | vm_object_cache_unlock(); |
1083 | } |
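/*
 * Aging arithmetic (illustrative): with EVICT_AGE of 10 seconds, an
 * object added to the cache at sec == 100 gets vo_cache_ts == 110 and
 * is not touched by vm_object_cache_evict() until the system clock
 * reaches 110.  vm_object_cache_evict() also remembers the head
 * object's timestamp in vm_object_cache_aging_ts, so later calls can
 * bail out cheaply until that time has passed.
 */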
1084 | |
1085 | int |
1086 | vm_object_cache_evict( |
1087 | int num_to_evict, |
1088 | int max_objects_to_examine) |
1089 | { |
1090 | vm_object_t object = VM_OBJECT_NULL; |
1091 | vm_object_t next_obj = VM_OBJECT_NULL; |
1092 | vm_page_t local_free_q = VM_PAGE_NULL; |
1093 | vm_page_t p; |
1094 | vm_page_t next_p; |
1095 | int object_cnt = 0; |
1096 | vm_page_t ep_array[EVICT_PREPARE_LIMIT]; |
1097 | int ep_count; |
1098 | int ep_limit; |
1099 | int ep_index; |
1100 | int ep_freed = 0; |
1101 | int ep_moved = 0; |
1102 | uint32_t ep_skipped = 0; |
1103 | clock_sec_t sec; |
1104 | clock_nsec_t nsec; |
1105 | |
1106 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0); |
1107 | /* |
1108 | * do a couple of quick checks to see if it's |
1109 | * worthwhile grabbing the lock |
1110 | */ |
1111 | if (queue_empty(&vm_object_cached_list)) { |
1112 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0); |
1113 | return (0); |
1114 | } |
1115 | clock_get_system_nanotime(&sec, &nsec); |
1116 | |
1117 | /* |
1118 | * the object on the head of the queue has not |
1119 | * yet sufficiently aged |
1120 | */ |
1121 | if (sec < vm_object_cache_aging_ts) { |
1122 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0); |
1123 | return (0); |
1124 | } |
1125 | /* |
1126 | * don't need the queue lock to find |
1127 | * and lock an object on the cached list |
1128 | */ |
1129 | vm_page_unlock_queues(); |
1130 | |
1131 | vm_object_cache_lock_spin(); |
1132 | |
1133 | for (;;) { |
1134 | next_obj = (vm_object_t)queue_first(&vm_object_cached_list); |
1135 | |
1136 | while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) { |
1137 | |
1138 | object = next_obj; |
1139 | next_obj = (vm_object_t)queue_next(&next_obj->cached_list); |
1140 | |
1141 | assert(object->purgable == VM_PURGABLE_DENY); |
1142 | |
1143 | if (sec < object->vo_cache_ts) { |
1144 | KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0); |
1145 | |
1146 | vm_object_cache_aging_ts = object->vo_cache_ts; |
1147 | object = VM_OBJECT_NULL; |
1148 | break; |
1149 | } |
1150 | if (!vm_object_lock_try_scan(object)) { |
1151 | /* |
1152 | * just skip over this guy for now... if we find |
1153 | * an object to steal pages from, we'll revisit in a bit... |
1154 | * hopefully, the lock will have cleared |
1155 | */ |
1156 | KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0); |
1157 | |
1158 | object = VM_OBJECT_NULL; |
1159 | continue; |
1160 | } |
1161 | if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) { |
1162 | /* |
1163 | * this case really shouldn't happen, but it's not fatal |
1164 | * so deal with it... if we don't remove the object from |
1165 | * the list, we'll never move past it. |
1166 | */ |
1167 | KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1168 | |
1169 | vm_object_cache_remove_locked(object); |
1170 | vm_object_unlock(object); |
1171 | object = VM_OBJECT_NULL; |
1172 | continue; |
1173 | } |
1174 | /* |
1175 | * we have a locked object with pages... |
1176 | * time to start harvesting |
1177 | */ |
1178 | break; |
1179 | } |
1180 | vm_object_cache_unlock(); |
1181 | |
1182 | if (object == VM_OBJECT_NULL) |
1183 | break; |
1184 | |
1185 | /* |
1186 | * object is locked at this point and |
1187 | * has resident pages |
1188 | */ |
1189 | next_p = (vm_page_t)vm_page_queue_first(&object->memq); |
1190 | |
1191 | /* |
1192 | * break the page scan into 2 pieces to minimize the time spent |
1193 | * behind the page queue lock... |
1194 | * the list of pages on these unused objects is likely to be cold |
1195 | * w/r to the cpu cache which increases the time to scan the list |
1196 | * tenfold... and we may have a 'run' of pages we can't utilize that |
1197 | * needs to be skipped over... |
1198 | */ |
1199 | if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) |
1200 | ep_limit = EVICT_PREPARE_LIMIT; |
1201 | ep_count = 0; |
1202 | |
1203 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) { |
1204 | |
1205 | p = next_p; |
1206 | next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq); |
1207 | |
1208 | object->vo_cache_pages_to_scan--; |
1209 | |
1210 | if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) { |
1211 | vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); |
1212 | vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); |
1213 | |
1214 | ep_skipped++; |
1215 | continue; |
1216 | } |
1217 | if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) { |
1218 | vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); |
1219 | vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); |
1220 | |
1221 | pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p)); |
1222 | } |
1223 | ep_array[ep_count++] = p; |
1224 | } |
1225 | KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1226 | |
1227 | vm_page_lockspin_queues(); |
1228 | |
1229 | for (ep_index = 0; ep_index < ep_count; ep_index++) { |
1230 | |
1231 | p = ep_array[ep_index]; |
1232 | |
1233 | if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) { |
1234 | p->vmp_reference = FALSE; |
1235 | p->vmp_no_cache = FALSE; |
1236 | |
1237 | /* |
1238 | * we've already filtered out pages that are in the laundry |
1239 | * so if we get here, this page can't be on the pageout queue |
1240 | */ |
1241 | vm_page_queues_remove(p, FALSE); |
1242 | vm_page_enqueue_inactive(p, TRUE); |
1243 | |
1244 | ep_moved++; |
1245 | } else { |
1246 | #if CONFIG_PHANTOM_CACHE |
1247 | vm_phantom_cache_add_ghost(p); |
1248 | #endif |
1249 | vm_page_free_prepare_queues(p); |
1250 | |
1251 | assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0); |
1252 | /* |
1253 | * Add this page to our list of reclaimed pages, |
1254 | * to be freed later. |
1255 | */ |
1256 | p->vmp_snext = local_free_q; |
1257 | local_free_q = p; |
1258 | |
1259 | ep_freed++; |
1260 | } |
1261 | } |
1262 | vm_page_unlock_queues(); |
1263 | |
1264 | KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1265 | |
1266 | if (local_free_q) { |
1267 | vm_page_free_list(local_free_q, TRUE); |
1268 | local_free_q = VM_PAGE_NULL; |
1269 | } |
1270 | if (object->vo_cache_pages_to_scan == 0) { |
1271 | KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1272 | |
1273 | vm_object_cache_remove(object); |
1274 | |
1275 | KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0); |
1276 | } |
1277 | /* |
1278 | * done with this object |
1279 | */ |
1280 | vm_object_unlock(object); |
1281 | object = VM_OBJECT_NULL; |
1282 | |
1283 | /* |
1284 | * at this point, we are not holding any locks |
1285 | */ |
1286 | if ((ep_freed + ep_moved) >= num_to_evict) { |
1287 | /* |
1288 | * we've reached our target for the |
1289 | * number of pages to evict |
1290 | */ |
1291 | break; |
1292 | } |
1293 | vm_object_cache_lock_spin(); |
1294 | } |
1295 | /* |
1296 | * put the page queues lock back to the caller's |
1297 | * idea of it |
1298 | */ |
1299 | vm_page_lock_queues(); |
1300 | |
1301 | vm_object_cache_pages_freed += ep_freed; |
1302 | vm_object_cache_pages_moved += ep_moved; |
1303 | vm_object_cache_pages_skipped += ep_skipped; |
1304 | |
1305 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0); |
1306 | return (ep_freed); |
1307 | } |
1308 | |
1309 | /* |
1310 | * Routine: vm_object_terminate |
1311 | * Purpose: |
1312 | * Free all resources associated with a vm_object. |
1313 | * In/out conditions: |
1314 | * Upon entry, the object must be locked, |
1315 | * and the object must have exactly one reference. |
1316 | * |
1317 | * The shadow object reference is left alone. |
1318 | * |
1319 | * The object must be unlocked if it's found that pages |
1320 | * must be flushed to a backing object. If someone |
1321 | * manages to map the object while it is being flushed |
1322 | * the object is returned unlocked and unchanged. Otherwise, |
1323 | * upon exit, the cache will be unlocked, and the |
1324 | * object will cease to exist. |
1325 | */ |
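/*
 * Caller contract sketch (illustrative; this mirrors the code in
 * vm_object_deallocate() above):
 *
 *	shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
 *	if (vm_object_terminate(object) != KERN_SUCCESS)
 *		return;		// deferred to the reaper thread, or raced;
 *				// do not drop the "shadow" reference here
 *	if (shadow != VM_OBJECT_NULL)
 *		... continue deallocation with "shadow" ...
 */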
1326 | static kern_return_t |
1327 | vm_object_terminate( |
1328 | vm_object_t object) |
1329 | { |
1330 | vm_object_t shadow_object; |
1331 | |
1332 | XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n", |
1333 | object, object->ref_count, 0, 0, 0); |
1334 | |
1335 | vm_object_lock_assert_exclusive(object); |
1336 | |
1337 | if (!object->pageout && (!object->internal && object->can_persist) && |
1338 | (object->pager != NULL || object->shadow_severed)) { |
1339 | /* |
1340 | * Clear pager_trusted bit so that the pages get yanked |
1341 | * out of the object instead of cleaned in place. This |
1342 | * prevents a deadlock in XMM and makes more sense anyway. |
1343 | */ |
1344 | object->pager_trusted = FALSE; |
1345 | |
1346 | vm_object_reap_pages(object, REAP_TERMINATE); |
1347 | } |
1348 | /* |
1349 | * Make sure the object isn't already being terminated |
1350 | */ |
1351 | if (object->terminating) { |
1352 | vm_object_lock_assert_exclusive(object); |
1353 | object->ref_count--; |
1354 | assert(object->ref_count > 0); |
1355 | vm_object_unlock(object); |
1356 | return KERN_FAILURE; |
1357 | } |
1358 | |
1359 | /* |
1360 | * Did somebody get a reference to the object while we were |
1361 | * cleaning it? |
1362 | */ |
1363 | if (object->ref_count != 1) { |
1364 | vm_object_lock_assert_exclusive(object); |
1365 | object->ref_count--; |
1366 | assert(object->ref_count > 0); |
1367 | vm_object_res_deallocate(object); |
1368 | vm_object_unlock(object); |
1369 | return KERN_FAILURE; |
1370 | } |
1371 | |
1372 | /* |
1373 | * Make sure no one can look us up now. |
1374 | */ |
1375 | |
1376 | object->terminating = TRUE; |
1377 | object->alive = FALSE; |
1378 | |
1379 | if (!object->internal && |
1380 | object->cached_list.next && |
1381 | object->cached_list.prev) |
1382 | vm_object_cache_remove(object); |
1383 | |
1384 | /* |
1385 | * Detach the object from its shadow if we are the shadow's |
1386 | * copy. The reference we hold on the shadow must be dropped |
1387 | * by our caller. |
1388 | */ |
1389 | if (((shadow_object = object->shadow) != VM_OBJECT_NULL) && |
1390 | !(object->pageout)) { |
1391 | vm_object_lock(shadow_object); |
1392 | if (shadow_object->copy == object) |
1393 | shadow_object->copy = VM_OBJECT_NULL; |
1394 | vm_object_unlock(shadow_object); |
1395 | } |
1396 | |
1397 | if (object->paging_in_progress != 0 || |
1398 | object->activity_in_progress != 0) { |
1399 | /* |
1400 | * There are still some paging_in_progress references |
1401 | * on this object, meaning that there are some paging |
1402 | * or other I/O operations in progress for this VM object. |
1403 | * Such operations take some paging_in_progress references |
1404 | * up front to ensure that the object doesn't go away, but |
1405 | * they may also need to acquire a reference on the VM object, |
1406 | * to map it in kernel space, for example. That means that |
1407 | * they may end up releasing the last reference on the VM |
1408 | * object, triggering its termination, while still holding |
1409 | * paging_in_progress references. Waiting for these |
1410 | * pending paging_in_progress references to go away here would |
1411 | * deadlock. |
1412 | * |
1413 | * To avoid deadlocking, we'll let the vm_object_reaper_thread |
1414 | * complete the VM object termination if it still holds |
1415 | * paging_in_progress references at this point. |
1416 | * |
1417 | * No new paging_in_progress should appear now that the |
1418 | * VM object is "terminating" and not "alive". |
1419 | */ |
1420 | vm_object_reap_async(object); |
1421 | vm_object_unlock(object); |
1422 | /* |
1423 | * Return KERN_FAILURE to let the caller know that we |
1424 | * haven't completed the termination and it can't drop this |
1425 | * object's reference on its shadow object yet. |
1426 | * The reaper thread will take care of that once it has |
1427 | * completed this object's termination. |
1428 | */ |
1429 | return KERN_FAILURE; |
1430 | } |
1431 | /* |
1432 | * complete the VM object termination |
1433 | */ |
1434 | vm_object_reap(object); |
1435 | object = VM_OBJECT_NULL; |
1436 | |
1437 | /* |
1438 | * the object lock was released by vm_object_reap() |
1439 | * |
1440 | * KERN_SUCCESS means that this object has been terminated |
1441 | * and no longer needs its shadow object but still holds a |
1442 | * reference on it. |
1443 | * The caller is responsible for dropping that reference. |
1444 | * We can't call vm_object_deallocate() here because that |
1445 | * would create a recursion. |
1446 | */ |
1447 | return KERN_SUCCESS; |
1448 | } |
1449 | |
1450 | |
1451 | /* |
1452 | * vm_object_reap(): |
1453 | * |
1454 | * Complete the termination of a VM object after it's been marked |
1455 | * as "terminating" and "!alive" by vm_object_terminate(). |
1456 | * |
1457 | * The VM object must be locked by caller. |
1458 | * The lock will be released on return and the VM object is no longer valid. |
1459 | */ |
1460 | |
1461 | void |
1462 | vm_object_reap( |
1463 | vm_object_t object) |
1464 | { |
1465 | memory_object_t pager; |
1466 | |
1467 | vm_object_lock_assert_exclusive(object); |
1468 | assert(object->paging_in_progress == 0); |
1469 | assert(object->activity_in_progress == 0); |
1470 | |
1471 | vm_object_reap_count++; |
1472 | |
1473 | /* |
1474 | * Disown this purgeable object to clean up its owner's purgeable |
1475 | * ledgers. We need to do this before disconnecting the object |
1476 | * from its pager, to properly account for compressed pages. |
1477 | */ |
1478 | if (object->internal && |
1479 | (object->purgable != VM_PURGABLE_DENY || |
1480 | object->vo_ledger_tag)) { |
1481 | assert(!object->alive); |
1482 | assert(object->terminating); |
1483 | vm_object_ownership_change(object, |
1484 | object->vo_ledger_tag, /* unchanged */ |
1485 | NULL, /* no owner */ |
1486 | FALSE); /* task_objq not locked */ |
1487 | assert(object->vo_owner == NULL); |
1488 | } |
1489 | |
1490 | pager = object->pager; |
1491 | object->pager = MEMORY_OBJECT_NULL; |
1492 | |
1493 | if (pager != MEMORY_OBJECT_NULL) |
1494 | memory_object_control_disable(object->pager_control); |
1495 | |
1496 | object->ref_count--; |
1497 | #if TASK_SWAPPER |
1498 | assert(object->res_count == 0); |
1499 | #endif /* TASK_SWAPPER */ |
1500 | |
1501 | assert (object->ref_count == 0); |
1502 | |
1503 | /* |
1504 | * remove from purgeable queue if it's on |
1505 | */ |
1506 | if (object->internal) { |
1507 | assert(VM_OBJECT_OWNER(object) == TASK_NULL); |
1508 | |
1509 | VM_OBJECT_UNWIRED(object); |
1510 | |
1511 | if (object->purgable == VM_PURGABLE_DENY) { |
1512 | /* not purgeable: nothing to do */ |
1513 | } else if (object->purgable == VM_PURGABLE_VOLATILE) { |
1514 | purgeable_q_t queue; |
1515 | |
1516 | queue = vm_purgeable_object_remove(object); |
1517 | assert(queue); |
1518 | |
1519 | if (object->purgeable_when_ripe) { |
1520 | /* |
1521 | * Must take page lock for this - |
1522 | * using it to protect token queue |
1523 | */ |
1524 | vm_page_lock_queues(); |
1525 | vm_purgeable_token_delete_first(queue); |
1526 | |
1527 | assert(queue->debug_count_objects>=0); |
1528 | vm_page_unlock_queues(); |
1529 | } |
1530 | |
1531 | /* |
1532 | * Update "vm_page_purgeable_count" in bulk and mark |
1533 | * object as VM_PURGABLE_EMPTY to avoid updating |
1534 | * "vm_page_purgeable_count" again in vm_page_remove() |
1535 | * when reaping the pages. |
1536 | */ |
1537 | unsigned int delta; |
1538 | assert(object->resident_page_count >= |
1539 | object->wired_page_count); |
1540 | delta = (object->resident_page_count - |
1541 | object->wired_page_count); |
1542 | if (delta != 0) { |
1543 | assert(vm_page_purgeable_count >= delta); |
1544 | OSAddAtomic(-delta, |
1545 | (SInt32 *)&vm_page_purgeable_count); |
1546 | } |
1547 | if (object->wired_page_count != 0) { |
1548 | assert(vm_page_purgeable_wired_count >= |
1549 | object->wired_page_count); |
1550 | OSAddAtomic(-object->wired_page_count, |
1551 | (SInt32 *)&vm_page_purgeable_wired_count); |
1552 | } |
1553 | object->purgable = VM_PURGABLE_EMPTY; |
1554 | } |
1555 | else if (object->purgable == VM_PURGABLE_NONVOLATILE || |
1556 | object->purgable == VM_PURGABLE_EMPTY) { |
1557 | /* remove from nonvolatile queue */ |
1558 | vm_purgeable_nonvolatile_dequeue(object); |
1559 | } else { |
1560 | panic("object %p in unexpected purgeable state 0x%x\n", |
1561 | object, object->purgable); |
1562 | } |
1563 | if (object->transposed && |
1564 | object->cached_list.next != NULL && |
1565 | object->cached_list.prev == NULL) { |
1566 | /* |
1567 | * object->cached_list.next "points" to the |
1568 | * object that was transposed with this object. |
1569 | */ |
1570 | } else { |
1571 | assert(object->cached_list.next == NULL); |
1572 | } |
1573 | assert(object->cached_list.prev == NULL); |
1574 | } |
1575 | |
1576 | if (object->pageout) { |
1577 | /* |
1578 | * free all remaining pages tabled on |
1579 | * this object |
1580 | * clean up its shadow |
1581 | */ |
1582 | assert(object->shadow != VM_OBJECT_NULL); |
1583 | |
1584 | vm_pageout_object_terminate(object); |
1585 | |
1586 | } else if (object->resident_page_count) { |
1587 | /* |
1588 | * free all remaining pages tabled on |
1589 | * this object |
1590 | */ |
1591 | vm_object_reap_pages(object, REAP_REAP); |
1592 | } |
1593 | assert(vm_page_queue_empty(&object->memq)); |
1594 | assert(object->paging_in_progress == 0); |
1595 | assert(object->activity_in_progress == 0); |
1596 | assert(object->ref_count == 0); |
1597 | |
1598 | /* |
1599 | * If the pager has not already been released by |
1600 | * vm_object_destroy, we need to terminate it and |
1601 | * release our reference to it here. |
1602 | */ |
1603 | if (pager != MEMORY_OBJECT_NULL) { |
1604 | vm_object_unlock(object); |
1605 | vm_object_release_pager(pager); |
1606 | vm_object_lock(object); |
1607 | } |
1608 | |
1609 | /* kick off anyone waiting on terminating */ |
1610 | object->terminating = FALSE; |
1611 | vm_object_paging_begin(object); |
1612 | vm_object_paging_end(object); |
1613 | vm_object_unlock(object); |
1614 | |
1615 | object->shadow = VM_OBJECT_NULL; |
1616 | |
1617 | #if VM_OBJECT_TRACKING |
1618 | if (vm_object_tracking_inited) { |
1619 | btlog_remove_entries_for_element(vm_object_tracking_btlog, |
1620 | object); |
1621 | } |
1622 | #endif /* VM_OBJECT_TRACKING */ |
1623 | |
1624 | vm_object_lock_destroy(object); |
1625 | /* |
1626 | * Free the space for the object. |
1627 | */ |
1628 | zfree(vm_object_zone, object); |
1629 | object = VM_OBJECT_NULL; |
1630 | } |
1631 | |
1632 | |
1633 | unsigned int vm_max_batch = 256; |
1634 | |
1635 | #define V_O_R_MAX_BATCH 128 |
1636 | |
1637 | #define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch) |
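/*
 * BATCH_LIMIT() clamps a caller's batch size to the global "vm_max_batch"
 * cap, bounding how many pages are processed in one pass before the page
 * queues lock is dropped and re-taken.  With the values above,
 * BATCH_LIMIT(V_O_R_MAX_BATCH) evaluates to 128.
 */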
1638 | |
1639 | |
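/*
 * VM_OBJ_REAP_FREELIST(q, do_disconnect): release the locally collected
 * list of reclaimed pages to the free list.  If "do_disconnect" is set,
 * each pmapped page is first disconnected from all pmaps.  The list head
 * is reset to VM_PAGE_NULL on completion.
 */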
1640 | #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \ |
1641 | MACRO_BEGIN \ |
1642 | if (_local_free_q) { \ |
1643 | if (do_disconnect) { \ |
1644 | vm_page_t m; \ |
1645 | for (m = _local_free_q; \ |
1646 | m != VM_PAGE_NULL; \ |
1647 | m = m->vmp_snext) { \ |
1648 | if (m->vmp_pmapped) { \ |
1649 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \ |
1650 | } \ |
1651 | } \ |
1652 | } \ |
1653 | vm_page_free_list(_local_free_q, TRUE); \ |
1654 | _local_free_q = VM_PAGE_NULL; \ |
1655 | } \ |
1656 | MACRO_END |
1657 | |
1658 | |
1659 | void |
1660 | vm_object_reap_pages( |
1661 | vm_object_t object, |
1662 | int reap_type) |
1663 | { |
1664 | vm_page_t p; |
1665 | vm_page_t next; |
1666 | vm_page_t local_free_q = VM_PAGE_NULL; |
1667 | int loop_count; |
1668 | boolean_t disconnect_on_release; |
1669 | pmap_flush_context pmap_flush_context_storage; |
1670 | |
1671 | if (reap_type == REAP_DATA_FLUSH) { |
1672 | /* |
1673 | * We need to disconnect pages from all pmaps before |
1674 | * releasing them to the free list |
1675 | */ |
1676 | disconnect_on_release = TRUE; |
1677 | } else { |
1678 | /* |
1679 | * Either the caller has already disconnected the pages |
1680 | * from all pmaps, or we disconnect them here as we add |
		 * them to our local list of pages to be released.
1682 | * No need to re-disconnect them when we release the pages |
1683 | * to the free list. |
1684 | */ |
1685 | disconnect_on_release = FALSE; |
1686 | } |
1687 | |
1688 | restart_after_sleep: |
1689 | if (vm_page_queue_empty(&object->memq)) |
1690 | return; |
1691 | loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); |
1692 | |
1693 | if (reap_type == REAP_PURGEABLE) |
1694 | pmap_flush_context_init(&pmap_flush_context_storage); |
1695 | |
1696 | vm_page_lockspin_queues(); |
1697 | |
1698 | next = (vm_page_t)vm_page_queue_first(&object->memq); |
1699 | |
1700 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) { |
1701 | |
1702 | p = next; |
1703 | next = (vm_page_t)vm_page_queue_next(&next->vmp_listq); |
1704 | |
1705 | if (--loop_count == 0) { |
1706 | |
1707 | vm_page_unlock_queues(); |
1708 | |
1709 | if (local_free_q) { |
1710 | |
1711 | if (reap_type == REAP_PURGEABLE) { |
1712 | pmap_flush(&pmap_flush_context_storage); |
1713 | pmap_flush_context_init(&pmap_flush_context_storage); |
1714 | } |
1715 | /* |
1716 | * Free the pages we reclaimed so far |
1717 | * and take a little break to avoid |
1718 | * hogging the page queue lock too long |
1719 | */ |
1720 | VM_OBJ_REAP_FREELIST(local_free_q, |
1721 | disconnect_on_release); |
1722 | } else |
1723 | mutex_pause(0); |
1724 | |
1725 | loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); |
1726 | |
1727 | vm_page_lockspin_queues(); |
1728 | } |
1729 | if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) { |
1730 | |
1731 | if (p->vmp_busy || p->vmp_cleaning) { |
1732 | |
1733 | vm_page_unlock_queues(); |
1734 | /* |
1735 | * free the pages reclaimed so far |
1736 | */ |
1737 | VM_OBJ_REAP_FREELIST(local_free_q, |
1738 | disconnect_on_release); |
1739 | |
1740 | PAGE_SLEEP(object, p, THREAD_UNINT); |
1741 | |
1742 | goto restart_after_sleep; |
1743 | } |
1744 | if (p->vmp_laundry) |
1745 | vm_pageout_steal_laundry(p, TRUE); |
1746 | } |
1747 | switch (reap_type) { |
1748 | |
1749 | case REAP_DATA_FLUSH: |
1750 | if (VM_PAGE_WIRED(p)) { |
1751 | /* |
1752 | * this is an odd case... perhaps we should |
1753 | * zero-fill this page since we're conceptually |
1754 | * tossing its data at this point, but leaving |
1755 | * it on the object to honor the 'wire' contract |
1756 | */ |
1757 | continue; |
1758 | } |
1759 | break; |
1760 | |
1761 | case REAP_PURGEABLE: |
1762 | if (VM_PAGE_WIRED(p)) { |
1763 | /* |
1764 | * can't purge a wired page |
1765 | */ |
1766 | vm_page_purged_wired++; |
1767 | continue; |
1768 | } |
1769 | if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) |
1770 | vm_pageout_steal_laundry(p, TRUE); |
1771 | |
1772 | if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) { |
1773 | /* |
1774 | * page is being acted upon, |
1775 | * so don't mess with it |
1776 | */ |
1777 | vm_page_purged_others++; |
1778 | continue; |
1779 | } |
1780 | if (p->vmp_busy) { |
1781 | /* |
				 * We can't reclaim a busy page, but since
				 * it's not wired we can deactivate it to
				 * make it more likely to be considered by
				 * vm_pageout_scan() later.
1786 | */ |
1787 | if (VM_PAGE_PAGEABLE(p)) |
1788 | vm_page_deactivate(p); |
1789 | vm_page_purged_busy++; |
1790 | continue; |
1791 | } |
1792 | |
1793 | assert(VM_PAGE_OBJECT(p) != kernel_object); |
1794 | |
1795 | /* |
1796 | * we can discard this page... |
1797 | */ |
1798 | if (p->vmp_pmapped == TRUE) { |
1799 | /* |
1800 | * unmap the page |
1801 | */ |
1802 | pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage); |
1803 | } |
1804 | vm_page_purged_count++; |
1805 | |
1806 | break; |
1807 | |
1808 | case REAP_TERMINATE: |
1809 | if (p->vmp_absent || p->vmp_private) { |
1810 | /* |
1811 | * For private pages, VM_PAGE_FREE just |
1812 | * leaves the page structure around for |
1813 | * its owner to clean up. For absent |
1814 | * pages, the structure is returned to |
1815 | * the appropriate pool. |
1816 | */ |
1817 | break; |
1818 | } |
1819 | if (p->vmp_fictitious) { |
1820 | assert (VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr); |
1821 | break; |
1822 | } |
1823 | if (!p->vmp_dirty && p->vmp_wpmapped) |
1824 | p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)); |
1825 | |
1826 | if ((p->vmp_dirty || p->vmp_precious) && !p->vmp_error && object->alive) { |
1827 | |
1828 | assert(!object->internal); |
1829 | |
1830 | p->vmp_free_when_done = TRUE; |
1831 | |
1832 | if (!p->vmp_laundry) { |
1833 | vm_page_queues_remove(p, TRUE); |
1834 | /* |
1835 | * flush page... page will be freed |
1836 | * upon completion of I/O |
1837 | */ |
1838 | vm_pageout_cluster(p); |
1839 | } |
1840 | vm_page_unlock_queues(); |
1841 | /* |
1842 | * free the pages reclaimed so far |
1843 | */ |
1844 | VM_OBJ_REAP_FREELIST(local_free_q, |
1845 | disconnect_on_release); |
1846 | |
1847 | vm_object_paging_wait(object, THREAD_UNINT); |
1848 | |
1849 | goto restart_after_sleep; |
1850 | } |
1851 | break; |
1852 | |
1853 | case REAP_REAP: |
1854 | break; |
1855 | } |
1856 | vm_page_free_prepare_queues(p); |
1857 | assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0); |
1858 | /* |
1859 | * Add this page to our list of reclaimed pages, |
1860 | * to be freed later. |
1861 | */ |
1862 | p->vmp_snext = local_free_q; |
1863 | local_free_q = p; |
1864 | } |
1865 | vm_page_unlock_queues(); |
1866 | |
1867 | /* |
1868 | * Free the remaining reclaimed pages |
1869 | */ |
1870 | if (reap_type == REAP_PURGEABLE) |
1871 | pmap_flush(&pmap_flush_context_storage); |
1872 | |
1873 | VM_OBJ_REAP_FREELIST(local_free_q, |
1874 | disconnect_on_release); |
1875 | } |
1876 | |
1877 | |
1878 | void |
1879 | vm_object_reap_async( |
1880 | vm_object_t object) |
1881 | { |
1882 | vm_object_lock_assert_exclusive(object); |
1883 | |
1884 | vm_object_reaper_lock_spin(); |
1885 | |
1886 | vm_object_reap_count_async++; |
1887 | |
1888 | /* enqueue the VM object... */ |
1889 | queue_enter(&vm_object_reaper_queue, object, |
1890 | vm_object_t, cached_list); |
1891 | |
1892 | vm_object_reaper_unlock(); |
1893 | |
1894 | /* ... and wake up the reaper thread */ |
1895 | thread_wakeup((event_t) &vm_object_reaper_queue); |
1896 | } |
1897 | |
1898 | |
1899 | void |
1900 | vm_object_reaper_thread(void) |
1901 | { |
1902 | vm_object_t object, shadow_object; |
1903 | |
1904 | vm_object_reaper_lock_spin(); |
1905 | |
1906 | while (!queue_empty(&vm_object_reaper_queue)) { |
1907 | queue_remove_first(&vm_object_reaper_queue, |
1908 | object, |
1909 | vm_object_t, |
1910 | cached_list); |
1911 | |
1912 | vm_object_reaper_unlock(); |
1913 | vm_object_lock(object); |
1914 | |
1915 | assert(object->terminating); |
1916 | assert(!object->alive); |
1917 | |
1918 | /* |
1919 | * The pageout daemon might be playing with our pages. |
1920 | * Now that the object is dead, it won't touch any more |
1921 | * pages, but some pages might already be on their way out. |
1922 | * Hence, we wait until the active paging activities have |
1923 | * ceased before we break the association with the pager |
1924 | * itself. |
1925 | */ |
1926 | while (object->paging_in_progress != 0 || |
1927 | object->activity_in_progress != 0) { |
1928 | vm_object_wait(object, |
1929 | VM_OBJECT_EVENT_PAGING_IN_PROGRESS, |
1930 | THREAD_UNINT); |
1931 | vm_object_lock(object); |
1932 | } |
1933 | |
1934 | shadow_object = |
1935 | object->pageout ? VM_OBJECT_NULL : object->shadow; |
1936 | |
1937 | vm_object_reap(object); |
1938 | /* cache is unlocked and object is no longer valid */ |
1939 | object = VM_OBJECT_NULL; |
1940 | |
1941 | if (shadow_object != VM_OBJECT_NULL) { |
1942 | /* |
1943 | * Drop the reference "object" was holding on |
1944 | * its shadow object. |
1945 | */ |
1946 | vm_object_deallocate(shadow_object); |
1947 | shadow_object = VM_OBJECT_NULL; |
1948 | } |
1949 | vm_object_reaper_lock_spin(); |
1950 | } |
1951 | |
1952 | /* wait for more work... */ |
1953 | assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT); |
1954 | |
1955 | vm_object_reaper_unlock(); |
1956 | |
1957 | thread_block((thread_continue_t) vm_object_reaper_thread); |
1958 | /*NOTREACHED*/ |
1959 | } |
1960 | |
1961 | /* |
1962 | * Routine: vm_object_release_pager |
1963 | * Purpose: Terminate the pager and, upon completion, |
1964 | * release our last reference to it. |
1965 | */ |
1966 | static void |
1967 | ( |
1968 | memory_object_t ) |
1969 | { |
1970 | |
1971 | /* |
1972 | * Terminate the pager. |
1973 | */ |
1974 | |
1975 | (void) memory_object_terminate(pager); |
1976 | |
1977 | /* |
1978 | * Release reference to pager. |
1979 | */ |
1980 | memory_object_deallocate(pager); |
1981 | } |
1982 | |
1983 | /* |
1984 | * Routine: vm_object_destroy |
1985 | * Purpose: |
1986 | * Shut down a VM object, despite the |
1987 | * presence of address map (or other) references |
1988 | * to the vm_object. |
1989 | */ |
1990 | kern_return_t |
1991 | vm_object_destroy( |
1992 | vm_object_t object, |
1993 | __unused kern_return_t reason) |
1994 | { |
	memory_object_t		old_pager;
1996 | |
1997 | if (object == VM_OBJECT_NULL) |
1998 | return(KERN_SUCCESS); |
1999 | |
2000 | /* |
2001 | * Remove the pager association immediately. |
2002 | * |
2003 | * This will prevent the memory manager from further |
2004 | * meddling. [If it wanted to flush data or make |
2005 | * other changes, it should have done so before performing |
2006 | * the destroy call.] |
2007 | */ |
2008 | |
2009 | vm_object_lock(object); |
2010 | object->can_persist = FALSE; |
2011 | object->named = FALSE; |
2012 | object->alive = FALSE; |
2013 | |
2014 | old_pager = object->pager; |
2015 | object->pager = MEMORY_OBJECT_NULL; |
2016 | if (old_pager != MEMORY_OBJECT_NULL) |
2017 | memory_object_control_disable(object->pager_control); |
2018 | |
2019 | /* |
2020 | * Wait for the existing paging activity (that got |
2021 | * through before we nulled out the pager) to subside. |
2022 | */ |
2023 | |
2024 | vm_object_paging_wait(object, THREAD_UNINT); |
2025 | vm_object_unlock(object); |
2026 | |
2027 | /* |
2028 | * Terminate the object now. |
2029 | */ |
2030 | if (old_pager != MEMORY_OBJECT_NULL) { |
2031 | vm_object_release_pager(old_pager); |
2032 | |
2033 | /* |
2034 | * JMM - Release the caller's reference. This assumes the |
2035 | * caller had a reference to release, which is a big (but |
2036 | * currently valid) assumption if this is driven from the |
2037 | * vnode pager (it is holding a named reference when making |
		 * this call).
2039 | */ |
2040 | vm_object_deallocate(object); |
2041 | |
2042 | } |
2043 | return(KERN_SUCCESS); |
2044 | } |
2045 | |
2046 | /* |
2047 | * The "chunk" macros are used by routines below when looking for pages to deactivate. These |
2048 | * exist because of the need to handle shadow chains. When deactivating pages, we only |
 * want to deactivate the ones at the topmost level in the object chain. In order to do
2050 | * this efficiently, the specified address range is divided up into "chunks" and we use |
2051 | * a bit map to keep track of which pages have already been processed as we descend down |
2052 | * the shadow chain. These chunk macros hide the details of the bit map implementation |
2053 | * as much as we can. |
2054 | * |
2055 | * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is |
2056 | * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest |
2057 | * order bit represents page 0 in the current range and highest order bit represents |
2058 | * page 63. |
2059 | * |
2060 | * For further convenience, we also use negative logic for the page state in the bit map. |
2061 | * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has |
2062 | * been processed. This way we can simply test the 64-bit long word to see if it's zero |
2063 | * to easily tell if the whole range has been processed. Therefore, the bit map starts |
2064 | * out with all the bits set. The macros below hide all these details from the caller. |
2065 | */ |
2066 | |
2067 | #define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */ |
2068 | /* be the same as the number of bits in */ |
2069 | /* the chunk_state_t type. We use 64 */ |
2070 | /* just for convenience. */ |
2071 | |
2072 | #define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */ |
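/* e.g. with a 4KB page size, one chunk covers 64 * 4KB = 256KB of the object's range */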
2073 | |
2074 | typedef uint64_t chunk_state_t; |
2075 | |
2076 | /* |
2077 | * The bit map uses negative logic, so we start out with all 64 bits set to indicate |
2078 | * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE, |
2079 | * then we mark pages beyond the len as having been "processed" so that we don't waste time |
2080 | * looking at pages in that range. This can save us from unnecessarily chasing down the |
2081 | * shadow chain. |
2082 | */ |
2083 | |
2084 | #define CHUNK_INIT(c, len) \ |
2085 | MACRO_BEGIN \ |
2086 | uint64_t p; \ |
2087 | \ |
2088 | (c) = 0xffffffffffffffffLL; \ |
2089 | \ |
2090 | for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \ |
2091 | MARK_PAGE_HANDLED(c, p); \ |
2092 | MACRO_END |
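
/*
 * Example: CHUNK_INIT(c, 3 * PAGE_SIZE_64) leaves bits 0-2 set (three pages
 * still to be processed) and marks bits 3-63 as handled, so offsets beyond
 * "len" are never examined.
 */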
2093 | |
2094 | |
2095 | /* |
2096 | * Return true if all pages in the chunk have not yet been processed. |
2097 | */ |
2098 | |
2099 | #define CHUNK_NOT_COMPLETE(c) ((c) != 0) |
2100 | |
2101 | /* |
2102 | * Return true if the page at offset 'p' in the bit map has already been handled |
2103 | * while processing a higher level object in the shadow chain. |
2104 | */ |
2105 | |
2106 | #define PAGE_ALREADY_HANDLED(c, p) (((c) & (1LL << (p))) == 0) |
2107 | |
2108 | /* |
2109 | * Mark the page at offset 'p' in the bit map as having been processed. |
2110 | */ |
2111 | |
2112 | #define MARK_PAGE_HANDLED(c, p) \ |
2113 | MACRO_BEGIN \ |
2114 | (c) = (c) & ~(1LL << (p)); \ |
2115 | MACRO_END |
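
/*
 * A minimal usage sketch (the loop in deactivate_pages_in_object() below
 * follows this pattern):
 *
 *	for (p = 0; size && CHUNK_NOT_COMPLETE(c); p++, ...) {
 *		if (PAGE_ALREADY_HANDLED(c, p))
 *			continue;
 *		... process the page at this offset ...
 *		MARK_PAGE_HANDLED(c, p);
 *	}
 */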
2116 | |
2117 | |
2118 | /* |
2119 | * Return true if the page at the given offset has been paged out. Object is |
2120 | * locked upon entry and returned locked. |
2121 | */ |
2122 | |
2123 | static boolean_t |
2124 | page_is_paged_out( |
2125 | vm_object_t object, |
2126 | vm_object_offset_t offset) |
2127 | { |
2128 | if (object->internal && |
2129 | object->alive && |
2130 | !object->terminating && |
2131 | object->pager_ready) { |
2132 | |
2133 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) |
2134 | == VM_EXTERNAL_STATE_EXISTS) { |
2135 | return TRUE; |
2136 | } |
2137 | } |
2138 | return FALSE; |
2139 | } |
2140 | |
2141 | |
2142 | |
2143 | /* |
2144 | * madvise_free_debug |
2145 | * |
2146 | * To help debug madvise(MADV_FREE*) mis-usage, this triggers a |
2147 | * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to |
2148 | * simulate the loss of the page's contents as if the page had been |
2149 | * reclaimed and then re-faulted. |
2150 | */ |
2151 | #if DEVELOPMENT || DEBUG |
2152 | int madvise_free_debug = 1; |
#else /* DEVELOPMENT || DEBUG */
int madvise_free_debug = 0;
#endif /* DEVELOPMENT || DEBUG */
2156 | |
2157 | /* |
2158 | * Deactivate the pages in the specified object and range. If kill_page is set, also discard any |
2159 | * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify |
2160 | * a size that is less than or equal to the CHUNK_SIZE. |
2161 | */ |
2162 | |
2163 | static void |
2164 | deactivate_pages_in_object( |
2165 | vm_object_t object, |
2166 | vm_object_offset_t offset, |
2167 | vm_object_size_t size, |
2168 | boolean_t kill_page, |
2169 | boolean_t reusable_page, |
2170 | boolean_t all_reusable, |
2171 | chunk_state_t *chunk_state, |
2172 | pmap_flush_context *pfc, |
2173 | struct pmap *pmap, |
2174 | vm_map_offset_t pmap_offset) |
2175 | { |
2176 | vm_page_t m; |
2177 | int p; |
2178 | struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; |
2179 | struct vm_page_delayed_work *dwp; |
2180 | int dw_count; |
2181 | int dw_limit; |
2182 | unsigned int reusable = 0; |
2183 | |
2184 | /* |
2185 | * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the |
2186 | * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may |
2187 | * have pages marked as having been processed already. We stop the loop early if we find we've handled |
2188 | * all the pages in the chunk. |
2189 | */ |
2190 | |
2191 | dwp = &dw_array[0]; |
2192 | dw_count = 0; |
2193 | dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); |
2194 | |
2195 | for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) { |
2196 | |
2197 | /* |
2198 | * If this offset has already been found and handled in a higher level object, then don't |
2199 | * do anything with it in the current shadow object. |
2200 | */ |
2201 | |
2202 | if (PAGE_ALREADY_HANDLED(*chunk_state, p)) |
2203 | continue; |
2204 | |
2205 | /* |
2206 | * See if the page at this offset is around. First check to see if the page is resident, |
2207 | * then if not, check the existence map or with the pager. |
2208 | */ |
2209 | |
2210 | if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { |
2211 | |
2212 | /* |
2213 | * We found a page we were looking for. Mark it as "handled" now in the chunk_state |
2214 | * so that we won't bother looking for a page at this offset again if there are more |
2215 | * shadow objects. Then deactivate the page. |
2216 | */ |
2217 | |
2218 | MARK_PAGE_HANDLED(*chunk_state, p); |
2219 | |
2220 | if (( !VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) && |
2221 | (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) { |
2222 | int clear_refmod; |
2223 | int pmap_options; |
2224 | |
2225 | dwp->dw_mask = 0; |
2226 | |
2227 | pmap_options = 0; |
2228 | clear_refmod = VM_MEM_REFERENCED; |
2229 | dwp->dw_mask |= DW_clear_reference; |
2230 | |
2231 | if ((kill_page) && (object->internal)) { |
2232 | if (madvise_free_debug) { |
2233 | /* |
2234 | * zero-fill the page now |
2235 | * to simulate it being |
2236 | * reclaimed and re-faulted. |
2237 | */ |
2238 | pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m)); |
2239 | } |
2240 | m->vmp_precious = FALSE; |
2241 | m->vmp_dirty = FALSE; |
2242 | |
2243 | clear_refmod |= VM_MEM_MODIFIED; |
2244 | if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) { |
2245 | /* |
2246 | * This page is now clean and |
2247 | * reclaimable. Move it out |
2248 | * of the throttled queue, so |
2249 | * that vm_pageout_scan() can |
2250 | * find it. |
2251 | */ |
2252 | dwp->dw_mask |= DW_move_page; |
2253 | } |
2254 | |
2255 | VM_COMPRESSOR_PAGER_STATE_CLR(object, offset); |
2256 | |
2257 | if (reusable_page && !m->vmp_reusable) { |
2258 | assert(!all_reusable); |
2259 | assert(!object->all_reusable); |
2260 | m->vmp_reusable = TRUE; |
2261 | object->reusable_page_count++; |
2262 | assert(object->resident_page_count >= object->reusable_page_count); |
2263 | reusable++; |
2264 | /* |
2265 | * Tell pmap this page is now |
2266 | * "reusable" (to update pmap |
2267 | * stats for all mappings). |
2268 | */ |
2269 | pmap_options |= PMAP_OPTIONS_SET_REUSABLE; |
2270 | } |
2271 | } |
2272 | pmap_options |= PMAP_OPTIONS_NOFLUSH; |
2273 | pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), |
2274 | clear_refmod, |
2275 | pmap_options, |
2276 | (void *)pfc); |
2277 | |
2278 | if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable)) |
2279 | dwp->dw_mask |= DW_move_page; |
2280 | |
2281 | if (dwp->dw_mask) |
2282 | VM_PAGE_ADD_DELAYED_WORK(dwp, m, |
2283 | dw_count); |
2284 | |
2285 | if (dw_count >= dw_limit) { |
2286 | if (reusable) { |
2287 | OSAddAtomic(reusable, |
2288 | &vm_page_stats_reusable.reusable_count); |
2289 | vm_page_stats_reusable.reusable += reusable; |
2290 | reusable = 0; |
2291 | } |
2292 | vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); |
2293 | |
2294 | dwp = &dw_array[0]; |
2295 | dw_count = 0; |
2296 | } |
2297 | } |
2298 | |
2299 | } else { |
2300 | |
2301 | /* |
2302 | * The page at this offset isn't memory resident, check to see if it's |
2303 | * been paged out. If so, mark it as handled so we don't bother looking |
2304 | * for it in the shadow chain. |
2305 | */ |
2306 | |
2307 | if (page_is_paged_out(object, offset)) { |
2308 | MARK_PAGE_HANDLED(*chunk_state, p); |
2309 | |
2310 | /* |
2311 | * If we're killing a non-resident page, then clear the page in the existence |
2312 | * map so we don't bother paging it back in if it's touched again in the future. |
2313 | */ |
2314 | |
2315 | if ((kill_page) && (object->internal)) { |
2316 | |
2317 | VM_COMPRESSOR_PAGER_STATE_CLR(object, offset); |
2318 | |
2319 | if (pmap != PMAP_NULL) { |
2320 | /* |
2321 | * Tell pmap that this page |
2322 | * is no longer mapped, to |
2323 | * adjust the footprint ledger |
2324 | * because this page is no |
2325 | * longer compressed. |
2326 | */ |
2327 | pmap_remove_options( |
2328 | pmap, |
2329 | pmap_offset, |
2330 | (pmap_offset + |
2331 | PAGE_SIZE), |
2332 | PMAP_OPTIONS_REMOVE); |
2333 | } |
2334 | } |
2335 | } |
2336 | } |
2337 | } |
2338 | |
2339 | if (reusable) { |
2340 | OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count); |
2341 | vm_page_stats_reusable.reusable += reusable; |
2342 | reusable = 0; |
2343 | } |
2344 | |
2345 | if (dw_count) |
2346 | vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); |
2347 | } |
2348 | |
2349 | |
2350 | /* |
 * Deactivate a "chunk" of the given range of the object starting at offset. A "chunk"
2352 | * will always be less than or equal to the given size. The total range is divided up |
2353 | * into chunks for efficiency and performance related to the locks and handling the shadow |
2354 | * chain. This routine returns how much of the given "size" it actually processed. It's |
 * up to the caller to loop and keep calling this routine until the entire range they want
2356 | * to process has been done. |
2357 | */ |
2358 | |
2359 | static vm_object_size_t |
2360 | deactivate_a_chunk( |
2361 | vm_object_t orig_object, |
2362 | vm_object_offset_t offset, |
2363 | vm_object_size_t size, |
2364 | boolean_t kill_page, |
2365 | boolean_t reusable_page, |
2366 | boolean_t all_reusable, |
2367 | pmap_flush_context *pfc, |
2368 | struct pmap *pmap, |
2369 | vm_map_offset_t pmap_offset) |
2370 | { |
2371 | vm_object_t object; |
2372 | vm_object_t tmp_object; |
2373 | vm_object_size_t length; |
2374 | chunk_state_t chunk_state; |
2375 | |
2376 | |
2377 | /* |
2378 | * Get set to do a chunk. We'll do up to CHUNK_SIZE, but no more than the |
2379 | * remaining size the caller asked for. |
2380 | */ |
2381 | |
2382 | length = MIN(size, CHUNK_SIZE); |
2383 | |
2384 | /* |
2385 | * The chunk_state keeps track of which pages we've already processed if there's |
2386 | * a shadow chain on this object. At this point, we haven't done anything with this |
2387 | * range of pages yet, so initialize the state to indicate no pages processed yet. |
2388 | */ |
2389 | |
2390 | CHUNK_INIT(chunk_state, length); |
2391 | object = orig_object; |
2392 | |
2393 | /* |
2394 | * Start at the top level object and iterate around the loop once for each object |
2395 | * in the shadow chain. We stop processing early if we've already found all the pages |
2396 | * in the range. Otherwise we stop when we run out of shadow objects. |
2397 | */ |
2398 | |
2399 | while (object && CHUNK_NOT_COMPLETE(chunk_state)) { |
2400 | vm_object_paging_begin(object); |
2401 | |
2402 | deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc, pmap, pmap_offset); |
2403 | |
2404 | vm_object_paging_end(object); |
2405 | |
2406 | /* |
2407 | * We've finished with this object, see if there's a shadow object. If |
2408 | * there is, update the offset and lock the new object. We also turn off |
2409 | * kill_page at this point since we only kill pages in the top most object. |
2410 | */ |
2411 | |
2412 | tmp_object = object->shadow; |
2413 | |
2414 | if (tmp_object) { |
2415 | kill_page = FALSE; |
2416 | reusable_page = FALSE; |
2417 | all_reusable = FALSE; |
2418 | offset += object->vo_shadow_offset; |
2419 | vm_object_lock(tmp_object); |
2420 | } |
2421 | |
2422 | if (object != orig_object) |
2423 | vm_object_unlock(object); |
2424 | |
2425 | object = tmp_object; |
2426 | } |
2427 | |
2428 | if (object && object != orig_object) |
2429 | vm_object_unlock(object); |
2430 | |
2431 | return length; |
2432 | } |
2433 | |
2434 | |
2435 | |
2436 | /* |
2437 | * Move any resident pages in the specified range to the inactive queue. If kill_page is set, |
2438 | * we also clear the modified status of the page and "forget" any changes that have been made |
2439 | * to the page. |
2440 | */ |
2441 | |
2442 | __private_extern__ void |
2443 | vm_object_deactivate_pages( |
2444 | vm_object_t object, |
2445 | vm_object_offset_t offset, |
2446 | vm_object_size_t size, |
2447 | boolean_t kill_page, |
2448 | boolean_t reusable_page, |
2449 | struct pmap *pmap, |
2450 | vm_map_offset_t pmap_offset) |
2451 | { |
2452 | vm_object_size_t length; |
2453 | boolean_t all_reusable; |
2454 | pmap_flush_context pmap_flush_context_storage; |
2455 | |
2456 | /* |
2457 | * We break the range up into chunks and do one chunk at a time. This is for |
2458 | * efficiency and performance while handling the shadow chains and the locks. |
2459 | * The deactivate_a_chunk() function returns how much of the range it processed. |
2460 | * We keep calling this routine until the given size is exhausted. |
2461 | */ |
2462 | |
2463 | |
2464 | all_reusable = FALSE; |
2465 | #if 11 |
2466 | /* |
2467 | * For the sake of accurate "reusable" pmap stats, we need |
2468 | * to tell pmap about each page that is no longer "reusable", |
2469 | * so we can't do the "all_reusable" optimization. |
2470 | */ |
2471 | #else |
2472 | if (reusable_page && |
2473 | object->internal && |
2474 | object->vo_size != 0 && |
2475 | object->vo_size == size && |
2476 | object->reusable_page_count == 0) { |
2477 | all_reusable = TRUE; |
2478 | reusable_page = FALSE; |
2479 | } |
2480 | #endif |
2481 | |
2482 | if ((reusable_page || all_reusable) && object->all_reusable) { |
2483 | /* This means MADV_FREE_REUSABLE has been called twice, which |
2484 | * is probably illegal. */ |
2485 | return; |
2486 | } |
2487 | |
2488 | pmap_flush_context_init(&pmap_flush_context_storage); |
2489 | |
2490 | while (size) { |
2491 | length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage, pmap, pmap_offset); |
2492 | |
2493 | size -= length; |
2494 | offset += length; |
2495 | pmap_offset += length; |
2496 | } |
2497 | pmap_flush(&pmap_flush_context_storage); |
2498 | |
2499 | if (all_reusable) { |
2500 | if (!object->all_reusable) { |
2501 | unsigned int reusable; |
2502 | |
2503 | object->all_reusable = TRUE; |
2504 | assert(object->reusable_page_count == 0); |
2505 | /* update global stats */ |
2506 | reusable = object->resident_page_count; |
2507 | OSAddAtomic(reusable, |
2508 | &vm_page_stats_reusable.reusable_count); |
2509 | vm_page_stats_reusable.reusable += reusable; |
2510 | vm_page_stats_reusable.all_reusable_calls++; |
2511 | } |
2512 | } else if (reusable_page) { |
2513 | vm_page_stats_reusable.partial_reusable_calls++; |
2514 | } |
2515 | } |
2516 | |
2517 | void |
2518 | vm_object_reuse_pages( |
2519 | vm_object_t object, |
2520 | vm_object_offset_t start_offset, |
2521 | vm_object_offset_t end_offset, |
2522 | boolean_t allow_partial_reuse) |
2523 | { |
2524 | vm_object_offset_t cur_offset; |
2525 | vm_page_t m; |
2526 | unsigned int reused, reusable; |
2527 | |
2528 | #define VM_OBJECT_REUSE_PAGE(object, m, reused) \ |
2529 | MACRO_BEGIN \ |
2530 | if ((m) != VM_PAGE_NULL && \ |
2531 | (m)->vmp_reusable) { \ |
2532 | assert((object)->reusable_page_count <= \ |
2533 | (object)->resident_page_count); \ |
2534 | assert((object)->reusable_page_count > 0); \ |
2535 | (object)->reusable_page_count--; \ |
2536 | (m)->vmp_reusable = FALSE; \ |
2537 | (reused)++; \ |
2538 | /* \ |
2539 | * Tell pmap that this page is no longer \ |
2540 | * "reusable", to update the "reusable" stats \ |
2541 | * for all the pmaps that have mapped this \ |
2542 | * page. \ |
2543 | */ \ |
2544 | pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \ |
2545 | 0, /* refmod */ \ |
2546 | (PMAP_OPTIONS_CLEAR_REUSABLE \ |
2547 | | PMAP_OPTIONS_NOFLUSH), \ |
2548 | NULL); \ |
2549 | } \ |
2550 | MACRO_END |
2551 | |
2552 | reused = 0; |
2553 | reusable = 0; |
2554 | |
2555 | vm_object_lock_assert_exclusive(object); |
2556 | |
2557 | if (object->all_reusable) { |
		panic("object %p all_reusable: can't update pmap stats\n",
2559 | object); |
2560 | assert(object->reusable_page_count == 0); |
2561 | object->all_reusable = FALSE; |
2562 | if (end_offset - start_offset == object->vo_size || |
2563 | !allow_partial_reuse) { |
2564 | vm_page_stats_reusable.all_reuse_calls++; |
2565 | reused = object->resident_page_count; |
2566 | } else { |
2567 | vm_page_stats_reusable.partial_reuse_calls++; |
2568 | vm_page_queue_iterate(&object->memq, m, vm_page_t, vmp_listq) { |
2569 | if (m->vmp_offset < start_offset || |
2570 | m->vmp_offset >= end_offset) { |
2571 | m->vmp_reusable = TRUE; |
2572 | object->reusable_page_count++; |
2573 | assert(object->resident_page_count >= object->reusable_page_count); |
2574 | continue; |
2575 | } else { |
2576 | assert(!m->vmp_reusable); |
2577 | reused++; |
2578 | } |
2579 | } |
2580 | } |
2581 | } else if (object->resident_page_count > |
2582 | ((end_offset - start_offset) >> PAGE_SHIFT)) { |
2583 | vm_page_stats_reusable.partial_reuse_calls++; |
2584 | for (cur_offset = start_offset; |
2585 | cur_offset < end_offset; |
2586 | cur_offset += PAGE_SIZE_64) { |
2587 | if (object->reusable_page_count == 0) { |
2588 | break; |
2589 | } |
2590 | m = vm_page_lookup(object, cur_offset); |
2591 | VM_OBJECT_REUSE_PAGE(object, m, reused); |
2592 | } |
2593 | } else { |
2594 | vm_page_stats_reusable.partial_reuse_calls++; |
2595 | vm_page_queue_iterate(&object->memq, m, vm_page_t, vmp_listq) { |
2596 | if (object->reusable_page_count == 0) { |
2597 | break; |
2598 | } |
2599 | if (m->vmp_offset < start_offset || |
2600 | m->vmp_offset >= end_offset) { |
2601 | continue; |
2602 | } |
2603 | VM_OBJECT_REUSE_PAGE(object, m, reused); |
2604 | } |
2605 | } |
2606 | |
2607 | /* update global stats */ |
2608 | OSAddAtomic(reusable-reused, &vm_page_stats_reusable.reusable_count); |
2609 | vm_page_stats_reusable.reused += reused; |
2610 | vm_page_stats_reusable.reusable += reusable; |
2611 | } |
2612 | |
2613 | /* |
2614 | * Routine: vm_object_pmap_protect |
2615 | * |
2616 | * Purpose: |
2617 | * Reduces the permission for all physical |
2618 | * pages in the specified object range. |
2619 | * |
2620 | * If removing write permission only, it is |
2621 | * sufficient to protect only the pages in |
2622 | * the top-level object; only those pages may |
2623 | * have write permission. |
2624 | * |
2625 | * If removing all access, we must follow the |
2626 | * shadow chain from the top-level object to |
2627 | * remove access to all pages in shadowed objects. |
2628 | * |
2629 | * The object must *not* be locked. The object must |
2630 | * be internal. |
2631 | * |
2632 | * If pmap is not NULL, this routine assumes that |
2633 | * the only mappings for the pages are in that |
2634 | * pmap. |
2635 | */ |
2636 | |
2637 | __private_extern__ void |
2638 | vm_object_pmap_protect( |
2639 | vm_object_t object, |
2640 | vm_object_offset_t offset, |
2641 | vm_object_size_t size, |
2642 | pmap_t pmap, |
2643 | vm_map_offset_t pmap_start, |
2644 | vm_prot_t prot) |
2645 | { |
2646 | vm_object_pmap_protect_options(object, offset, size, |
2647 | pmap, pmap_start, prot, 0); |
2648 | } |
2649 | |
2650 | __private_extern__ void |
2651 | vm_object_pmap_protect_options( |
2652 | vm_object_t object, |
2653 | vm_object_offset_t offset, |
2654 | vm_object_size_t size, |
2655 | pmap_t pmap, |
2656 | vm_map_offset_t pmap_start, |
2657 | vm_prot_t prot, |
2658 | int options) |
2659 | { |
2660 | pmap_flush_context pmap_flush_context_storage; |
2661 | boolean_t delayed_pmap_flush = FALSE; |
2662 | |
2663 | if (object == VM_OBJECT_NULL) |
2664 | return; |
2665 | size = vm_object_round_page(size); |
2666 | offset = vm_object_trunc_page(offset); |
2667 | |
2668 | vm_object_lock(object); |
2669 | |
2670 | if (object->phys_contiguous) { |
2671 | if (pmap != NULL) { |
2672 | vm_object_unlock(object); |
2673 | pmap_protect_options(pmap, |
2674 | pmap_start, |
2675 | pmap_start + size, |
2676 | prot, |
2677 | options & ~PMAP_OPTIONS_NOFLUSH, |
2678 | NULL); |
2679 | } else { |
2680 | vm_object_offset_t phys_start, phys_end, phys_addr; |
2681 | |
2682 | phys_start = object->vo_shadow_offset + offset; |
2683 | phys_end = phys_start + size; |
2684 | assert(phys_start <= phys_end); |
2685 | assert(phys_end <= object->vo_shadow_offset + object->vo_size); |
2686 | vm_object_unlock(object); |
2687 | |
2688 | pmap_flush_context_init(&pmap_flush_context_storage); |
2689 | delayed_pmap_flush = FALSE; |
2690 | |
2691 | for (phys_addr = phys_start; |
2692 | phys_addr < phys_end; |
2693 | phys_addr += PAGE_SIZE_64) { |
2694 | pmap_page_protect_options( |
2695 | (ppnum_t) (phys_addr >> PAGE_SHIFT), |
2696 | prot, |
2697 | options | PMAP_OPTIONS_NOFLUSH, |
2698 | (void *)&pmap_flush_context_storage); |
2699 | delayed_pmap_flush = TRUE; |
2700 | } |
2701 | if (delayed_pmap_flush == TRUE) |
2702 | pmap_flush(&pmap_flush_context_storage); |
2703 | } |
2704 | return; |
2705 | } |
2706 | |
2707 | assert(object->internal); |
2708 | |
2709 | while (TRUE) { |
2710 | if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) { |
2711 | vm_object_unlock(object); |
2712 | pmap_protect_options(pmap, pmap_start, pmap_start + size, prot, |
2713 | options & ~PMAP_OPTIONS_NOFLUSH, NULL); |
2714 | return; |
2715 | } |
2716 | |
2717 | pmap_flush_context_init(&pmap_flush_context_storage); |
2718 | delayed_pmap_flush = FALSE; |
2719 | |
2720 | /* |
		 * If we are doing large ranges with respect to the resident
		 * page count then we should iterate over the pages; otherwise
		 * an inverse page look-up will be faster.
2724 | */ |
2725 | if (ptoa_64(object->resident_page_count / 4) < size) { |
2726 | vm_page_t p; |
2727 | vm_object_offset_t end; |
2728 | |
2729 | end = offset + size; |
2730 | |
2731 | vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { |
2732 | if (!p->vmp_fictitious && (offset <= p->vmp_offset) && (p->vmp_offset < end)) { |
2733 | vm_map_offset_t start; |
2734 | |
2735 | start = pmap_start + p->vmp_offset - offset; |
2736 | |
2737 | if (pmap != PMAP_NULL) |
2738 | pmap_protect_options( |
2739 | pmap, |
2740 | start, |
2741 | start + PAGE_SIZE_64, |
2742 | prot, |
2743 | options | PMAP_OPTIONS_NOFLUSH, |
2744 | &pmap_flush_context_storage); |
2745 | else |
2746 | pmap_page_protect_options( |
2747 | VM_PAGE_GET_PHYS_PAGE(p), |
2748 | prot, |
2749 | options | PMAP_OPTIONS_NOFLUSH, |
2750 | &pmap_flush_context_storage); |
2751 | delayed_pmap_flush = TRUE; |
2752 | } |
2753 | } |
2754 | |
2755 | } else { |
2756 | vm_page_t p; |
2757 | vm_object_offset_t end; |
2758 | vm_object_offset_t target_off; |
2759 | |
2760 | end = offset + size; |
2761 | |
2762 | for (target_off = offset; |
2763 | target_off < end; target_off += PAGE_SIZE) { |
2764 | |
2765 | p = vm_page_lookup(object, target_off); |
2766 | |
2767 | if (p != VM_PAGE_NULL) { |
2768 | vm_object_offset_t start; |
2769 | |
2770 | start = pmap_start + (p->vmp_offset - offset); |
2771 | |
2772 | if (pmap != PMAP_NULL) |
2773 | pmap_protect_options( |
2774 | pmap, |
2775 | start, |
2776 | start + PAGE_SIZE_64, |
2777 | prot, |
2778 | options | PMAP_OPTIONS_NOFLUSH, |
2779 | &pmap_flush_context_storage); |
2780 | else |
2781 | pmap_page_protect_options( |
2782 | VM_PAGE_GET_PHYS_PAGE(p), |
2783 | prot, |
2784 | options | PMAP_OPTIONS_NOFLUSH, |
2785 | &pmap_flush_context_storage); |
2786 | delayed_pmap_flush = TRUE; |
2787 | } |
2788 | } |
2789 | } |
2790 | if (delayed_pmap_flush == TRUE) |
2791 | pmap_flush(&pmap_flush_context_storage); |
2792 | |
2793 | if (prot == VM_PROT_NONE) { |
2794 | /* |
2795 | * Must follow shadow chain to remove access |
2796 | * to pages in shadowed objects. |
2797 | */ |
2798 | vm_object_t next_object; |
2799 | |
2800 | next_object = object->shadow; |
2801 | if (next_object != VM_OBJECT_NULL) { |
2802 | offset += object->vo_shadow_offset; |
2803 | vm_object_lock(next_object); |
2804 | vm_object_unlock(object); |
2805 | object = next_object; |
2806 | } |
2807 | else { |
2808 | /* |
2809 | * End of chain - we are done. |
2810 | */ |
2811 | break; |
2812 | } |
2813 | } |
2814 | else { |
2815 | /* |
2816 | * Pages in shadowed objects may never have |
2817 | * write permission - we may stop here. |
2818 | */ |
2819 | break; |
2820 | } |
2821 | } |
2822 | |
2823 | vm_object_unlock(object); |
2824 | } |
2825 | |
2826 | uint32_t vm_page_busy_absent_skipped = 0; |
2827 | |
2828 | /* |
2829 | * Routine: vm_object_copy_slowly |
2830 | * |
2831 | * Description: |
2832 | * Copy the specified range of the source |
2833 | * virtual memory object without using |
2834 | * protection-based optimizations (such |
2835 | * as copy-on-write). The pages in the |
2836 | * region are actually copied. |
2837 | * |
2838 | * In/out conditions: |
2839 | * The caller must hold a reference and a lock |
2840 | * for the source virtual memory object. The source |
2841 | * object will be returned *unlocked*. |
2842 | * |
2843 | * Results: |
2844 | * If the copy is completed successfully, KERN_SUCCESS is |
2845 | * returned. If the caller asserted the interruptible |
2846 | * argument, and an interruption occurred while waiting |
2847 | * for a user-generated event, MACH_SEND_INTERRUPTED is |
2848 | * returned. Other values may be returned to indicate |
2849 | * hard errors during the copy operation. |
2850 | * |
2851 | * A new virtual memory object is returned in a |
2852 | * parameter (_result_object). The contents of this |
2853 | * new object, starting at a zero offset, are a copy |
2854 | * of the source memory region. In the event of |
2855 | * an error, this parameter will contain the value |
2856 | * VM_OBJECT_NULL. |
2857 | */ |
2858 | __private_extern__ kern_return_t |
2859 | vm_object_copy_slowly( |
2860 | vm_object_t src_object, |
2861 | vm_object_offset_t src_offset, |
2862 | vm_object_size_t size, |
2863 | boolean_t interruptible, |
2864 | vm_object_t *_result_object) /* OUT */ |
2865 | { |
2866 | vm_object_t new_object; |
2867 | vm_object_offset_t new_offset; |
2868 | |
2869 | struct vm_object_fault_info fault_info = {}; |
2870 | |
	XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
2872 | src_object, src_offset, size, 0, 0); |
2873 | |
2874 | if (size == 0) { |
2875 | vm_object_unlock(src_object); |
2876 | *_result_object = VM_OBJECT_NULL; |
2877 | return(KERN_INVALID_ARGUMENT); |
2878 | } |
2879 | |
2880 | /* |
2881 | * Prevent destruction of the source object while we copy. |
2882 | */ |
2883 | |
2884 | vm_object_reference_locked(src_object); |
2885 | vm_object_unlock(src_object); |
2886 | |
2887 | /* |
2888 | * Create a new object to hold the copied pages. |
2889 | * A few notes: |
2890 | * We fill the new object starting at offset 0, |
2891 | * regardless of the input offset. |
2892 | * We don't bother to lock the new object within |
2893 | * this routine, since we have the only reference. |
2894 | */ |
2895 | |
2896 | new_object = vm_object_allocate(size); |
2897 | new_offset = 0; |
2898 | |
2899 | assert(size == trunc_page_64(size)); /* Will the loop terminate? */ |
2900 | |
2901 | fault_info.interruptible = interruptible; |
2902 | fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; |
2903 | fault_info.lo_offset = src_offset; |
2904 | fault_info.hi_offset = src_offset + size; |
2905 | fault_info.stealth = TRUE; |
2906 | |
2907 | for ( ; |
2908 | size != 0 ; |
2909 | src_offset += PAGE_SIZE_64, |
2910 | new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64 |
2911 | ) { |
2912 | vm_page_t new_page; |
2913 | vm_fault_return_t result; |
2914 | |
2915 | vm_object_lock(new_object); |
2916 | |
2917 | while ((new_page = vm_page_alloc(new_object, new_offset)) |
2918 | == VM_PAGE_NULL) { |
2919 | |
2920 | vm_object_unlock(new_object); |
2921 | |
2922 | if (!vm_page_wait(interruptible)) { |
2923 | vm_object_deallocate(new_object); |
2924 | vm_object_deallocate(src_object); |
2925 | *_result_object = VM_OBJECT_NULL; |
2926 | return(MACH_SEND_INTERRUPTED); |
2927 | } |
2928 | vm_object_lock(new_object); |
2929 | } |
2930 | vm_object_unlock(new_object); |
2931 | |
2932 | do { |
2933 | vm_prot_t prot = VM_PROT_READ; |
2934 | vm_page_t _result_page; |
2935 | vm_page_t top_page; |
2936 | vm_page_t result_page; |
2937 | kern_return_t error_code; |
2938 | vm_object_t result_page_object; |
2939 | |
2940 | |
2941 | vm_object_lock(src_object); |
2942 | |
2943 | if (src_object->internal && |
2944 | src_object->shadow == VM_OBJECT_NULL && |
2945 | (src_object->pager == NULL || |
2946 | (VM_COMPRESSOR_PAGER_STATE_GET(src_object, |
2947 | src_offset) == |
2948 | VM_EXTERNAL_STATE_ABSENT))) { |
2949 | boolean_t can_skip_page; |
2950 | |
2951 | _result_page = vm_page_lookup(src_object, |
2952 | src_offset); |
2953 | if (_result_page == VM_PAGE_NULL) { |
2954 | /* |
2955 | * This page is neither resident nor |
2956 | * compressed and there's no shadow |
2957 | * object below "src_object", so this |
2958 | * page is really missing. |
2959 | * There's no need to zero-fill it just |
2960 | * to copy it: let's leave it missing |
2961 | * in "new_object" and get zero-filled |
2962 | * on demand. |
2963 | */ |
2964 | can_skip_page = TRUE; |
2965 | } else if (workaround_41447923 && |
2966 | src_object->pager == NULL && |
2967 | _result_page != VM_PAGE_NULL && |
2968 | _result_page->vmp_busy && |
2969 | _result_page->vmp_absent && |
2970 | src_object->purgable == VM_PURGABLE_DENY && |
2971 | !src_object->blocked_access) { |
2972 | /* |
2973 | * This page is "busy" and "absent" |
2974 | * but not because we're waiting for |
2975 | * it to be decompressed. It must |
2976 | * be because it's a "no zero fill" |
2977 | * page that is currently not |
2978 | * accessible until it gets overwritten |
2979 | * by a device driver. |
2980 | * Since its initial state would have |
2981 | * been "zero-filled", let's leave the |
2982 | * copy page missing and get zero-filled |
2983 | * on demand. |
2984 | */ |
2985 | assert(src_object->internal); |
2986 | assert(src_object->shadow == NULL); |
2987 | assert(src_object->pager == NULL); |
2988 | can_skip_page = TRUE; |
2989 | vm_page_busy_absent_skipped++; |
2990 | } else { |
2991 | can_skip_page = FALSE; |
2992 | } |
2993 | if (can_skip_page) { |
2994 | vm_object_unlock(src_object); |
2995 | /* free the unused "new_page"... */ |
2996 | vm_object_lock(new_object); |
2997 | VM_PAGE_FREE(new_page); |
2998 | new_page = VM_PAGE_NULL; |
2999 | vm_object_unlock(new_object); |
3000 | /* ...and go to next page in "src_object" */ |
3001 | result = VM_FAULT_SUCCESS; |
3002 | break; |
3003 | } |
3004 | } |
3005 | |
3006 | vm_object_paging_begin(src_object); |
3007 | |
3008 | /* cap size at maximum UPL size */ |
3009 | upl_size_t cluster_size; |
3010 | if (os_convert_overflow(size, &cluster_size)) { |
3011 | cluster_size = 0 - (upl_size_t)PAGE_SIZE; |
3012 | } |
3013 | fault_info.cluster_size = cluster_size; |
3014 | |
			XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
3016 | _result_page = VM_PAGE_NULL; |
3017 | result = vm_fault_page(src_object, src_offset, |
3018 | VM_PROT_READ, FALSE, |
3019 | FALSE, /* page not looked up */ |
3020 | &prot, &_result_page, &top_page, |
3021 | (int *)0, |
3022 | &error_code, FALSE, FALSE, &fault_info); |
3023 | |
3024 | switch(result) { |
3025 | case VM_FAULT_SUCCESS: |
3026 | result_page = _result_page; |
3027 | result_page_object = VM_PAGE_OBJECT(result_page); |
3028 | |
3029 | /* |
3030 | * Copy the page to the new object. |
3031 | * |
3032 | * POLICY DECISION: |
3033 | * If result_page is clean, |
3034 | * we could steal it instead |
3035 | * of copying. |
3036 | */ |
3037 | |
3038 | vm_page_copy(result_page, new_page); |
3039 | vm_object_unlock(result_page_object); |
3040 | |
3041 | /* |
3042 | * Let go of both pages (make them |
3043 | * not busy, perform wakeup, activate). |
3044 | */ |
3045 | vm_object_lock(new_object); |
3046 | SET_PAGE_DIRTY(new_page, FALSE); |
3047 | PAGE_WAKEUP_DONE(new_page); |
3048 | vm_object_unlock(new_object); |
3049 | |
3050 | vm_object_lock(result_page_object); |
3051 | PAGE_WAKEUP_DONE(result_page); |
3052 | |
3053 | vm_page_lockspin_queues(); |
3054 | if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) || |
3055 | (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) { |
3056 | vm_page_activate(result_page); |
3057 | } |
3058 | vm_page_activate(new_page); |
3059 | vm_page_unlock_queues(); |
3060 | |
3061 | /* |
3062 | * Release paging references and |
3063 | * top-level placeholder page, if any. |
3064 | */ |
3065 | |
3066 | vm_fault_cleanup(result_page_object, |
3067 | top_page); |
3068 | |
3069 | break; |
3070 | |
3071 | case VM_FAULT_RETRY: |
3072 | break; |
3073 | |
3074 | case VM_FAULT_MEMORY_SHORTAGE: |
3075 | if (vm_page_wait(interruptible)) |
3076 | break; |
3077 | /* fall thru */ |
3078 | |
3079 | case VM_FAULT_INTERRUPTED: |
3080 | vm_object_lock(new_object); |
3081 | VM_PAGE_FREE(new_page); |
3082 | vm_object_unlock(new_object); |
3083 | |
3084 | vm_object_deallocate(new_object); |
3085 | vm_object_deallocate(src_object); |
3086 | *_result_object = VM_OBJECT_NULL; |
3087 | return(MACH_SEND_INTERRUPTED); |
3088 | |
3089 | case VM_FAULT_SUCCESS_NO_VM_PAGE: |
3090 | /* success but no VM page: fail */ |
3091 | vm_object_paging_end(src_object); |
3092 | vm_object_unlock(src_object); |
3093 | /*FALLTHROUGH*/ |
3094 | case VM_FAULT_MEMORY_ERROR: |
3095 | /* |
3096 | * A policy choice: |
3097 | * (a) ignore pages that we can't |
3098 | * copy |
3099 | * (b) return the null object if |
3100 | * any page fails [chosen] |
3101 | */ |
3102 | |
3103 | vm_object_lock(new_object); |
3104 | VM_PAGE_FREE(new_page); |
3105 | vm_object_unlock(new_object); |
3106 | |
3107 | vm_object_deallocate(new_object); |
3108 | vm_object_deallocate(src_object); |
3109 | *_result_object = VM_OBJECT_NULL; |
3110 | return(error_code ? error_code: |
3111 | KERN_MEMORY_ERROR); |
3112 | |
3113 | default: |
			panic("vm_object_copy_slowly: unexpected error"
			      " 0x%x from vm_fault_page()\n", result);
3116 | } |
3117 | } while (result != VM_FAULT_SUCCESS); |
3118 | } |
3119 | |
3120 | /* |
3121 | * Lose the extra reference, and return our object. |
3122 | */ |
3123 | vm_object_deallocate(src_object); |
3124 | *_result_object = new_object; |
3125 | return(KERN_SUCCESS); |
3126 | } |
3127 | |
3128 | /* |
3129 | * Routine: vm_object_copy_quickly |
3130 | * |
3131 | * Purpose: |
3132 | * Copy the specified range of the source virtual |
3133 | * memory object, if it can be done without waiting |
3134 | * for user-generated events. |
3135 | * |
3136 | * Results: |
3137 | * If the copy is successful, the copy is returned in |
3138 | * the arguments; otherwise, the arguments are not |
3139 | * affected. |
3140 | * |
3141 | * In/out conditions: |
3142 | * The object should be unlocked on entry and exit. |
3143 | */ |
3144 | |
3145 | /*ARGSUSED*/ |
3146 | __private_extern__ boolean_t |
3147 | vm_object_copy_quickly( |
3148 | vm_object_t *_object, /* INOUT */ |
3149 | __unused vm_object_offset_t offset, /* IN */ |
3150 | __unused vm_object_size_t size, /* IN */ |
3151 | boolean_t *_src_needs_copy, /* OUT */ |
3152 | boolean_t *_dst_needs_copy) /* OUT */ |
3153 | { |
3154 | vm_object_t object = *_object; |
3155 | memory_object_copy_strategy_t copy_strategy; |
3156 | |
	XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
3158 | *_object, offset, size, 0, 0); |
3159 | if (object == VM_OBJECT_NULL) { |
3160 | *_src_needs_copy = FALSE; |
3161 | *_dst_needs_copy = FALSE; |
3162 | return(TRUE); |
3163 | } |
3164 | |
3165 | vm_object_lock(object); |
3166 | |
3167 | copy_strategy = object->copy_strategy; |
3168 | |
3169 | switch (copy_strategy) { |
3170 | case MEMORY_OBJECT_COPY_SYMMETRIC: |
3171 | |
3172 | /* |
3173 | * Symmetric copy strategy. |
3174 | * Make another reference to the object. |
3175 | * Leave object/offset unchanged. |
3176 | */ |
3177 | |
3178 | vm_object_reference_locked(object); |
3179 | object->shadowed = TRUE; |
3180 | vm_object_unlock(object); |
3181 | |
3182 | /* |
3183 | * Both source and destination must make |
3184 | * shadows, and the source must be made |
3185 | * read-only if not already. |
3186 | */ |
3187 | |
3188 | *_src_needs_copy = TRUE; |
3189 | *_dst_needs_copy = TRUE; |
3190 | |
3191 | break; |
3192 | |
3193 | case MEMORY_OBJECT_COPY_DELAY: |
3194 | vm_object_unlock(object); |
3195 | return(FALSE); |
3196 | |
3197 | default: |
3198 | vm_object_unlock(object); |
3199 | return(FALSE); |
3200 | } |
3201 | return(TRUE); |
3202 | } |
3203 | |
3204 | static int copy_call_count = 0; |
3205 | static int copy_call_sleep_count = 0; |
3206 | static int copy_call_restart_count = 0; |
3207 | |
3208 | /* |
3209 | * Routine: vm_object_copy_call [internal] |
3210 | * |
3211 | * Description: |
3212 | * Copy the source object (src_object), using the |
3213 | * user-managed copy algorithm. |
3214 | * |
3215 | * In/out conditions: |
3216 | * The source object must be locked on entry. It |
3217 | * will be *unlocked* on exit. |
3218 | * |
3219 | * Results: |
3220 | * If the copy is successful, KERN_SUCCESS is returned. |
3221 | * A new object that represents the copied virtual |
3222 | * memory is returned in a parameter (*_result_object). |
3223 | * If the return value indicates an error, this parameter |
3224 | * is not valid. |
3225 | */ |
3226 | static kern_return_t |
3227 | vm_object_copy_call( |
3228 | vm_object_t src_object, |
3229 | vm_object_offset_t src_offset, |
3230 | vm_object_size_t size, |
3231 | vm_object_t *_result_object) /* OUT */ |
3232 | { |
3233 | kern_return_t kr; |
3234 | vm_object_t copy; |
3235 | boolean_t check_ready = FALSE; |
3236 | uint32_t try_failed_count = 0; |
3237 | |
3238 | /* |
3239 | * If a copy is already in progress, wait and retry. |
3240 | * |
3241 | * XXX |
	 * Consider making this call interruptible, as Mike
3243 | * intended it to be. |
3244 | * |
3245 | * XXXO |
3246 | * Need a counter or version or something to allow |
3247 | * us to use the copy that the currently requesting |
3248 | * thread is obtaining -- is it worth adding to the |
	 * vm object structure? Depends on how common this case is.
3250 | */ |
3251 | copy_call_count++; |
3252 | while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) { |
3253 | vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL, |
3254 | THREAD_UNINT); |
3255 | copy_call_restart_count++; |
3256 | } |
3257 | |
3258 | /* |
3259 | * Indicate (for the benefit of memory_object_create_copy) |
3260 | * that we want a copy for src_object. (Note that we cannot |
3261 | * do a real assert_wait before calling memory_object_copy, |
3262 | * so we simply set the flag.) |
3263 | */ |
3264 | |
3265 | vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL); |
3266 | vm_object_unlock(src_object); |
3267 | |
3268 | /* |
3269 | * Ask the memory manager to give us a memory object |
3270 | * which represents a copy of the src object. |
3271 | * The memory manager may give us a memory object |
3272 | * which we already have, or it may give us a |
3273 | * new memory object. This memory object will arrive |
3274 | * via memory_object_create_copy. |
3275 | */ |
3276 | |
3277 | kr = KERN_FAILURE; /* XXX need to change memory_object.defs */ |
3278 | if (kr != KERN_SUCCESS) { |
3279 | return kr; |
3280 | } |
3281 | |
3282 | /* |
3283 | * Wait for the copy to arrive. |
3284 | */ |
3285 | vm_object_lock(src_object); |
3286 | while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) { |
3287 | vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL, |
3288 | THREAD_UNINT); |
3289 | copy_call_sleep_count++; |
3290 | } |
3291 | Retry: |
3292 | assert(src_object->copy != VM_OBJECT_NULL); |
3293 | copy = src_object->copy; |
3294 | if (!vm_object_lock_try(copy)) { |
3295 | vm_object_unlock(src_object); |
3296 | |
3297 | try_failed_count++; |
3298 | mutex_pause(try_failed_count); /* wait a bit */ |
3299 | |
3300 | vm_object_lock(src_object); |
3301 | goto Retry; |
3302 | } |
3303 | if (copy->vo_size < src_offset+size) |
3304 | copy->vo_size = src_offset+size; |
3305 | |
3306 | if (!copy->pager_ready) |
3307 | check_ready = TRUE; |
3308 | |
3309 | /* |
3310 | * Return the copy. |
3311 | */ |
3312 | *_result_object = copy; |
3313 | vm_object_unlock(copy); |
3314 | vm_object_unlock(src_object); |
3315 | |
3316 | /* Wait for the copy to be ready. */ |
3317 | if (check_ready == TRUE) { |
3318 | vm_object_lock(copy); |
3319 | while (!copy->pager_ready) { |
3320 | vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT); |
3321 | } |
3322 | vm_object_unlock(copy); |
3323 | } |
3324 | |
3325 | return KERN_SUCCESS; |
3326 | } |
3327 | |
3328 | static int copy_delayed_lock_collisions = 0; |
3329 | static int copy_delayed_max_collisions = 0; |
3330 | static int copy_delayed_lock_contention = 0; |
3331 | static int copy_delayed_protect_iterate = 0; |
3332 | |
3333 | /* |
3334 | * Routine: vm_object_copy_delayed [internal] |
3335 | * |
3336 | * Description: |
3337 | * Copy the specified virtual memory object, using |
3338 | * the asymmetric copy-on-write algorithm. |
3339 | * |
3340 | * In/out conditions: |
3341 | * The src_object must be locked on entry. It will be unlocked |
3342 | * on exit - so the caller must also hold a reference to it. |
3343 | * |
3344 | * This routine will not block waiting for user-generated |
3345 | * events. It is not interruptible. |
3346 | */ |
3347 | __private_extern__ vm_object_t |
3348 | vm_object_copy_delayed( |
3349 | vm_object_t src_object, |
3350 | vm_object_offset_t src_offset, |
3351 | vm_object_size_t size, |
3352 | boolean_t src_object_shared) |
3353 | { |
3354 | vm_object_t new_copy = VM_OBJECT_NULL; |
3355 | vm_object_t old_copy; |
3356 | vm_page_t p; |
3357 | vm_object_size_t copy_size = src_offset + size; |
3358 | pmap_flush_context pmap_flush_context_storage; |
3359 | boolean_t delayed_pmap_flush = FALSE; |
3360 | |
3361 | |
3362 | int collisions = 0; |
3363 | /* |
3364 | * The user-level memory manager wants to see all of the changes |
3365 | * to this object, but it has promised not to make any changes on |
3366 | * its own. |
3367 | * |
3368 | * Perform an asymmetric copy-on-write, as follows: |
3369 | * Create a new object, called a "copy object" to hold |
3370 | * pages modified by the new mapping (i.e., the copy, |
3371 | * not the original mapping). |
3372 | * Record the original object as the backing object for |
3373 | * the copy object. If the original mapping does not |
3374 | * change a page, it may be used read-only by the copy. |
3375 | * Record the copy object in the original object. |
3376 | * When the original mapping causes a page to be modified, |
3377 | * it must be copied to a new page that is "pushed" to |
3378 | * the copy object. |
3379 | * Mark the new mapping (the copy object) copy-on-write. |
3380 | * This makes the copy object itself read-only, allowing |
3381 | * it to be reused if the original mapping makes no |
3382 | * changes, and simplifying the synchronization required |
3383 | * in the "push" operation described above. |
3384 | * |
* The copy-on-write is said to be asymmetric because the original
* object is *not* marked copy-on-write. A copied page is pushed
* to the copy object, regardless of which party attempted to modify
3388 | * the page. |
3389 | * |
3390 | * Repeated asymmetric copy operations may be done. If the |
3391 | * original object has not been changed since the last copy, its |
3392 | * copy object can be reused. Otherwise, a new copy object can be |
3393 | * inserted between the original object and its previous copy |
* object. Since any copy object is read-only, this cannot
* affect the contents of the previous copy object.
3396 | * |
3397 | * Note that a copy object is higher in the object tree than the |
3398 | * original object; therefore, use of the copy object recorded in |
3399 | * the original object must be done carefully, to avoid deadlock. |
3400 | */ |
3401 | |
3402 | copy_size = vm_object_round_page(copy_size); |
3403 | Retry: |
3404 | |
3405 | /* |
3406 | * Wait for paging in progress. |
3407 | */ |
3408 | if (!src_object->true_share && |
3409 | (src_object->paging_in_progress != 0 || |
3410 | src_object->activity_in_progress != 0)) { |
3411 | if (src_object_shared == TRUE) { |
3412 | vm_object_unlock(src_object); |
3413 | vm_object_lock(src_object); |
3414 | src_object_shared = FALSE; |
3415 | goto Retry; |
3416 | } |
3417 | vm_object_paging_wait(src_object, THREAD_UNINT); |
3418 | } |
3419 | /* |
3420 | * See whether we can reuse the result of a previous |
3421 | * copy operation. |
3422 | */ |
3423 | |
3424 | old_copy = src_object->copy; |
3425 | if (old_copy != VM_OBJECT_NULL) { |
3426 | int lock_granted; |
3427 | |
3428 | /* |
3429 | * Try to get the locks (out of order) |
3430 | */ |
3431 | if (src_object_shared == TRUE) |
3432 | lock_granted = vm_object_lock_try_shared(old_copy); |
3433 | else |
3434 | lock_granted = vm_object_lock_try(old_copy); |
3435 | |
3436 | if (!lock_granted) { |
3437 | vm_object_unlock(src_object); |
3438 | |
3439 | if (collisions++ == 0) |
3440 | copy_delayed_lock_contention++; |
3441 | mutex_pause(collisions); |
3442 | |
3443 | /* Heisenberg Rules */ |
3444 | copy_delayed_lock_collisions++; |
3445 | |
3446 | if (collisions > copy_delayed_max_collisions) |
3447 | copy_delayed_max_collisions = collisions; |
3448 | |
3449 | if (src_object_shared == TRUE) |
3450 | vm_object_lock_shared(src_object); |
3451 | else |
3452 | vm_object_lock(src_object); |
3453 | |
3454 | goto Retry; |
3455 | } |
3456 | |
3457 | /* |
3458 | * Determine whether the old copy object has |
3459 | * been modified. |
3460 | */ |
3461 | |
3462 | if (old_copy->resident_page_count == 0 && |
3463 | !old_copy->pager_created) { |
3464 | /* |
3465 | * It has not been modified. |
3466 | * |
3467 | * Return another reference to |
3468 | * the existing copy-object if |
3469 | * we can safely grow it (if |
3470 | * needed). |
3471 | */ |
3472 | |
3473 | if (old_copy->vo_size < copy_size) { |
3474 | if (src_object_shared == TRUE) { |
3475 | vm_object_unlock(old_copy); |
3476 | vm_object_unlock(src_object); |
3477 | |
3478 | vm_object_lock(src_object); |
3479 | src_object_shared = FALSE; |
3480 | goto Retry; |
3481 | } |
3482 | /* |
3483 | * We can't perform a delayed copy if any of the |
3484 | * pages in the extended range are wired (because |
3485 | * we can't safely take write permission away from |
3486 | * wired pages). If the pages aren't wired, then |
3487 | * go ahead and protect them. |
3488 | */ |
3489 | copy_delayed_protect_iterate++; |
3490 | |
3491 | pmap_flush_context_init(&pmap_flush_context_storage); |
3492 | delayed_pmap_flush = FALSE; |
3493 | |
3494 | vm_page_queue_iterate(&src_object->memq, p, vm_page_t, vmp_listq) { |
3495 | if (!p->vmp_fictitious && |
3496 | p->vmp_offset >= old_copy->vo_size && |
3497 | p->vmp_offset < copy_size) { |
3498 | if (VM_PAGE_WIRED(p)) { |
3499 | vm_object_unlock(old_copy); |
3500 | vm_object_unlock(src_object); |
3501 | |
3502 | if (new_copy != VM_OBJECT_NULL) { |
3503 | vm_object_unlock(new_copy); |
3504 | vm_object_deallocate(new_copy); |
3505 | } |
3506 | if (delayed_pmap_flush == TRUE) |
3507 | pmap_flush(&pmap_flush_context_storage); |
3508 | |
3509 | return VM_OBJECT_NULL; |
3510 | } else { |
3511 | pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE), |
3512 | PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); |
3513 | delayed_pmap_flush = TRUE; |
3514 | } |
3515 | } |
3516 | } |
3517 | if (delayed_pmap_flush == TRUE) |
3518 | pmap_flush(&pmap_flush_context_storage); |
3519 | |
3520 | old_copy->vo_size = copy_size; |
3521 | } |
3522 | if (src_object_shared == TRUE) |
3523 | vm_object_reference_shared(old_copy); |
3524 | else |
3525 | vm_object_reference_locked(old_copy); |
3526 | vm_object_unlock(old_copy); |
3527 | vm_object_unlock(src_object); |
3528 | |
3529 | if (new_copy != VM_OBJECT_NULL) { |
3530 | vm_object_unlock(new_copy); |
3531 | vm_object_deallocate(new_copy); |
3532 | } |
3533 | return(old_copy); |
3534 | } |
3535 | |
3536 | |
3537 | |
3538 | /* |
3539 | * Adjust the size argument so that the newly-created |
3540 | * copy object will be large enough to back either the |
3541 | * old copy object or the new mapping. |
3542 | */ |
3543 | if (old_copy->vo_size > copy_size) |
3544 | copy_size = old_copy->vo_size; |
3545 | |
3546 | if (new_copy == VM_OBJECT_NULL) { |
3547 | vm_object_unlock(old_copy); |
3548 | vm_object_unlock(src_object); |
3549 | new_copy = vm_object_allocate(copy_size); |
3550 | vm_object_lock(src_object); |
3551 | vm_object_lock(new_copy); |
3552 | |
3553 | src_object_shared = FALSE; |
3554 | goto Retry; |
3555 | } |
3556 | new_copy->vo_size = copy_size; |
3557 | |
3558 | /* |
3559 | * The copy-object is always made large enough to |
3560 | * completely shadow the original object, since |
3561 | * it may have several users who want to shadow |
3562 | * the original object at different points. |
3563 | */ |
3564 | |
3565 | assert((old_copy->shadow == src_object) && |
3566 | (old_copy->vo_shadow_offset == (vm_object_offset_t) 0)); |
3567 | |
3568 | } else if (new_copy == VM_OBJECT_NULL) { |
3569 | vm_object_unlock(src_object); |
3570 | new_copy = vm_object_allocate(copy_size); |
3571 | vm_object_lock(src_object); |
3572 | vm_object_lock(new_copy); |
3573 | |
3574 | src_object_shared = FALSE; |
3575 | goto Retry; |
3576 | } |
3577 | |
3578 | /* |
3579 | * We now have the src object locked, and the new copy object |
3580 | * allocated and locked (and potentially the old copy locked). |
3581 | * Before we go any further, make sure we can still perform |
3582 | * a delayed copy, as the situation may have changed. |
3583 | * |
3584 | * Specifically, we can't perform a delayed copy if any of the |
3585 | * pages in the range are wired (because we can't safely take |
3586 | * write permission away from wired pages). If the pages aren't |
3587 | * wired, then go ahead and protect them. |
3588 | */ |
3589 | copy_delayed_protect_iterate++; |
3590 | |
3591 | pmap_flush_context_init(&pmap_flush_context_storage); |
3592 | delayed_pmap_flush = FALSE; |
3593 | |
3594 | vm_page_queue_iterate(&src_object->memq, p, vm_page_t, vmp_listq) { |
3595 | if (!p->vmp_fictitious && p->vmp_offset < copy_size) { |
3596 | if (VM_PAGE_WIRED(p)) { |
3597 | if (old_copy) |
3598 | vm_object_unlock(old_copy); |
3599 | vm_object_unlock(src_object); |
3600 | vm_object_unlock(new_copy); |
3601 | vm_object_deallocate(new_copy); |
3602 | |
3603 | if (delayed_pmap_flush == TRUE) |
3604 | pmap_flush(&pmap_flush_context_storage); |
3605 | |
3606 | return VM_OBJECT_NULL; |
3607 | } else { |
3608 | pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE), |
3609 | PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); |
3610 | delayed_pmap_flush = TRUE; |
3611 | } |
3612 | } |
3613 | } |
3614 | if (delayed_pmap_flush == TRUE) |
3615 | pmap_flush(&pmap_flush_context_storage); |
3616 | |
3617 | if (old_copy != VM_OBJECT_NULL) { |
3618 | /* |
3619 | * Make the old copy-object shadow the new one. |
3620 | * It will receive no more pages from the original |
3621 | * object. |
3622 | */ |
3623 | |
3624 | /* remove ref. from old_copy */ |
3625 | vm_object_lock_assert_exclusive(src_object); |
3626 | src_object->ref_count--; |
3627 | assert(src_object->ref_count > 0); |
3628 | vm_object_lock_assert_exclusive(old_copy); |
3629 | old_copy->shadow = new_copy; |
3630 | vm_object_lock_assert_exclusive(new_copy); |
3631 | assert(new_copy->ref_count > 0); |
3632 | new_copy->ref_count++; /* for old_copy->shadow ref. */ |
3633 | |
3634 | #if TASK_SWAPPER |
3635 | if (old_copy->res_count) { |
3636 | VM_OBJ_RES_INCR(new_copy); |
3637 | VM_OBJ_RES_DECR(src_object); |
3638 | } |
3639 | #endif |
3640 | |
3641 | vm_object_unlock(old_copy); /* done with old_copy */ |
3642 | } |
3643 | |
3644 | /* |
3645 | * Point the new copy at the existing object. |
3646 | */ |
3647 | vm_object_lock_assert_exclusive(new_copy); |
3648 | new_copy->shadow = src_object; |
3649 | new_copy->vo_shadow_offset = 0; |
3650 | new_copy->shadowed = TRUE; /* caller must set needs_copy */ |
3651 | |
3652 | vm_object_lock_assert_exclusive(src_object); |
3653 | vm_object_reference_locked(src_object); |
3654 | src_object->copy = new_copy; |
3655 | vm_object_unlock(src_object); |
3656 | vm_object_unlock(new_copy); |
3657 | |
XPR(XPR_VM_OBJECT,
"vm_object_copy_delayed: used copy object %X for source %X\n",
new_copy, src_object, 0, 0, 0);
3661 | |
3662 | return new_copy; |
3663 | } |
3664 | |
3665 | /* |
3666 | * Routine: vm_object_copy_strategically |
3667 | * |
3668 | * Purpose: |
3669 | * Perform a copy according to the source object's |
3670 | * declared strategy. This operation may block, |
3671 | * and may be interrupted. |
3672 | */ |
3673 | __private_extern__ kern_return_t |
3674 | vm_object_copy_strategically( |
3675 | vm_object_t src_object, |
3676 | vm_object_offset_t src_offset, |
3677 | vm_object_size_t size, |
3678 | vm_object_t *dst_object, /* OUT */ |
3679 | vm_object_offset_t *dst_offset, /* OUT */ |
3680 | boolean_t *dst_needs_copy) /* OUT */ |
3681 | { |
3682 | boolean_t result; |
3683 | boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */ |
3684 | boolean_t object_lock_shared = FALSE; |
3685 | memory_object_copy_strategy_t copy_strategy; |
3686 | |
3687 | assert(src_object != VM_OBJECT_NULL); |
3688 | |
3689 | copy_strategy = src_object->copy_strategy; |
3690 | |
3691 | if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) { |
3692 | vm_object_lock_shared(src_object); |
3693 | object_lock_shared = TRUE; |
3694 | } else |
3695 | vm_object_lock(src_object); |
3696 | |
3697 | /* |
3698 | * The copy strategy is only valid if the memory manager |
3699 | * is "ready". Internal objects are always ready. |
3700 | */ |
3701 | |
3702 | while (!src_object->internal && !src_object->pager_ready) { |
3703 | wait_result_t wait_result; |
3704 | |
3705 | if (object_lock_shared == TRUE) { |
3706 | vm_object_unlock(src_object); |
3707 | vm_object_lock(src_object); |
3708 | object_lock_shared = FALSE; |
3709 | continue; |
3710 | } |
3711 | wait_result = vm_object_sleep( src_object, |
3712 | VM_OBJECT_EVENT_PAGER_READY, |
3713 | interruptible); |
3714 | if (wait_result != THREAD_AWAKENED) { |
3715 | vm_object_unlock(src_object); |
3716 | *dst_object = VM_OBJECT_NULL; |
3717 | *dst_offset = 0; |
3718 | *dst_needs_copy = FALSE; |
3719 | return(MACH_SEND_INTERRUPTED); |
3720 | } |
3721 | } |
3722 | |
3723 | /* |
3724 | * Use the appropriate copy strategy. |
3725 | */ |
3726 | |
3727 | switch (copy_strategy) { |
3728 | case MEMORY_OBJECT_COPY_DELAY: |
3729 | *dst_object = vm_object_copy_delayed(src_object, |
3730 | src_offset, size, object_lock_shared); |
3731 | if (*dst_object != VM_OBJECT_NULL) { |
3732 | *dst_offset = src_offset; |
3733 | *dst_needs_copy = TRUE; |
3734 | result = KERN_SUCCESS; |
3735 | break; |
3736 | } |
3737 | vm_object_lock(src_object); |
3738 | /* fall thru when delayed copy not allowed */ |
3739 | |
3740 | case MEMORY_OBJECT_COPY_NONE: |
3741 | result = vm_object_copy_slowly(src_object, src_offset, size, |
3742 | interruptible, dst_object); |
3743 | if (result == KERN_SUCCESS) { |
3744 | *dst_offset = 0; |
3745 | *dst_needs_copy = FALSE; |
3746 | } |
3747 | break; |
3748 | |
3749 | case MEMORY_OBJECT_COPY_CALL: |
3750 | result = vm_object_copy_call(src_object, src_offset, size, |
3751 | dst_object); |
3752 | if (result == KERN_SUCCESS) { |
3753 | *dst_offset = src_offset; |
3754 | *dst_needs_copy = TRUE; |
3755 | } |
3756 | break; |
3757 | |
3758 | case MEMORY_OBJECT_COPY_SYMMETRIC: |
XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object, src_offset, size, 0, 0);
3760 | vm_object_unlock(src_object); |
3761 | result = KERN_MEMORY_RESTART_COPY; |
3762 | break; |
3763 | |
3764 | default: |
panic("copy_strategically: bad strategy");
3766 | result = KERN_INVALID_ARGUMENT; |
3767 | } |
3768 | return(result); |
3769 | } |
3770 | |
3771 | /* |
3772 | * vm_object_shadow: |
3773 | * |
3774 | * Create a new object which is backed by the |
3775 | * specified existing object range. The source |
3776 | * object reference is deallocated. |
3777 | * |
3778 | * The new object and offset into that object |
3779 | * are returned in the source parameters. |
3780 | */ |
3781 | boolean_t vm_object_shadow_check = TRUE; |
3782 | |
3783 | __private_extern__ boolean_t |
3784 | vm_object_shadow( |
3785 | vm_object_t *object, /* IN/OUT */ |
3786 | vm_object_offset_t *offset, /* IN/OUT */ |
3787 | vm_object_size_t length) |
3788 | { |
3789 | vm_object_t source; |
3790 | vm_object_t result; |
3791 | |
3792 | source = *object; |
3793 | assert(source != VM_OBJECT_NULL); |
3794 | if (source == VM_OBJECT_NULL) |
3795 | return FALSE; |
3796 | |
3797 | #if 0 |
3798 | /* |
3799 | * XXX FBDP |
3800 | * This assertion is valid but it gets triggered by Rosetta for example |
3801 | * due to a combination of vm_remap() that changes a VM object's |
3802 | * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY) |
3803 | * that then sets "needs_copy" on its map entry. This creates a |
3804 | * mapping situation that VM should never see and doesn't know how to |
3805 | * handle. |
3806 | * It's not clear if this can create any real problem but we should |
3807 | * look into fixing this, probably by having vm_protect(VM_PROT_COPY) |
3808 | * do more than just set "needs_copy" to handle the copy-on-write... |
3809 | * In the meantime, let's disable the assertion. |
3810 | */ |
3811 | assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC); |
3812 | #endif |
3813 | |
3814 | /* |
3815 | * Determine if we really need a shadow. |
3816 | * |
3817 | * If the source object is larger than what we are trying |
3818 | * to create, then force the shadow creation even if the |
3819 | * ref count is 1. This will allow us to [potentially] |
3820 | * collapse the underlying object away in the future |
3821 | * (freeing up the extra data it might contain and that |
3822 | * we don't need). |
3823 | */ |
3824 | |
3825 | assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */ |
3826 | |
3827 | if (vm_object_shadow_check && |
3828 | source->vo_size == length && |
3829 | source->ref_count == 1 && |
3830 | (source->shadow == VM_OBJECT_NULL || |
3831 | source->shadow->copy == VM_OBJECT_NULL) ) |
3832 | { |
3833 | /* lock the object and check again */ |
3834 | vm_object_lock(source); |
3835 | if (source->vo_size == length && |
3836 | source->ref_count == 1 && |
3837 | (source->shadow == VM_OBJECT_NULL || |
3838 | source->shadow->copy == VM_OBJECT_NULL)) |
3839 | { |
3840 | source->shadowed = FALSE; |
3841 | vm_object_unlock(source); |
3842 | return FALSE; |
3843 | } |
3844 | /* things changed while we were locking "source"... */ |
3845 | vm_object_unlock(source); |
3846 | } |
3847 | |
3848 | /* |
3849 | * Allocate a new object with the given length |
3850 | */ |
3851 | |
3852 | if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) |
panic("vm_object_shadow: no object for shadowing");
3854 | |
3855 | /* |
3856 | * The new object shadows the source object, adding |
3857 | * a reference to it. Our caller changes his reference |
3858 | * to point to the new object, removing a reference to |
3859 | * the source object. Net result: no change of reference |
3860 | * count. |
3861 | */ |
3862 | result->shadow = source; |
3863 | |
3864 | /* |
3865 | * Store the offset into the source object, |
3866 | * and fix up the offset into the new object. |
3867 | */ |
3868 | |
3869 | result->vo_shadow_offset = *offset; |
3870 | |
3871 | /* |
* Return the new object and offset.
3873 | */ |
3874 | |
3875 | *offset = 0; |
3876 | *object = result; |
3877 | return TRUE; |
3878 | } |
3879 | |
3880 | /* |
3881 | * The relationship between vm_object structures and |
3882 | * the memory_object requires careful synchronization. |
3883 | * |
3884 | * All associations are created by memory_object_create_named |
3885 | * for external pagers and vm_object_compressor_pager_create for internal |
3886 | * objects as follows: |
3887 | * |
3888 | * pager: the memory_object itself, supplied by |
3889 | * the user requesting a mapping (or the kernel, |
3890 | * when initializing internal objects); the |
3891 | * kernel simulates holding send rights by keeping |
3892 | * a port reference; |
3893 | * |
3894 | * pager_request: |
3895 | * the memory object control port, |
3896 | * created by the kernel; the kernel holds |
3897 | * receive (and ownership) rights to this |
3898 | * port, but no other references. |
3899 | * |
3900 | * When initialization is complete, the "initialized" field |
3901 | * is asserted. Other mappings using a particular memory object, |
3902 | * and any references to the vm_object gained through the |
3903 | * port association must wait for this initialization to occur. |
3904 | * |
3905 | * In order to allow the memory manager to set attributes before |
3906 | * requests (notably virtual copy operations, but also data or |
3907 | * unlock requests) are made, a "ready" attribute is made available. |
3908 | * Only the memory manager may affect the value of this attribute. |
3909 | * Its value does not affect critical kernel functions, such as |
3910 | * internal object initialization or destruction. [Furthermore, |
3911 | * memory objects created by the kernel are assumed to be ready |
3912 | * immediately; the default memory manager need not explicitly |
3913 | * set the "ready" attribute.] |
3914 | * |
3915 | * [Both the "initialized" and "ready" attribute wait conditions |
3916 | * use the "pager" field as the wait event.] |
3917 | * |
3918 | * The port associations can be broken down by any of the |
3919 | * following routines: |
3920 | * vm_object_terminate: |
3921 | * No references to the vm_object remain, and |
3922 | * the object cannot (or will not) be cached. |
3923 | * This is the normal case, and is done even |
3924 | * though one of the other cases has already been |
3925 | * done. |
3926 | * memory_object_destroy: |
3927 | * The memory manager has requested that the |
3928 | * kernel relinquish references to the memory |
3929 | * object. [The memory manager may not want to |
3930 | * destroy the memory object, but may wish to |
3931 | * refuse or tear down existing memory mappings.] |
3932 | * |
3933 | * Each routine that breaks an association must break all of |
3934 | * them at once. At some later time, that routine must clear |
3935 | * the pager field and release the memory object references. |
3936 | * [Furthermore, each routine must cope with the simultaneous |
3937 | * or previous operations of the others.] |
3938 | * |
3939 | * Because the pager field may be cleared spontaneously, it |
3940 | * cannot be used to determine whether a memory object has |
3941 | * ever been associated with a particular vm_object. [This |
3942 | * knowledge is important to the shadow object mechanism.] |
3943 | * For this reason, an additional "created" attribute is |
3944 | * provided. |
3945 | * |
3946 | * During various paging operations, the pager reference found in the |
3947 | * vm_object must be valid. To prevent this from being released, |
3948 | * (other than being removed, i.e., made null), routines may use |
3949 | * the vm_object_paging_begin/end routines [actually, macros]. |
3950 | * The implementation uses the "paging_in_progress" and "wanted" fields. |
3951 | * [Operations that alter the validity of the pager values include the |
3952 | * termination routines and vm_object_collapse.] |
3953 | */ |
3954 | |
3955 | |
3956 | /* |
3957 | * Routine: vm_object_memory_object_associate |
3958 | * Purpose: |
3959 | * Associate a VM object to the given pager. |
3960 | * If a VM object is not provided, create one. |
3961 | * Initialize the pager. |
3962 | */ |
3963 | vm_object_t |
3964 | vm_object_memory_object_associate( |
memory_object_t pager,
3966 | vm_object_t object, |
3967 | vm_object_size_t size, |
3968 | boolean_t named) |
3969 | { |
3970 | memory_object_control_t control; |
3971 | |
3972 | assert(pager != MEMORY_OBJECT_NULL); |
3973 | |
3974 | if (object != VM_OBJECT_NULL) { |
3975 | assert(object->internal); |
3976 | assert(object->pager_created); |
3977 | assert(!object->pager_initialized); |
3978 | assert(!object->pager_ready); |
3979 | } else { |
3980 | object = vm_object_allocate(size); |
3981 | assert(object != VM_OBJECT_NULL); |
3982 | object->internal = FALSE; |
3983 | object->pager_trusted = FALSE; |
3984 | /* copy strategy invalid until set by memory manager */ |
3985 | object->copy_strategy = MEMORY_OBJECT_COPY_INVALID; |
3986 | } |
3987 | |
3988 | /* |
3989 | * Allocate request port. |
3990 | */ |
3991 | |
3992 | control = memory_object_control_allocate(object); |
3993 | assert (control != MEMORY_OBJECT_CONTROL_NULL); |
3994 | |
3995 | vm_object_lock(object); |
3996 | |
3997 | assert(!object->pager_ready); |
3998 | assert(!object->pager_initialized); |
3999 | assert(object->pager == NULL); |
4000 | assert(object->pager_control == NULL); |
4001 | |
4002 | /* |
4003 | * Copy the reference we were given. |
4004 | */ |
4005 | |
4006 | memory_object_reference(pager); |
4007 | object->pager_created = TRUE; |
4008 | object->pager = pager; |
4009 | object->pager_control = control; |
4010 | object->pager_ready = FALSE; |
4011 | |
4012 | vm_object_unlock(object); |
4013 | |
4014 | /* |
4015 | * Let the pager know we're using it. |
4016 | */ |
4017 | |
4018 | (void) memory_object_init(pager, |
4019 | object->pager_control, |
4020 | PAGE_SIZE); |
4021 | |
4022 | vm_object_lock(object); |
4023 | if (named) |
4024 | object->named = TRUE; |
4025 | if (object->internal) { |
4026 | object->pager_ready = TRUE; |
4027 | vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY); |
4028 | } |
4029 | |
4030 | object->pager_initialized = TRUE; |
4031 | vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED); |
4032 | |
4033 | vm_object_unlock(object); |
4034 | |
4035 | return object; |
4036 | } |
4037 | |
4038 | /* |
4039 | * Routine: vm_object_compressor_pager_create |
4040 | * Purpose: |
4041 | * Create a memory object for an internal object. |
4042 | * In/out conditions: |
4043 | * The object is locked on entry and exit; |
4044 | * it may be unlocked within this call. |
4045 | * Limitations: |
4046 | * Only one thread may be performing a |
4047 | * vm_object_compressor_pager_create on an object at |
4048 | * a time. Presumably, only the pageout |
4049 | * daemon will be using this routine. |
4050 | */ |
4051 | |
4052 | void |
4053 | ( |
4054 | vm_object_t object) |
4055 | { |
memory_object_t pager;
vm_object_t pager_object = VM_OBJECT_NULL;
4058 | |
4059 | assert(object != kernel_object); |
4060 | |
4061 | /* |
4062 | * Prevent collapse or termination by holding a paging reference |
4063 | */ |
4064 | |
4065 | vm_object_paging_begin(object); |
4066 | if (object->pager_created) { |
4067 | /* |
4068 | * Someone else got to it first... |
4069 | * wait for them to finish initializing the ports |
4070 | */ |
4071 | while (!object->pager_initialized) { |
4072 | vm_object_sleep(object, |
4073 | VM_OBJECT_EVENT_INITIALIZED, |
4074 | THREAD_UNINT); |
4075 | } |
4076 | vm_object_paging_end(object); |
4077 | return; |
4078 | } |
4079 | |
4080 | if ((uint32_t) (object->vo_size/PAGE_SIZE) != |
4081 | (object->vo_size/PAGE_SIZE)) { |
4082 | #if DEVELOPMENT || DEBUG |
printf("vm_object_compressor_pager_create(%p): "
"object size 0x%llx >= 0x%llx\n",
4085 | object, |
4086 | (uint64_t) object->vo_size, |
4087 | 0x0FFFFFFFFULL*PAGE_SIZE); |
4088 | #endif /* DEVELOPMENT || DEBUG */ |
4089 | vm_object_paging_end(object); |
4090 | return; |
4091 | } |
4092 | |
4093 | /* |
4094 | * Indicate that a memory object has been assigned |
4095 | * before dropping the lock, to prevent a race. |
4096 | */ |
4097 | |
4098 | object->pager_created = TRUE; |
4099 | object->paging_offset = 0; |
4100 | |
4101 | vm_object_unlock(object); |
4102 | |
4103 | /* |
4104 | * Create the [internal] pager, and associate it with this object. |
4105 | * |
4106 | * We make the association here so that vm_object_enter() |
4107 | * can look up the object to complete initializing it. No |
4108 | * user will ever map this object. |
4109 | */ |
4110 | { |
4111 | /* create our new memory object */ |
4112 | assert((uint32_t) (object->vo_size/PAGE_SIZE) == |
4113 | (object->vo_size/PAGE_SIZE)); |
4114 | (void) compressor_memory_object_create( |
4115 | (memory_object_size_t) object->vo_size, |
4116 | &pager); |
4117 | if (pager == NULL) { |
panic("vm_object_compressor_pager_create(): "
"no pager for object %p size 0x%llx\n",
4120 | object, (uint64_t) object->vo_size); |
4121 | } |
4122 | } |
4123 | |
4124 | /* |
4125 | * A reference was returned by |
4126 | * memory_object_create(), and it is |
4127 | * copied by vm_object_memory_object_associate(). |
4128 | */ |
4129 | |
4130 | pager_object = vm_object_memory_object_associate(pager, |
4131 | object, |
4132 | object->vo_size, |
4133 | FALSE); |
4134 | if (pager_object != object) { |
panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
4136 | } |
4137 | |
4138 | /* |
4139 | * Drop the reference we were passed. |
4140 | */ |
4141 | memory_object_deallocate(pager); |
4142 | |
4143 | vm_object_lock(object); |
4144 | |
4145 | /* |
4146 | * Release the paging reference |
4147 | */ |
4148 | vm_object_paging_end(object); |
4149 | } |
4150 | |
4151 | /* |
4152 | * Global variables for vm_object_collapse(): |
4153 | * |
4154 | * Counts for normal collapses and bypasses. |
4155 | * Debugging variables, to watch or disable collapse. |
4156 | */ |
4157 | static long object_collapses = 0; |
4158 | static long object_bypasses = 0; |
4159 | |
4160 | static boolean_t vm_object_collapse_allowed = TRUE; |
4161 | static boolean_t vm_object_bypass_allowed = TRUE; |
4162 | |
4163 | void vm_object_do_collapse_compressor(vm_object_t object, |
4164 | vm_object_t backing_object); |
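/*
 *	Routine:	vm_object_do_collapse_compressor
 *	Purpose:
 *		Move the compressed pages that "backing_object" holds for
 *		the range shadowed by "object" into "object"'s pager,
 *		skipping offsets for which "object" already has a resident
 *		or compressed page.
 *	Conditions:
 *		Both objects are locked exclusive.
 */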
4165 | void |
4166 | vm_object_do_collapse_compressor( |
4167 | vm_object_t object, |
4168 | vm_object_t backing_object) |
4169 | { |
4170 | vm_object_offset_t new_offset, backing_offset; |
4171 | vm_object_size_t size; |
4172 | |
4173 | vm_counters.do_collapse_compressor++; |
4174 | |
4175 | vm_object_lock_assert_exclusive(object); |
4176 | vm_object_lock_assert_exclusive(backing_object); |
4177 | |
4178 | size = object->vo_size; |
4179 | |
4180 | /* |
4181 | * Move all compressed pages from backing_object |
4182 | * to the parent. |
4183 | */ |
4184 | |
4185 | for (backing_offset = object->vo_shadow_offset; |
4186 | backing_offset < object->vo_shadow_offset + object->vo_size; |
4187 | backing_offset += PAGE_SIZE) { |
memory_object_offset_t backing_pager_offset;
4189 | |
4190 | /* find the next compressed page at or after this offset */ |
4191 | backing_pager_offset = (backing_offset + |
4192 | backing_object->paging_offset); |
4193 | backing_pager_offset = vm_compressor_pager_next_compressed( |
4194 | backing_object->pager, |
4195 | backing_pager_offset); |
4196 | if (backing_pager_offset == (memory_object_offset_t) -1) { |
4197 | /* no more compressed pages */ |
4198 | break; |
4199 | } |
4200 | backing_offset = (backing_pager_offset - |
4201 | backing_object->paging_offset); |
4202 | |
4203 | new_offset = backing_offset - object->vo_shadow_offset; |
4204 | |
4205 | if (new_offset >= object->vo_size) { |
4206 | /* we're out of the scope of "object": done */ |
4207 | break; |
4208 | } |
4209 | |
4210 | if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) || |
4211 | (vm_compressor_pager_state_get(object->pager, |
4212 | (new_offset + |
4213 | object->paging_offset)) == |
4214 | VM_EXTERNAL_STATE_EXISTS)) { |
4215 | /* |
4216 | * This page already exists in object, resident or |
4217 | * compressed. |
4218 | * We don't need this compressed page in backing_object |
4219 | * and it will be reclaimed when we release |
4220 | * backing_object. |
4221 | */ |
4222 | continue; |
4223 | } |
4224 | |
4225 | /* |
4226 | * backing_object has this page in the VM compressor and |
4227 | * we need to transfer it to object. |
4228 | */ |
4229 | vm_counters.do_collapse_compressor_pages++; |
4230 | vm_compressor_pager_transfer( |
4231 | /* destination: */ |
4232 | object->pager, |
4233 | (new_offset + object->paging_offset), |
4234 | /* source: */ |
4235 | backing_object->pager, |
4236 | (backing_offset + backing_object->paging_offset)); |
4237 | } |
4238 | } |
4239 | |
4240 | /* |
4241 | * Routine: vm_object_do_collapse |
4242 | * Purpose: |
4243 | * Collapse an object with the object backing it. |
4244 | * Pages in the backing object are moved into the |
4245 | * parent, and the backing object is deallocated. |
4246 | * Conditions: |
4247 | * Both objects and the cache are locked; the page |
4248 | * queues are unlocked. |
4249 | * |
4250 | */ |
4251 | static void |
4252 | vm_object_do_collapse( |
4253 | vm_object_t object, |
4254 | vm_object_t backing_object) |
4255 | { |
4256 | vm_page_t p, pp; |
4257 | vm_object_offset_t new_offset, backing_offset; |
4258 | vm_object_size_t size; |
4259 | |
4260 | vm_object_lock_assert_exclusive(object); |
4261 | vm_object_lock_assert_exclusive(backing_object); |
4262 | |
4263 | assert(object->purgable == VM_PURGABLE_DENY); |
4264 | assert(backing_object->purgable == VM_PURGABLE_DENY); |
4265 | |
4266 | backing_offset = object->vo_shadow_offset; |
4267 | size = object->vo_size; |
4268 | |
4269 | /* |
4270 | * Move all in-memory pages from backing_object |
4271 | * to the parent. Pages that have been paged out |
4272 | * will be overwritten by any of the parent's |
4273 | * pages that shadow them. |
4274 | */ |
4275 | |
4276 | while (!vm_page_queue_empty(&backing_object->memq)) { |
4277 | |
4278 | p = (vm_page_t) vm_page_queue_first(&backing_object->memq); |
4279 | |
4280 | new_offset = (p->vmp_offset - backing_offset); |
4281 | |
4282 | assert(!p->vmp_busy || p->vmp_absent); |
4283 | |
4284 | /* |
4285 | * If the parent has a page here, or if |
4286 | * this page falls outside the parent, |
4287 | * dispose of it. |
4288 | * |
4289 | * Otherwise, move it as planned. |
4290 | */ |
4291 | |
4292 | if (p->vmp_offset < backing_offset || new_offset >= size) { |
4293 | VM_PAGE_FREE(p); |
4294 | } else { |
4295 | pp = vm_page_lookup(object, new_offset); |
4296 | if (pp == VM_PAGE_NULL) { |
4297 | |
4298 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, |
4299 | new_offset) |
4300 | == VM_EXTERNAL_STATE_EXISTS) { |
4301 | /* |
4302 | * Parent object has this page |
4303 | * in the VM compressor. |
4304 | * Throw away the backing |
4305 | * object's page. |
4306 | */ |
4307 | VM_PAGE_FREE(p); |
4308 | } else { |
4309 | /* |
4310 | * Parent now has no page. |
4311 | * Move the backing object's page |
4312 | * up. |
4313 | */ |
4314 | vm_page_rename(p, object, new_offset); |
4315 | } |
4316 | } else { |
4317 | assert(! pp->vmp_absent); |
4318 | |
4319 | /* |
4320 | * Parent object has a real page. |
4321 | * Throw away the backing object's |
4322 | * page. |
4323 | */ |
4324 | VM_PAGE_FREE(p); |
4325 | } |
4326 | } |
4327 | } |
4328 | |
4329 | if (vm_object_collapse_compressor_allowed && |
4330 | object->pager != MEMORY_OBJECT_NULL && |
4331 | backing_object->pager != MEMORY_OBJECT_NULL) { |
4332 | |
4333 | /* move compressed pages from backing_object to object */ |
4334 | vm_object_do_collapse_compressor(object, backing_object); |
4335 | |
4336 | } else if (backing_object->pager != MEMORY_OBJECT_NULL) { |
4337 | |
4338 | assert((!object->pager_created && |
4339 | (object->pager == MEMORY_OBJECT_NULL)) || |
4340 | (!backing_object->pager_created && |
4341 | (backing_object->pager == MEMORY_OBJECT_NULL))); |
4342 | /* |
4343 | * Move the pager from backing_object to object. |
4344 | * |
4345 | * XXX We're only using part of the paging space |
4346 | * for keeps now... we ought to discard the |
4347 | * unused portion. |
4348 | */ |
4349 | |
4350 | assert(!object->paging_in_progress); |
4351 | assert(!object->activity_in_progress); |
4352 | assert(!object->pager_created); |
4353 | assert(object->pager == NULL); |
4354 | object->pager = backing_object->pager; |
4355 | |
4356 | object->pager_created = backing_object->pager_created; |
4357 | object->pager_control = backing_object->pager_control; |
4358 | object->pager_ready = backing_object->pager_ready; |
4359 | object->pager_initialized = backing_object->pager_initialized; |
4360 | object->paging_offset = |
4361 | backing_object->paging_offset + backing_offset; |
4362 | if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) { |
4363 | memory_object_control_collapse(object->pager_control, |
4364 | object); |
4365 | } |
4366 | /* the backing_object has lost its pager: reset all fields */ |
4367 | backing_object->pager_created = FALSE; |
4368 | backing_object->pager_control = NULL; |
4369 | backing_object->pager_ready = FALSE; |
4370 | backing_object->paging_offset = 0; |
4371 | backing_object->pager = NULL; |
4372 | } |
4373 | /* |
4374 | * Object now shadows whatever backing_object did. |
4375 | * Note that the reference to backing_object->shadow |
4376 | * moves from within backing_object to within object. |
4377 | */ |
4378 | |
4379 | assert(!object->phys_contiguous); |
4380 | assert(!backing_object->phys_contiguous); |
4381 | object->shadow = backing_object->shadow; |
4382 | if (object->shadow) { |
4383 | object->vo_shadow_offset += backing_object->vo_shadow_offset; |
4384 | /* "backing_object" gave its shadow to "object" */ |
4385 | backing_object->shadow = VM_OBJECT_NULL; |
4386 | backing_object->vo_shadow_offset = 0; |
4387 | } else { |
4388 | /* no shadow, therefore no shadow offset... */ |
4389 | object->vo_shadow_offset = 0; |
4390 | } |
4391 | assert((object->shadow == VM_OBJECT_NULL) || |
4392 | (object->shadow->copy != backing_object)); |
4393 | |
4394 | /* |
4395 | * Discard backing_object. |
4396 | * |
4397 | * Since the backing object has no pages, no |
4398 | * pager left, and no object references within it, |
4399 | * all that is necessary is to dispose of it. |
4400 | */ |
4401 | object_collapses++; |
4402 | |
4403 | assert(backing_object->ref_count == 1); |
4404 | assert(backing_object->resident_page_count == 0); |
4405 | assert(backing_object->paging_in_progress == 0); |
4406 | assert(backing_object->activity_in_progress == 0); |
4407 | assert(backing_object->shadow == VM_OBJECT_NULL); |
4408 | assert(backing_object->vo_shadow_offset == 0); |
4409 | |
4410 | if (backing_object->pager != MEMORY_OBJECT_NULL) { |
4411 | /* ... unless it has a pager; need to terminate pager too */ |
4412 | vm_counters.do_collapse_terminate++; |
4413 | if (vm_object_terminate(backing_object) != KERN_SUCCESS) { |
4414 | vm_counters.do_collapse_terminate_failure++; |
4415 | } |
4416 | return; |
4417 | } |
4418 | |
4419 | assert(backing_object->pager == NULL); |
4420 | |
4421 | backing_object->alive = FALSE; |
4422 | vm_object_unlock(backing_object); |
4423 | |
XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
4425 | backing_object, 0,0,0,0); |
4426 | |
4427 | #if VM_OBJECT_TRACKING |
4428 | if (vm_object_tracking_inited) { |
4429 | btlog_remove_entries_for_element(vm_object_tracking_btlog, |
4430 | backing_object); |
4431 | } |
4432 | #endif /* VM_OBJECT_TRACKING */ |
4433 | |
4434 | vm_object_lock_destroy(backing_object); |
4435 | |
4436 | zfree(vm_object_zone, backing_object); |
4437 | |
4438 | } |
4439 | |
4440 | static void |
4441 | vm_object_do_bypass( |
4442 | vm_object_t object, |
4443 | vm_object_t backing_object) |
4444 | { |
4445 | /* |
4446 | * Make the parent shadow the next object |
4447 | * in the chain. |
4448 | */ |
4449 | |
4450 | vm_object_lock_assert_exclusive(object); |
4451 | vm_object_lock_assert_exclusive(backing_object); |
4452 | |
4453 | #if TASK_SWAPPER |
4454 | /* |
4455 | * Do object reference in-line to |
4456 | * conditionally increment shadow's |
4457 | * residence count. If object is not |
4458 | * resident, leave residence count |
4459 | * on shadow alone. |
4460 | */ |
4461 | if (backing_object->shadow != VM_OBJECT_NULL) { |
4462 | vm_object_lock(backing_object->shadow); |
4463 | vm_object_lock_assert_exclusive(backing_object->shadow); |
4464 | backing_object->shadow->ref_count++; |
4465 | if (object->res_count != 0) |
4466 | vm_object_res_reference(backing_object->shadow); |
4467 | vm_object_unlock(backing_object->shadow); |
4468 | } |
4469 | #else /* TASK_SWAPPER */ |
4470 | vm_object_reference(backing_object->shadow); |
4471 | #endif /* TASK_SWAPPER */ |
4472 | |
4473 | assert(!object->phys_contiguous); |
4474 | assert(!backing_object->phys_contiguous); |
4475 | object->shadow = backing_object->shadow; |
4476 | if (object->shadow) { |
4477 | object->vo_shadow_offset += backing_object->vo_shadow_offset; |
4478 | } else { |
4479 | /* no shadow, therefore no shadow offset... */ |
4480 | object->vo_shadow_offset = 0; |
4481 | } |
4482 | |
4483 | /* |
4484 | * Backing object might have had a copy pointer |
4485 | * to us. If it did, clear it. |
4486 | */ |
4487 | if (backing_object->copy == object) { |
4488 | backing_object->copy = VM_OBJECT_NULL; |
4489 | } |
4490 | |
4491 | /* |
4492 | * Drop the reference count on backing_object. |
4493 | #if TASK_SWAPPER |
4494 | * Since its ref_count was at least 2, it |
4495 | * will not vanish; so we don't need to call |
4496 | * vm_object_deallocate. |
4497 | * [with a caveat for "named" objects] |
4498 | * |
4499 | * The res_count on the backing object is |
4500 | * conditionally decremented. It's possible |
4501 | * (via vm_pageout_scan) to get here with |
4502 | * a "swapped" object, which has a 0 res_count, |
4503 | * in which case, the backing object res_count |
4504 | * is already down by one. |
4505 | #else |
4506 | * Don't call vm_object_deallocate unless |
4507 | * ref_count drops to zero. |
4508 | * |
4509 | * The ref_count can drop to zero here if the |
4510 | * backing object could be bypassed but not |
4511 | * collapsed, such as when the backing object |
4512 | * is temporary and cachable. |
4513 | #endif |
4514 | */ |
4515 | if (backing_object->ref_count > 2 || |
4516 | (!backing_object->named && backing_object->ref_count > 1)) { |
4517 | vm_object_lock_assert_exclusive(backing_object); |
4518 | backing_object->ref_count--; |
4519 | #if TASK_SWAPPER |
4520 | if (object->res_count != 0) |
4521 | vm_object_res_deallocate(backing_object); |
4522 | assert(backing_object->ref_count > 0); |
4523 | #endif /* TASK_SWAPPER */ |
4524 | vm_object_unlock(backing_object); |
4525 | } else { |
4526 | |
4527 | /* |
4528 | * Drop locks so that we can deallocate |
4529 | * the backing object. |
4530 | */ |
4531 | |
4532 | #if TASK_SWAPPER |
4533 | if (object->res_count == 0) { |
4534 | /* XXX get a reference for the deallocate below */ |
4535 | vm_object_res_reference(backing_object); |
4536 | } |
4537 | #endif /* TASK_SWAPPER */ |
4538 | /* |
4539 | * vm_object_collapse (the caller of this function) is |
4540 | * now called from contexts that may not guarantee that a |
4541 | * valid reference is held on the object... w/o a valid |
4542 | * reference, it is unsafe and unwise (you will definitely |
4543 | * regret it) to unlock the object and then retake the lock |
4544 | * since the object may be terminated and recycled in between. |
4545 | * The "activity_in_progress" reference will keep the object |
4546 | * 'stable'. |
4547 | */ |
4548 | vm_object_activity_begin(object); |
4549 | vm_object_unlock(object); |
4550 | |
4551 | vm_object_unlock(backing_object); |
4552 | vm_object_deallocate(backing_object); |
4553 | |
4554 | /* |
4555 | * Relock object. We don't have to reverify |
4556 | * its state since vm_object_collapse will |
4557 | * do that for us as it starts at the |
4558 | * top of its loop. |
4559 | */ |
4560 | |
4561 | vm_object_lock(object); |
4562 | vm_object_activity_end(object); |
4563 | } |
4564 | |
4565 | object_bypasses++; |
4566 | } |
4567 | |
4568 | |
4569 | /* |
4570 | * vm_object_collapse: |
4571 | * |
4572 | * Perform an object collapse or an object bypass if appropriate. |
4573 | * The real work of collapsing and bypassing is performed in |
4574 | * the routines vm_object_do_collapse and vm_object_do_bypass. |
4575 | * |
4576 | * Requires that the object be locked and the page queues be unlocked. |
4577 | * |
4578 | */ |
4579 | static unsigned long vm_object_collapse_calls = 0; |
4580 | static unsigned long vm_object_collapse_objects = 0; |
4581 | static unsigned long vm_object_collapse_do_collapse = 0; |
4582 | static unsigned long vm_object_collapse_do_bypass = 0; |
4583 | |
4584 | __private_extern__ void |
4585 | vm_object_collapse( |
4586 | vm_object_t object, |
4587 | vm_object_offset_t hint_offset, |
4588 | boolean_t can_bypass) |
4589 | { |
4590 | vm_object_t backing_object; |
4591 | unsigned int rcount; |
4592 | unsigned int size; |
4593 | vm_object_t original_object; |
4594 | int object_lock_type; |
4595 | int backing_object_lock_type; |
4596 | |
4597 | vm_object_collapse_calls++; |
4598 | |
4599 | if (! vm_object_collapse_allowed && |
4600 | ! (can_bypass && vm_object_bypass_allowed)) { |
4601 | return; |
4602 | } |
4603 | |
XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
4605 | object, 0,0,0,0); |
4606 | |
4607 | if (object == VM_OBJECT_NULL) |
4608 | return; |
4609 | |
4610 | original_object = object; |
4611 | |
4612 | /* |
4613 | * The top object was locked "exclusive" by the caller. |
4614 | * In the first pass, to determine if we can collapse the shadow chain, |
4615 | * take a "shared" lock on the shadow objects. If we can collapse, |
4616 | * we'll have to go down the chain again with exclusive locks. |
4617 | */ |
4618 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
4619 | backing_object_lock_type = OBJECT_LOCK_SHARED; |
4620 | |
4621 | retry: |
4622 | object = original_object; |
4623 | vm_object_lock_assert_exclusive(object); |
4624 | |
4625 | while (TRUE) { |
4626 | vm_object_collapse_objects++; |
4627 | /* |
4628 | * Verify that the conditions are right for either |
4629 | * collapse or bypass: |
4630 | */ |
4631 | |
4632 | /* |
4633 | * There is a backing object, and |
4634 | */ |
4635 | |
4636 | backing_object = object->shadow; |
4637 | if (backing_object == VM_OBJECT_NULL) { |
4638 | if (object != original_object) { |
4639 | vm_object_unlock(object); |
4640 | } |
4641 | return; |
4642 | } |
4643 | if (backing_object_lock_type == OBJECT_LOCK_SHARED) { |
4644 | vm_object_lock_shared(backing_object); |
4645 | } else { |
4646 | vm_object_lock(backing_object); |
4647 | } |
4648 | |
4649 | /* |
4650 | * No pages in the object are currently |
4651 | * being paged out, and |
4652 | */ |
4653 | if (object->paging_in_progress != 0 || |
4654 | object->activity_in_progress != 0) { |
4655 | /* try and collapse the rest of the shadow chain */ |
4656 | if (object != original_object) { |
4657 | vm_object_unlock(object); |
4658 | } |
4659 | object = backing_object; |
4660 | object_lock_type = backing_object_lock_type; |
4661 | continue; |
4662 | } |
4663 | |
4664 | /* |
4665 | * ... |
4666 | * The backing object is not read_only, |
4667 | * and no pages in the backing object are |
4668 | * currently being paged out. |
4669 | * The backing object is internal. |
4670 | * |
4671 | */ |
4672 | |
4673 | if (!backing_object->internal || |
4674 | backing_object->paging_in_progress != 0 || |
4675 | backing_object->activity_in_progress != 0) { |
4676 | /* try and collapse the rest of the shadow chain */ |
4677 | if (object != original_object) { |
4678 | vm_object_unlock(object); |
4679 | } |
4680 | object = backing_object; |
4681 | object_lock_type = backing_object_lock_type; |
4682 | continue; |
4683 | } |
4684 | |
4685 | /* |
4686 | * Purgeable objects are not supposed to engage in |
4687 | * copy-on-write activities, so should not have |
4688 | * any shadow objects or be a shadow object to another |
4689 | * object. |
4690 | * Collapsing a purgeable object would require some |
4691 | * updates to the purgeable compressed ledgers. |
4692 | */ |
4693 | if (object->purgable != VM_PURGABLE_DENY || |
4694 | backing_object->purgable != VM_PURGABLE_DENY) { |
panic("vm_object_collapse() attempting to collapse "
"purgeable object: %p(%d) %p(%d)\n",
4697 | object, object->purgable, |
4698 | backing_object, backing_object->purgable); |
4699 | /* try and collapse the rest of the shadow chain */ |
4700 | if (object != original_object) { |
4701 | vm_object_unlock(object); |
4702 | } |
4703 | object = backing_object; |
4704 | object_lock_type = backing_object_lock_type; |
4705 | continue; |
4706 | } |
4707 | |
4708 | /* |
4709 | * The backing object can't be a copy-object: |
4710 | * the shadow_offset for the copy-object must stay |
4711 | * as 0. Furthermore (for the 'we have all the |
4712 | * pages' case), if we bypass backing_object and |
4713 | * just shadow the next object in the chain, old |
4714 | * pages from that object would then have to be copied |
4715 | * BOTH into the (former) backing_object and into the |
4716 | * parent object. |
4717 | */ |
4718 | if (backing_object->shadow != VM_OBJECT_NULL && |
4719 | backing_object->shadow->copy == backing_object) { |
4720 | /* try and collapse the rest of the shadow chain */ |
4721 | if (object != original_object) { |
4722 | vm_object_unlock(object); |
4723 | } |
4724 | object = backing_object; |
4725 | object_lock_type = backing_object_lock_type; |
4726 | continue; |
4727 | } |
4728 | |
4729 | /* |
4730 | * We can now try to either collapse the backing |
4731 | * object (if the parent is the only reference to |
4732 | * it) or (perhaps) remove the parent's reference |
4733 | * to it. |
4734 | * |
4735 | * If there is exactly one reference to the backing |
4736 | * object, we may be able to collapse it into the |
4737 | * parent. |
4738 | * |
4739 | * As long as one of the objects is still not known |
4740 | * to the pager, we can collapse them. |
4741 | */ |
4742 | if (backing_object->ref_count == 1 && |
4743 | (vm_object_collapse_compressor_allowed || |
4744 | !object->pager_created |
4745 | || (!backing_object->pager_created) |
4746 | ) && vm_object_collapse_allowed) { |
4747 | |
4748 | /* |
4749 | * We need the exclusive lock on the VM objects. |
4750 | */ |
4751 | if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) { |
4752 | /* |
4753 | * We have an object and its shadow locked |
4754 | * "shared". We can't just upgrade the locks |
4755 | * to "exclusive", as some other thread might |
4756 | * also have these objects locked "shared" and |
4757 | * attempt to upgrade one or the other to |
4758 | * "exclusive". The upgrades would block |
4759 | * forever waiting for the other "shared" locks |
4760 | * to get released. |
4761 | * So we have to release the locks and go |
4762 | * down the shadow chain again (since it could |
4763 | * have changed) with "exclusive" locking. |
4764 | */ |
4765 | vm_object_unlock(backing_object); |
4766 | if (object != original_object) |
4767 | vm_object_unlock(object); |
4768 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
4769 | backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
4770 | goto retry; |
4771 | } |
4772 | |
XPR(XPR_VM_OBJECT,
"vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
4775 | backing_object, object, |
4776 | backing_object->pager, |
4777 | backing_object->pager_control, 0); |
4778 | |
4779 | /* |
4780 | * Collapse the object with its backing |
4781 | * object, and try again with the object's |
4782 | * new backing object. |
4783 | */ |
4784 | |
4785 | vm_object_do_collapse(object, backing_object); |
4786 | vm_object_collapse_do_collapse++; |
4787 | continue; |
4788 | } |
4789 | |
4790 | /* |
4791 | * Collapsing the backing object was not possible |
4792 | * or permitted, so let's try bypassing it. |
4793 | */ |
4794 | |
4795 | if (! (can_bypass && vm_object_bypass_allowed)) { |
4796 | /* try and collapse the rest of the shadow chain */ |
4797 | if (object != original_object) { |
4798 | vm_object_unlock(object); |
4799 | } |
4800 | object = backing_object; |
4801 | object_lock_type = backing_object_lock_type; |
4802 | continue; |
4803 | } |
4804 | |
4805 | |
4806 | /* |
4807 | * If the object doesn't have all its pages present, |
4808 | * we have to make sure no pages in the backing object |
4809 | * "show through" before bypassing it. |
4810 | */ |
4811 | size = (unsigned int)atop(object->vo_size); |
4812 | rcount = object->resident_page_count; |
4813 | |
4814 | if (rcount != size) { |
4815 | vm_object_offset_t offset; |
4816 | vm_object_offset_t backing_offset; |
4817 | unsigned int backing_rcount; |
4818 | |
4819 | /* |
4820 | * If the backing object has a pager but no pagemap, |
4821 | * then we cannot bypass it, because we don't know |
4822 | * what pages it has. |
4823 | */ |
4824 | if (backing_object->pager_created) { |
4825 | /* try and collapse the rest of the shadow chain */ |
4826 | if (object != original_object) { |
4827 | vm_object_unlock(object); |
4828 | } |
4829 | object = backing_object; |
4830 | object_lock_type = backing_object_lock_type; |
4831 | continue; |
4832 | } |
4833 | |
4834 | /* |
4835 | * If the object has a pager but no pagemap, |
4836 | * then we cannot bypass it, because we don't know |
4837 | * what pages it has. |
4838 | */ |
4839 | if (object->pager_created) { |
4840 | /* try and collapse the rest of the shadow chain */ |
4841 | if (object != original_object) { |
4842 | vm_object_unlock(object); |
4843 | } |
4844 | object = backing_object; |
4845 | object_lock_type = backing_object_lock_type; |
4846 | continue; |
4847 | } |
4848 | |
4849 | backing_offset = object->vo_shadow_offset; |
4850 | backing_rcount = backing_object->resident_page_count; |
4851 | |
4852 | if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) { |
4853 | /* |
4854 | * we have enough pages in the backing object to guarantee that |
4855 | * at least 1 of them must be 'uncovered' by a resident page |
4856 | * in the object we're evaluating, so move on and |
4857 | * try to collapse the rest of the shadow chain |
4858 | */ |
4859 | if (object != original_object) { |
4860 | vm_object_unlock(object); |
4861 | } |
4862 | object = backing_object; |
4863 | object_lock_type = backing_object_lock_type; |
4864 | continue; |
4865 | } |
4866 | |
4867 | /* |
4868 | * If all of the pages in the backing object are |
4869 | * shadowed by the parent object, the parent |
4870 | * object no longer has to shadow the backing |
4871 | * object; it can shadow the next one in the |
4872 | * chain. |
4873 | * |
4874 | * If the backing object has existence info, |
* we must examine its existence info
4876 | * as well. |
4877 | * |
4878 | */ |
4879 | |
4880 | #define EXISTS_IN_OBJECT(obj, off, rc) \ |
4881 | ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \ |
4882 | == VM_EXTERNAL_STATE_EXISTS) || \ |
4883 | ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--)) |
4884 | |
4885 | /* |
4886 | * Check the hint location first |
4887 | * (since it is often the quickest way out of here). |
4888 | */ |
4889 | if (object->cow_hint != ~(vm_offset_t)0) |
4890 | hint_offset = (vm_object_offset_t)object->cow_hint; |
4891 | else |
4892 | hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ? |
4893 | (hint_offset - 8 * PAGE_SIZE_64) : 0; |
4894 | |
4895 | if (EXISTS_IN_OBJECT(backing_object, hint_offset + |
4896 | backing_offset, backing_rcount) && |
4897 | !EXISTS_IN_OBJECT(object, hint_offset, rcount)) { |
4898 | /* dependency right at the hint */ |
4899 | object->cow_hint = (vm_offset_t) hint_offset; /* atomic */ |
4900 | /* try and collapse the rest of the shadow chain */ |
4901 | if (object != original_object) { |
4902 | vm_object_unlock(object); |
4903 | } |
4904 | object = backing_object; |
4905 | object_lock_type = backing_object_lock_type; |
4906 | continue; |
4907 | } |
4908 | |
4909 | /* |
4910 | * If the object's window onto the backing_object |
4911 | * is large compared to the number of resident |
4912 | * pages in the backing object, it makes sense to |
4913 | * walk the backing_object's resident pages first. |
4914 | * |
* NOTE: Pages may be resident, present in the existence
* map, or both; so if we don't find a dependency while
4917 | * walking the backing object's resident page list |
4918 | * directly, and there is an existence map, we'll have |
4919 | * to run the offset based 2nd pass. Because we may |
4920 | * have to run both passes, we need to be careful |
4921 | * not to decrement 'rcount' in the 1st pass |
4922 | */ |
4923 | if (backing_rcount && backing_rcount < (size / 8)) { |
4924 | unsigned int rc = rcount; |
4925 | vm_page_t p; |
4926 | |
4927 | backing_rcount = backing_object->resident_page_count; |
4928 | p = (vm_page_t)vm_page_queue_first(&backing_object->memq); |
4929 | do { |
4930 | offset = (p->vmp_offset - backing_offset); |
4931 | |
4932 | if (offset < object->vo_size && |
4933 | offset != hint_offset && |
4934 | !EXISTS_IN_OBJECT(object, offset, rc)) { |
4935 | /* found a dependency */ |
4936 | object->cow_hint = (vm_offset_t) offset; /* atomic */ |
4937 | |
4938 | break; |
4939 | } |
4940 | p = (vm_page_t) vm_page_queue_next(&p->vmp_listq); |
4941 | |
4942 | } while (--backing_rcount); |
4943 | if (backing_rcount != 0 ) { |
4944 | /* try and collapse the rest of the shadow chain */ |
4945 | if (object != original_object) { |
4946 | vm_object_unlock(object); |
4947 | } |
4948 | object = backing_object; |
4949 | object_lock_type = backing_object_lock_type; |
4950 | continue; |
4951 | } |
4952 | } |
4953 | |
4954 | /* |
4955 | * Walk through the offsets looking for pages in the |
4956 | * backing object that show through to the object. |
4957 | */ |
4958 | if (backing_rcount) { |
4959 | offset = hint_offset; |
4960 | |
4961 | while((offset = |
4962 | (offset + PAGE_SIZE_64 < object->vo_size) ? |
4963 | (offset + PAGE_SIZE_64) : 0) != hint_offset) { |
4964 | |
4965 | if (EXISTS_IN_OBJECT(backing_object, offset + |
4966 | backing_offset, backing_rcount) && |
4967 | !EXISTS_IN_OBJECT(object, offset, rcount)) { |
4968 | /* found a dependency */ |
4969 | object->cow_hint = (vm_offset_t) offset; /* atomic */ |
4970 | break; |
4971 | } |
4972 | } |
4973 | if (offset != hint_offset) { |
4974 | /* try and collapse the rest of the shadow chain */ |
4975 | if (object != original_object) { |
4976 | vm_object_unlock(object); |
4977 | } |
4978 | object = backing_object; |
4979 | object_lock_type = backing_object_lock_type; |
4980 | continue; |
4981 | } |
4982 | } |
4983 | } |
4984 | |
4985 | /* |
4986 | * We need "exclusive" locks on the 2 VM objects. |
4987 | */ |
4988 | if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) { |
4989 | vm_object_unlock(backing_object); |
4990 | if (object != original_object) |
4991 | vm_object_unlock(object); |
4992 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
4993 | backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
4994 | goto retry; |
4995 | } |
4996 | |
4997 | /* reset the offset hint for any objects deeper in the chain */ |
4998 | object->cow_hint = (vm_offset_t)0; |
4999 | |
5000 | /* |
5001 | * All interesting pages in the backing object |
5002 | * already live in the parent or its pager. |
5003 | * Thus we can bypass the backing object. |
5004 | */ |
5005 | |
5006 | vm_object_do_bypass(object, backing_object); |
5007 | vm_object_collapse_do_bypass++; |
5008 | |
5009 | /* |
5010 | * Try again with this object's new backing object. |
5011 | */ |
5012 | |
5013 | continue; |
5014 | } |
5015 | |
5016 | /* NOT REACHED */ |
5017 | /* |
5018 | if (object != original_object) { |
5019 | vm_object_unlock(object); |
5020 | } |
5021 | */ |
5022 | } |
5023 | |
5024 | /* |
5025 | * Routine: vm_object_page_remove: [internal] |
5026 | * Purpose: |
5027 | * Removes all physical pages in the specified |
5028 | * object range from the object's list of pages. |
5029 | * |
5030 | * In/out conditions: |
5031 | * The object must be locked. |
5032 | * The object must not have paging_in_progress, usually |
5033 | * guaranteed by not having a pager. |
5034 | */ |
5035 | unsigned int vm_object_page_remove_lookup = 0; |
5036 | unsigned int vm_object_page_remove_iterate = 0; |
5037 | |
5038 | __private_extern__ void |
5039 | vm_object_page_remove( |
5040 | vm_object_t object, |
5041 | vm_object_offset_t start, |
5042 | vm_object_offset_t end) |
5043 | { |
5044 | vm_page_t p, next; |
5045 | |
5046 | /* |
5047 | * One and two page removals are most popular. |
5048 | * The factor of 16 here is somewhat arbitrary. |
5049 | * It balances vm_object_lookup vs iteration. |
5050 | */ |
5051 | |
5052 | if (atop_64(end - start) < (unsigned)object->resident_page_count/16) { |
5053 | vm_object_page_remove_lookup++; |
5054 | |
5055 | for (; start < end; start += PAGE_SIZE_64) { |
5056 | p = vm_page_lookup(object, start); |
5057 | if (p != VM_PAGE_NULL) { |
5058 | assert(!p->vmp_cleaning && !p->vmp_laundry); |
5059 | if (!p->vmp_fictitious && p->vmp_pmapped) |
5060 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); |
5061 | VM_PAGE_FREE(p); |
5062 | } |
5063 | } |
5064 | } else { |
5065 | vm_object_page_remove_iterate++; |
5066 | |
5067 | p = (vm_page_t) vm_page_queue_first(&object->memq); |
5068 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) { |
5069 | next = (vm_page_t) vm_page_queue_next(&p->vmp_listq); |
5070 | if ((start <= p->vmp_offset) && (p->vmp_offset < end)) { |
5071 | assert(!p->vmp_cleaning && !p->vmp_laundry); |
5072 | if (!p->vmp_fictitious && p->vmp_pmapped) |
5073 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); |
5074 | VM_PAGE_FREE(p); |
5075 | } |
5076 | p = next; |
5077 | } |
5078 | } |
5079 | } |
5080 | |
5081 | |
5082 | /* |
5083 | * Routine: vm_object_coalesce |
5084 | * Function: Coalesces two objects backing up adjoining |
5085 | * regions of memory into a single object. |
5086 | * |
5087 | * returns TRUE if objects were combined. |
5088 | * |
5089 | * NOTE: Only works at the moment if the second object is NULL - |
5090 | * if it's not, which object do we lock first? |
5091 | * |
5092 | * Parameters: |
5093 | * prev_object First object to coalesce |
5094 | * prev_offset Offset into prev_object |
5095 | * next_object Second object into coalesce |
5096 | * next_offset Offset into next_object |
5097 | * |
5098 | * prev_size Size of reference to prev_object |
5099 | * next_size Size of reference to next_object |
5100 | * |
5101 | * Conditions: |
5102 | * The object(s) must *not* be locked. The map must be locked |
5103 | * to preserve the reference to the object(s). |
5104 | */ |
5105 | static int vm_object_coalesce_count = 0; |
5106 | |
5107 | __private_extern__ boolean_t |
5108 | vm_object_coalesce( |
5109 | vm_object_t prev_object, |
5110 | vm_object_t next_object, |
5111 | vm_object_offset_t prev_offset, |
5112 | __unused vm_object_offset_t next_offset, |
5113 | vm_object_size_t prev_size, |
5114 | vm_object_size_t next_size) |
5115 | { |
5116 | vm_object_size_t newsize; |
5117 | |
5118 | #ifdef lint |
5119 | next_offset++; |
5120 | #endif /* lint */ |
5121 | |
5122 | if (next_object != VM_OBJECT_NULL) { |
5123 | return(FALSE); |
5124 | } |
5125 | |
5126 | if (prev_object == VM_OBJECT_NULL) { |
5127 | return(TRUE); |
5128 | } |
5129 | |
5130 | XPR(XPR_VM_OBJECT, |
5131 | "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n" , |
5132 | prev_object, prev_offset, prev_size, next_size, 0); |
5133 | |
5134 | vm_object_lock(prev_object); |
5135 | |
5136 | /* |
5137 | * Try to collapse the object first |
5138 | */ |
5139 | vm_object_collapse(prev_object, prev_offset, TRUE); |
5140 | |
5141 | /* |
5142 | * Can't coalesce if pages not mapped to |
5143 | * prev_entry may be in use any way: |
5144 | * . more than one reference |
5145 | * . paged out |
5146 | * . shadows another object |
5147 | * . has a copy elsewhere |
5148 | * . is purgeable |
5149 | * . paging references (pages might be in page-list) |
5150 | */ |
5151 | |
5152 | if ((prev_object->ref_count > 1) || |
5153 | prev_object->pager_created || |
5154 | (prev_object->shadow != VM_OBJECT_NULL) || |
5155 | (prev_object->copy != VM_OBJECT_NULL) || |
5156 | (prev_object->true_share != FALSE) || |
5157 | (prev_object->purgable != VM_PURGABLE_DENY) || |
5158 | (prev_object->paging_in_progress != 0) || |
5159 | (prev_object->activity_in_progress != 0)) { |
5160 | vm_object_unlock(prev_object); |
5161 | return(FALSE); |
5162 | } |
5163 | |
5164 | vm_object_coalesce_count++; |
5165 | |
5166 | /* |
5167 | * Remove any pages that may still be in the object from |
5168 | * a previous deallocation. |
5169 | */ |
5170 | vm_object_page_remove(prev_object, |
5171 | prev_offset + prev_size, |
5172 | prev_offset + prev_size + next_size); |
5173 | |
5174 | /* |
5175 | * Extend the object if necessary. |
5176 | */ |
5177 | newsize = prev_offset + prev_size + next_size; |
5178 | if (newsize > prev_object->vo_size) { |
5179 | prev_object->vo_size = newsize; |
5180 | } |
5181 | |
5182 | vm_object_unlock(prev_object); |
5183 | return(TRUE); |
5184 | } |
5185 | |
5186 | kern_return_t |
5187 | vm_object_populate_with_private( |
5188 | vm_object_t object, |
5189 | vm_object_offset_t offset, |
5190 | ppnum_t phys_page, |
5191 | vm_size_t size) |
5192 | { |
5193 | ppnum_t base_page; |
5194 | vm_object_offset_t base_offset; |
5195 | |
5196 | |
5197 | if (!object->private) |
5198 | return KERN_FAILURE; |
5199 | |
5200 | base_page = phys_page; |
5201 | |
5202 | vm_object_lock(object); |
5203 | |
5204 | if (!object->phys_contiguous) { |
5205 | vm_page_t m; |
5206 | |
5207 | if ((base_offset = trunc_page_64(offset)) != offset) { |
5208 | vm_object_unlock(object); |
5209 | return KERN_FAILURE; |
5210 | } |
5211 | base_offset += object->paging_offset; |
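		/*
		 * Walk the range one page at a time.  A page that is already
		 * resident is re-pointed at the caller-supplied physical page
		 * (converting a fictitious page to a private one if needed);
		 * if nothing is resident at that offset, a fresh fictitious
		 * page is grabbed, marked private and inserted.
		 */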
5212 | |
5213 | while (size) { |
5214 | m = vm_page_lookup(object, base_offset); |
5215 | |
5216 | if (m != VM_PAGE_NULL) { |
5217 | if (m->vmp_fictitious) { |
5218 | if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) { |
5219 | |
5220 | vm_page_lockspin_queues(); |
5221 | m->vmp_private = TRUE; |
5222 | vm_page_unlock_queues(); |
5223 | |
5224 | m->vmp_fictitious = FALSE; |
5225 | VM_PAGE_SET_PHYS_PAGE(m, base_page); |
5226 | } |
5227 | } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) { |
5228 | |
5229 | if ( !m->vmp_private) { |
5230 | /* |
5231 | * we'd leak a real page... that can't be right |
5232 | */ |
5233 | panic("vm_object_populate_with_private - %p not private" , m); |
5234 | } |
5235 | if (m->vmp_pmapped) { |
5236 | /* |
5237 | * pmap call to clear old mapping |
5238 | */ |
5239 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); |
5240 | } |
5241 | VM_PAGE_SET_PHYS_PAGE(m, base_page); |
5242 | } |
5243 | |
5244 | } else { |
5245 | while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) |
5246 | vm_page_more_fictitious(); |
5247 | |
5248 | /* |
5249 | * private normally requires lock_queues but since we |
5250 | * are initializing the page, its not necessary here |
5251 | */ |
5252 | m->vmp_private = TRUE; |
5253 | m->vmp_fictitious = FALSE; |
5254 | VM_PAGE_SET_PHYS_PAGE(m, base_page); |
5255 | m->vmp_unusual = TRUE; |
5256 | m->vmp_busy = FALSE; |
5257 | |
5258 | vm_page_insert(m, object, base_offset); |
5259 | } |
5260 | base_page++; /* Go to the next physical page */ |
5261 | base_offset += PAGE_SIZE; |
5262 | size -= PAGE_SIZE; |
5263 | } |
5264 | } else { |
5265 | /* NOTE: we should check the original settings here */ |
5266 | /* if we have a size > zero a pmap call should be made */ |
5267 | /* to disable the range */ |
5268 | |
5269 | /* pmap_? */ |
5270 | |
5271 | /* shadows on contiguous memory are not allowed */ |
5272 | /* we therefore can use the offset field */ |
5273 | object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT; |
5274 | object->vo_size = size; |
5275 | } |
5276 | vm_object_unlock(object); |
5277 | |
5278 | return KERN_SUCCESS; |
5279 | } |
5280 | |
5281 | |
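/*
 *	Routine:	memory_object_create_named
 *	Purpose:
 *		Create a named VM object for the given memory object (pager),
 *		wait for its pager to become ready, and return the associated
 *		memory object control.
 */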
5282 | kern_return_t |
5283 | memory_object_create_named( |
	memory_object_t		pager,
5285 | memory_object_offset_t size, |
5286 | memory_object_control_t *control) |
5287 | { |
5288 | vm_object_t object; |
5289 | |
5290 | *control = MEMORY_OBJECT_CONTROL_NULL; |
5291 | if (pager == MEMORY_OBJECT_NULL) |
5292 | return KERN_INVALID_ARGUMENT; |
5293 | |
5294 | object = vm_object_memory_object_associate(pager, |
5295 | VM_OBJECT_NULL, |
5296 | size, |
5297 | TRUE); |
5298 | if (object == VM_OBJECT_NULL) { |
5299 | return KERN_INVALID_OBJECT; |
5300 | } |
5301 | |
5302 | /* wait for object (if any) to be ready */ |
5303 | if (object != VM_OBJECT_NULL) { |
5304 | vm_object_lock(object); |
5305 | object->named = TRUE; |
5306 | while (!object->pager_ready) { |
5307 | vm_object_sleep(object, |
5308 | VM_OBJECT_EVENT_PAGER_READY, |
5309 | THREAD_UNINT); |
5310 | } |
5311 | *control = object->pager_control; |
5312 | vm_object_unlock(object); |
5313 | } |
5314 | return (KERN_SUCCESS); |
5315 | } |
5316 | |
5317 | |
5318 | /* |
5319 | * Routine: memory_object_recover_named [user interface] |
5320 | * Purpose: |
5321 | * Attempt to recover a named reference for a VM object. |
5322 | * VM will verify that the object has not already started |
5323 | * down the termination path, and if it has, will optionally |
5324 | * wait for that to finish. |
5325 | * Returns: |
5326 | * KERN_SUCCESS - we recovered a named reference on the object |
5327 | * KERN_FAILURE - we could not recover a reference (object dead) |
5328 | * KERN_INVALID_ARGUMENT - bad memory object control |
5329 | */ |
5330 | kern_return_t |
5331 | memory_object_recover_named( |
5332 | memory_object_control_t control, |
5333 | boolean_t wait_on_terminating) |
5334 | { |
5335 | vm_object_t object; |
5336 | |
5337 | object = memory_object_control_to_vm_object(control); |
5338 | if (object == VM_OBJECT_NULL) { |
5339 | return (KERN_INVALID_ARGUMENT); |
5340 | } |
5341 | restart: |
5342 | vm_object_lock(object); |
5343 | |
5344 | if (object->terminating && wait_on_terminating) { |
5345 | vm_object_wait(object, |
5346 | VM_OBJECT_EVENT_PAGING_IN_PROGRESS, |
5347 | THREAD_UNINT); |
5348 | goto restart; |
5349 | } |
5350 | |
5351 | if (!object->alive) { |
5352 | vm_object_unlock(object); |
5353 | return KERN_FAILURE; |
5354 | } |
5355 | |
5356 | if (object->named == TRUE) { |
5357 | vm_object_unlock(object); |
5358 | return KERN_SUCCESS; |
5359 | } |
5360 | object->named = TRUE; |
5361 | vm_object_lock_assert_exclusive(object); |
5362 | object->ref_count++; |
5363 | vm_object_res_reference(object); |
5364 | while (!object->pager_ready) { |
5365 | vm_object_sleep(object, |
5366 | VM_OBJECT_EVENT_PAGER_READY, |
5367 | THREAD_UNINT); |
5368 | } |
5369 | vm_object_unlock(object); |
5370 | return (KERN_SUCCESS); |
5371 | } |
5372 | |
5373 | |
5374 | /* |
5375 | * vm_object_release_name: |
5376 | * |
5377 | * Enforces name semantic on memory_object reference count decrement |
5378 | * This routine should not be called unless the caller holds a name |
5379 | * reference gained through the memory_object_create_named. |
5380 | * |
5381 | * If the TERMINATE_IDLE flag is set, the call will return if the |
5382 | * reference count is not 1. i.e. idle with the only remaining reference |
5383 | * being the name. |
5384 | * If the decision is made to proceed the name field flag is set to |
5385 | * false and the reference count is decremented. If the RESPECT_CACHE |
5386 | * flag is set and the reference count has gone to zero, the |
5387 | * memory_object is checked to see if it is cacheable otherwise when |
5388 | * the reference count is zero, it is simply terminated. |
5389 | */ |
5390 | |
5391 | __private_extern__ kern_return_t |
5392 | vm_object_release_name( |
5393 | vm_object_t object, |
5394 | int flags) |
5395 | { |
5396 | vm_object_t shadow; |
5397 | boolean_t original_object = TRUE; |
5398 | |
5399 | while (object != VM_OBJECT_NULL) { |
5400 | |
5401 | vm_object_lock(object); |
5402 | |
5403 | assert(object->alive); |
5404 | if (original_object) |
5405 | assert(object->named); |
5406 | assert(object->ref_count > 0); |
5407 | |
5408 | /* |
5409 | * We have to wait for initialization before |
5410 | * destroying or caching the object. |
5411 | */ |
5412 | |
5413 | if (object->pager_created && !object->pager_initialized) { |
5414 | assert(!object->can_persist); |
5415 | vm_object_assert_wait(object, |
5416 | VM_OBJECT_EVENT_INITIALIZED, |
5417 | THREAD_UNINT); |
5418 | vm_object_unlock(object); |
5419 | thread_block(THREAD_CONTINUE_NULL); |
5420 | continue; |
5421 | } |
5422 | |
5423 | if (((object->ref_count > 1) |
5424 | && (flags & MEMORY_OBJECT_TERMINATE_IDLE)) |
5425 | || (object->terminating)) { |
5426 | vm_object_unlock(object); |
5427 | return KERN_FAILURE; |
5428 | } else { |
5429 | if (flags & MEMORY_OBJECT_RELEASE_NO_OP) { |
5430 | vm_object_unlock(object); |
5431 | return KERN_SUCCESS; |
5432 | } |
5433 | } |
5434 | |
5435 | if ((flags & MEMORY_OBJECT_RESPECT_CACHE) && |
5436 | (object->ref_count == 1)) { |
5437 | if (original_object) |
5438 | object->named = FALSE; |
5439 | vm_object_unlock(object); |
5440 | /* let vm_object_deallocate push this thing into */ |
5441 | /* the cache, if that it is where it is bound */ |
5442 | vm_object_deallocate(object); |
5443 | return KERN_SUCCESS; |
5444 | } |
5445 | VM_OBJ_RES_DECR(object); |
5446 | shadow = object->pageout?VM_OBJECT_NULL:object->shadow; |
5447 | |
5448 | if (object->ref_count == 1) { |
5449 | if (vm_object_terminate(object) != KERN_SUCCESS) { |
5450 | if (original_object) { |
5451 | return KERN_FAILURE; |
5452 | } else { |
5453 | return KERN_SUCCESS; |
5454 | } |
5455 | } |
5456 | if (shadow != VM_OBJECT_NULL) { |
5457 | original_object = FALSE; |
5458 | object = shadow; |
5459 | continue; |
5460 | } |
5461 | return KERN_SUCCESS; |
5462 | } else { |
5463 | vm_object_lock_assert_exclusive(object); |
5464 | object->ref_count--; |
5465 | assert(object->ref_count > 0); |
5466 | if(original_object) |
5467 | object->named = FALSE; |
5468 | vm_object_unlock(object); |
5469 | return KERN_SUCCESS; |
5470 | } |
5471 | } |
5472 | /*NOTREACHED*/ |
5473 | assert(0); |
5474 | return KERN_FAILURE; |
5475 | } |
5476 | |
5477 | |
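/*
 *	Routine:	vm_object_lock_request [internal]
 *	Purpose:
 *		Apply the given return/flush semantics and protection to the
 *		specified range of the object via vm_object_update(), holding
 *		a paging reference for the duration of the operation.
 */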
5478 | __private_extern__ kern_return_t |
5479 | vm_object_lock_request( |
5480 | vm_object_t object, |
5481 | vm_object_offset_t offset, |
5482 | vm_object_size_t size, |
5483 | memory_object_return_t should_return, |
5484 | int flags, |
5485 | vm_prot_t prot) |
5486 | { |
5487 | __unused boolean_t should_flush; |
5488 | |
5489 | should_flush = flags & MEMORY_OBJECT_DATA_FLUSH; |
5490 | |
5491 | XPR(XPR_MEMORY_OBJECT, |
5492 | "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n" , |
5493 | object, offset, size, |
5494 | (((should_return&1)<<1)|should_flush), prot); |
5495 | |
5496 | /* |
5497 | * Check for bogus arguments. |
5498 | */ |
5499 | if (object == VM_OBJECT_NULL) |
5500 | return (KERN_INVALID_ARGUMENT); |
5501 | |
5502 | if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) |
5503 | return (KERN_INVALID_ARGUMENT); |
5504 | |
5505 | size = round_page_64(size); |
5506 | |
5507 | /* |
5508 | * Lock the object, and acquire a paging reference to |
5509 | * prevent the memory_object reference from being released. |
5510 | */ |
5511 | vm_object_lock(object); |
5512 | vm_object_paging_begin(object); |
5513 | |
5514 | (void)vm_object_update(object, |
5515 | offset, size, NULL, NULL, should_return, flags, prot); |
5516 | |
5517 | vm_object_paging_end(object); |
5518 | vm_object_unlock(object); |
5519 | |
5520 | return (KERN_SUCCESS); |
5521 | } |
5522 | |
5523 | /* |
5524 | * Empty a purgeable object by grabbing the physical pages assigned to it and |
5525 | * putting them on the free queue without writing them to backing store, etc. |
5526 | * When the pages are next touched they will be demand zero-fill pages. We |
5527 | * skip pages which are busy, being paged in/out, wired, etc. We do _not_ |
5528 | * skip referenced/dirty pages, pages on the active queue, etc. We're more |
5529 | * than happy to grab these since this is a purgeable object. We mark the |
5530 | * object as "empty" after reaping its pages. |
5531 | * |
5532 | * On entry the object must be locked and it must be |
5533 | * purgeable with no delayed copies pending. |
5534 | */ |
5535 | uint64_t |
5536 | vm_object_purge(vm_object_t object, int flags) |
5537 | { |
5538 | unsigned int object_page_count = 0, pgcount = 0; |
5539 | uint64_t total_purged_pgcount = 0; |
5540 | boolean_t skipped_object = FALSE; |
5541 | |
5542 | vm_object_lock_assert_exclusive(object); |
5543 | |
5544 | if (object->purgable == VM_PURGABLE_DENY) |
5545 | return 0; |
5546 | |
5547 | assert(object->copy == VM_OBJECT_NULL); |
5548 | assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); |
5549 | |
5550 | /* |
5551 | * We need to set the object's state to VM_PURGABLE_EMPTY *before* |
5552 | * reaping its pages. We update vm_page_purgeable_count in bulk |
5553 | * and we don't want vm_page_remove() to update it again for each |
5554 | * page we reap later. |
5555 | * |
5556 | * For the purgeable ledgers, pages from VOLATILE and EMPTY objects |
5557 | * are all accounted for in the "volatile" ledgers, so this does not |
5558 | * make any difference. |
5559 | * If we transitioned directly from NONVOLATILE to EMPTY, |
5560 | * vm_page_purgeable_count must have been updated when the object |
5561 | * was dequeued from its volatile queue and the purgeable ledgers |
5562 | * must have also been updated accordingly at that time (in |
5563 | * vm_object_purgable_control()). |
5564 | */ |
5565 | if (object->purgable == VM_PURGABLE_VOLATILE) { |
5566 | unsigned int delta; |
5567 | assert(object->resident_page_count >= |
5568 | object->wired_page_count); |
5569 | delta = (object->resident_page_count - |
5570 | object->wired_page_count); |
5571 | if (delta != 0) { |
5572 | assert(vm_page_purgeable_count >= |
5573 | delta); |
5574 | OSAddAtomic(-delta, |
5575 | (SInt32 *)&vm_page_purgeable_count); |
5576 | } |
5577 | if (object->wired_page_count != 0) { |
5578 | assert(vm_page_purgeable_wired_count >= |
5579 | object->wired_page_count); |
5580 | OSAddAtomic(-object->wired_page_count, |
5581 | (SInt32 *)&vm_page_purgeable_wired_count); |
5582 | } |
5583 | object->purgable = VM_PURGABLE_EMPTY; |
5584 | } |
5585 | assert(object->purgable == VM_PURGABLE_EMPTY); |
5586 | |
5587 | object_page_count = object->resident_page_count; |
5588 | |
5589 | vm_object_reap_pages(object, REAP_PURGEABLE); |
5590 | |
5591 | if (object->resident_page_count >= object_page_count) { |
5592 | total_purged_pgcount = 0; |
5593 | } else { |
5594 | total_purged_pgcount = object_page_count - object->resident_page_count; |
5595 | } |
5596 | |
5597 | if (object->pager != NULL) { |
5598 | |
5599 | assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); |
5600 | |
5601 | if (object->activity_in_progress == 0 && |
5602 | object->paging_in_progress == 0) { |
5603 | /* |
5604 | * Also reap any memory coming from this object |
5605 | * in the VM compressor. |
5606 | * |
5607 | * There are no operations in progress on the VM object |
5608 | * and no operation can start while we're holding the |
5609 | * VM object lock, so it's safe to reap the compressed |
5610 | * pages and update the page counts. |
5611 | */ |
5612 | pgcount = vm_compressor_pager_get_count(object->pager); |
5613 | if (pgcount) { |
5614 | pgcount = vm_compressor_pager_reap_pages(object->pager, flags); |
5615 | vm_compressor_pager_count(object->pager, |
5616 | -pgcount, |
5617 | FALSE, /* shared */ |
5618 | object); |
5619 | vm_object_owner_compressed_update(object, |
5620 | -pgcount); |
5621 | } |
5622 | if ( !(flags & C_DONT_BLOCK)) { |
5623 | assert(vm_compressor_pager_get_count(object->pager) |
5624 | == 0); |
5625 | } |
5626 | } else { |
5627 | /* |
5628 | * There's some kind of paging activity in progress |
5629 | * for this object, which could result in a page |
5630 | * being compressed or decompressed, possibly while |
5631 | * the VM object is not locked, so it could race |
5632 | * with us. |
5633 | * |
5634 | * We can't really synchronize this without possibly |
5635 | * causing a deadlock when the compressor needs to |
5636 | * allocate or free memory while compressing or |
5637 | * decompressing a page from a purgeable object |
5638 | * mapped in the kernel_map... |
5639 | * |
5640 | * So let's not attempt to purge the compressor |
5641 | * pager if there's any kind of operation in |
5642 | * progress on the VM object. |
5643 | */ |
5644 | skipped_object = TRUE; |
5645 | } |
5646 | } |
5647 | |
5648 | vm_object_lock_assert_exclusive(object); |
5649 | |
5650 | total_purged_pgcount += pgcount; |
5651 | |
5652 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)), |
5653 | VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */ |
5654 | object_page_count, |
5655 | total_purged_pgcount, |
5656 | skipped_object, |
5657 | 0); |
5658 | |
5659 | return total_purged_pgcount; |
5660 | } |
5661 | |
5662 | |
5663 | /* |
5664 | * vm_object_purgeable_control() allows the caller to control and investigate the |
5665 | * state of a purgeable object. A purgeable object is created via a call to |
5666 | * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will |
5667 | * never be coalesced with any other object -- even other purgeable objects -- |
5668 | * and will thus always remain a distinct object. A purgeable object has |
5669 | * special semantics when its reference count is exactly 1. If its reference |
5670 | * count is greater than 1, then a purgeable object will behave like a normal |
5671 | * object and attempts to use this interface will result in an error return |
5672 | * of KERN_INVALID_ARGUMENT. |
5673 | * |
5674 | * A purgeable object may be put into a "volatile" state which will make the |
5675 | * object's pages elligable for being reclaimed without paging to backing |
5676 | * store if the system runs low on memory. If the pages in a volatile |
5677 | * purgeable object are reclaimed, the purgeable object is said to have been |
5678 | * "emptied." When a purgeable object is emptied the system will reclaim as |
5679 | * many pages from the object as it can in a convenient manner (pages already |
5680 | * en route to backing store or busy for other reasons are left as is). When |
5681 | * a purgeable object is made volatile, its pages will generally be reclaimed |
5682 | * before other pages in the application's working set. This semantic is |
5683 | * generally used by applications which can recreate the data in the object |
5684 | * faster than it can be paged in. One such example might be media assets |
5685 | * which can be reread from a much faster RAID volume. |
5686 | * |
5687 | * A purgeable object may be designated as "non-volatile" which means it will |
5688 | * behave like all other objects in the system with pages being written to and |
5689 | * read from backing store as needed to satisfy system memory needs. If the |
5690 | * object was emptied before the object was made non-volatile, that fact will |
5691 | * be returned as the old state of the purgeable object (see |
5692 | * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which |
5693 | * were reclaimed as part of emptying the object will be refaulted in as |
5694 | * zero-fill on demand. It is up to the application to note that an object |
5695 | * was emptied and recreate the objects contents if necessary. When a |
5696 | * purgeable object is made non-volatile, its pages will generally not be paged |
5697 | * out to backing store in the immediate future. A purgeable object may also |
5698 | * be manually emptied. |
5699 | * |
5700 | * Finally, the current state (non-volatile, volatile, volatile & empty) of a |
5701 | * volatile purgeable object may be queried at any time. This information may |
5702 | * be used as a control input to let the application know when the system is |
5703 | * experiencing memory pressure and is reclaiming memory. |
5704 | * |
5705 | * The specified address may be any address within the purgeable object. If |
5706 | * the specified address does not represent any object in the target task's |
5707 | * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the |
5708 | * object containing the specified address is not a purgeable object, then |
5709 | * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be |
5710 | * returned. |
5711 | * |
5712 | * The control parameter may be any one of VM_PURGABLE_SET_STATE or |
5713 | * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter |
5714 | * state is used to set the new state of the purgeable object and return its |
5715 | * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable |
5716 | * object is returned in the parameter state. |
5717 | * |
5718 | * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE, |
5719 | * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent |
5720 | * the non-volatile, volatile and volatile/empty states described above. |
5721 | * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will |
5722 | * immediately reclaim as many pages in the object as can be conveniently |
5723 | * collected (some may have already been written to backing store or be |
5724 | * otherwise busy). |
5725 | * |
5726 | * The process of making a purgeable object non-volatile and determining its |
5727 | * previous state is atomic. Thus, if a purgeable object is made |
5728 | * VM_PURGABLE_NONVOLATILE and the old state is returned as |
5729 | * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are |
5730 | * completely intact and will remain so until the object is made volatile |
5731 | * again. If the old state is returned as VM_PURGABLE_EMPTY then the object |
5732 | * was reclaimed while it was in a volatile state and its previous contents |
5733 | * have been lost. |
5734 | */ |
5735 | /* |
5736 | * The object must be locked. |
5737 | */ |
5738 | kern_return_t |
5739 | vm_object_purgable_control( |
5740 | vm_object_t object, |
5741 | vm_purgable_t control, |
5742 | int *state) |
5743 | { |
5744 | int old_state; |
5745 | int new_state; |
5746 | |
5747 | if (object == VM_OBJECT_NULL) { |
5748 | /* |
5749 | * Object must already be present or it can't be purgeable. |
5750 | */ |
5751 | return KERN_INVALID_ARGUMENT; |
5752 | } |
5753 | |
5754 | vm_object_lock_assert_exclusive(object); |
5755 | |
5756 | /* |
5757 | * Get current state of the purgeable object. |
5758 | */ |
5759 | old_state = object->purgable; |
5760 | if (old_state == VM_PURGABLE_DENY) |
5761 | return KERN_INVALID_ARGUMENT; |
5762 | |
5763 | /* purgeable cant have delayed copies - now or in the future */ |
5764 | assert(object->copy == VM_OBJECT_NULL); |
5765 | assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); |
5766 | |
5767 | /* |
5768 | * Execute the desired operation. |
5769 | */ |
5770 | if (control == VM_PURGABLE_GET_STATE) { |
5771 | *state = old_state; |
5772 | return KERN_SUCCESS; |
5773 | } |
5774 | |
5775 | if (control == VM_PURGABLE_SET_STATE && |
5776 | object->purgeable_only_by_kernel) { |
5777 | return KERN_PROTECTION_FAILURE; |
5778 | } |
5779 | |
5780 | if (control != VM_PURGABLE_SET_STATE && |
5781 | control != VM_PURGABLE_SET_STATE_FROM_KERNEL) { |
5782 | return KERN_INVALID_ARGUMENT; |
5783 | } |
5784 | |
5785 | if ((*state) & VM_PURGABLE_DEBUG_EMPTY) { |
5786 | object->volatile_empty = TRUE; |
5787 | } |
5788 | if ((*state) & VM_PURGABLE_DEBUG_FAULT) { |
5789 | object->volatile_fault = TRUE; |
5790 | } |
5791 | |
5792 | new_state = *state & VM_PURGABLE_STATE_MASK; |
5793 | if (new_state == VM_PURGABLE_VOLATILE) { |
5794 | if (old_state == VM_PURGABLE_EMPTY) { |
5795 | /* what's been emptied must stay empty */ |
5796 | new_state = VM_PURGABLE_EMPTY; |
5797 | } |
5798 | if (object->volatile_empty) { |
5799 | /* debugging mode: go straight to empty */ |
5800 | new_state = VM_PURGABLE_EMPTY; |
5801 | } |
5802 | } |
5803 | |
5804 | switch (new_state) { |
5805 | case VM_PURGABLE_DENY: |
5806 | /* |
5807 | * Attempting to convert purgeable memory to non-purgeable: |
5808 | * not allowed. |
5809 | */ |
5810 | return KERN_INVALID_ARGUMENT; |
5811 | case VM_PURGABLE_NONVOLATILE: |
5812 | object->purgable = new_state; |
5813 | |
5814 | if (old_state == VM_PURGABLE_VOLATILE) { |
5815 | unsigned int delta; |
5816 | |
5817 | assert(object->resident_page_count >= |
5818 | object->wired_page_count); |
5819 | delta = (object->resident_page_count - |
5820 | object->wired_page_count); |
5821 | |
5822 | assert(vm_page_purgeable_count >= delta); |
5823 | |
5824 | if (delta != 0) { |
5825 | OSAddAtomic(-delta, |
5826 | (SInt32 *)&vm_page_purgeable_count); |
5827 | } |
5828 | if (object->wired_page_count != 0) { |
5829 | assert(vm_page_purgeable_wired_count >= |
5830 | object->wired_page_count); |
5831 | OSAddAtomic(-object->wired_page_count, |
5832 | (SInt32 *)&vm_page_purgeable_wired_count); |
5833 | } |
5834 | |
5835 | vm_page_lock_queues(); |
5836 | |
5837 | /* object should be on a queue */ |
5838 | assert(object->objq.next != NULL && |
5839 | object->objq.prev != NULL); |
5840 | purgeable_q_t queue; |
5841 | |
5842 | /* |
5843 | * Move object from its volatile queue to the |
5844 | * non-volatile queue... |
5845 | */ |
5846 | queue = vm_purgeable_object_remove(object); |
5847 | assert(queue); |
5848 | |
5849 | if (object->purgeable_when_ripe) { |
5850 | vm_purgeable_token_delete_last(queue); |
5851 | } |
5852 | assert(queue->debug_count_objects>=0); |
5853 | |
5854 | vm_page_unlock_queues(); |
5855 | } |
5856 | if (old_state == VM_PURGABLE_VOLATILE || |
5857 | old_state == VM_PURGABLE_EMPTY) { |
5858 | /* |
5859 | * Transfer the object's pages from the volatile to |
5860 | * non-volatile ledgers. |
5861 | */ |
5862 | vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE); |
5863 | } |
5864 | |
5865 | break; |
5866 | |
5867 | case VM_PURGABLE_VOLATILE: |
5868 | if (object->volatile_fault) { |
5869 | vm_page_t p; |
5870 | int refmod; |
5871 | |
5872 | vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { |
5873 | if (p->vmp_busy || |
5874 | VM_PAGE_WIRED(p) || |
5875 | p->vmp_fictitious) { |
5876 | continue; |
5877 | } |
5878 | refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); |
5879 | if ((refmod & VM_MEM_MODIFIED) && |
5880 | !p->vmp_dirty) { |
5881 | SET_PAGE_DIRTY(p, FALSE); |
5882 | } |
5883 | } |
5884 | } |
5885 | |
5886 | assert(old_state != VM_PURGABLE_EMPTY); |
5887 | |
5888 | purgeable_q_t queue; |
5889 | |
5890 | /* find the correct queue */ |
5891 | if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) |
5892 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; |
5893 | else { |
5894 | if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) |
5895 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; |
5896 | else |
5897 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; |
5898 | } |
5899 | |
5900 | if (old_state == VM_PURGABLE_NONVOLATILE || |
5901 | old_state == VM_PURGABLE_EMPTY) { |
5902 | unsigned int delta; |
5903 | |
5904 | if ((*state & VM_PURGABLE_NO_AGING_MASK) == |
5905 | VM_PURGABLE_NO_AGING) { |
5906 | object->purgeable_when_ripe = FALSE; |
5907 | } else { |
5908 | object->purgeable_when_ripe = TRUE; |
5909 | } |
5910 | |
5911 | if (object->purgeable_when_ripe) { |
5912 | kern_return_t result; |
5913 | |
5914 | /* try to add token... this can fail */ |
5915 | vm_page_lock_queues(); |
5916 | |
5917 | result = vm_purgeable_token_add(queue); |
5918 | if (result != KERN_SUCCESS) { |
5919 | vm_page_unlock_queues(); |
5920 | return result; |
5921 | } |
5922 | vm_page_unlock_queues(); |
5923 | } |
5924 | |
5925 | assert(object->resident_page_count >= |
5926 | object->wired_page_count); |
5927 | delta = (object->resident_page_count - |
5928 | object->wired_page_count); |
5929 | |
5930 | if (delta != 0) { |
5931 | OSAddAtomic(delta, |
5932 | &vm_page_purgeable_count); |
5933 | } |
5934 | if (object->wired_page_count != 0) { |
5935 | OSAddAtomic(object->wired_page_count, |
5936 | &vm_page_purgeable_wired_count); |
5937 | } |
5938 | |
5939 | object->purgable = new_state; |
5940 | |
5941 | /* object should be on "non-volatile" queue */ |
5942 | assert(object->objq.next != NULL); |
5943 | assert(object->objq.prev != NULL); |
5944 | } |
5945 | else if (old_state == VM_PURGABLE_VOLATILE) { |
5946 | purgeable_q_t old_queue; |
5947 | boolean_t purgeable_when_ripe; |
5948 | |
5949 | /* |
5950 | * if reassigning priorities / purgeable groups, we don't change the |
5951 | * token queue. So moving priorities will not make pages stay around longer. |
5952 | * Reasoning is that the algorithm gives most priority to the most important |
5953 | * object. If a new token is added, the most important object' priority is boosted. |
5954 | * This biases the system already for purgeable queues that move a lot. |
5955 | * It doesn't seem more biasing is neccessary in this case, where no new object is added. |
5956 | */ |
5957 | assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */ |
5958 | |
5959 | old_queue = vm_purgeable_object_remove(object); |
5960 | assert(old_queue); |
5961 | |
5962 | if ((*state & VM_PURGABLE_NO_AGING_MASK) == |
5963 | VM_PURGABLE_NO_AGING) { |
5964 | purgeable_when_ripe = FALSE; |
5965 | } else { |
5966 | purgeable_when_ripe = TRUE; |
5967 | } |
5968 | |
5969 | if (old_queue != queue || |
5970 | (purgeable_when_ripe != |
5971 | object->purgeable_when_ripe)) { |
5972 | kern_return_t result; |
5973 | |
5974 | /* Changing queue. Have to move token. */ |
5975 | vm_page_lock_queues(); |
5976 | if (object->purgeable_when_ripe) { |
5977 | vm_purgeable_token_delete_last(old_queue); |
5978 | } |
5979 | object->purgeable_when_ripe = purgeable_when_ripe; |
5980 | if (object->purgeable_when_ripe) { |
5981 | result = vm_purgeable_token_add(queue); |
5982 | assert(result==KERN_SUCCESS); /* this should never fail since we just freed a token */ |
5983 | } |
5984 | vm_page_unlock_queues(); |
5985 | |
5986 | } |
5987 | }; |
5988 | vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT ); |
5989 | if (old_state == VM_PURGABLE_NONVOLATILE) { |
5990 | vm_purgeable_accounting(object, |
5991 | VM_PURGABLE_NONVOLATILE); |
5992 | } |
5993 | |
5994 | assert(queue->debug_count_objects>=0); |
5995 | |
5996 | break; |
5997 | |
5998 | |
5999 | case VM_PURGABLE_EMPTY: |
6000 | if (object->volatile_fault) { |
6001 | vm_page_t p; |
6002 | int refmod; |
6003 | |
6004 | vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { |
6005 | if (p->vmp_busy || |
6006 | VM_PAGE_WIRED(p) || |
6007 | p->vmp_fictitious) { |
6008 | continue; |
6009 | } |
6010 | refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); |
6011 | if ((refmod & VM_MEM_MODIFIED) && |
6012 | !p->vmp_dirty) { |
6013 | SET_PAGE_DIRTY(p, FALSE); |
6014 | } |
6015 | } |
6016 | } |
6017 | |
6018 | if (old_state == VM_PURGABLE_VOLATILE) { |
6019 | purgeable_q_t old_queue; |
6020 | |
6021 | /* object should be on a queue */ |
6022 | assert(object->objq.next != NULL && |
6023 | object->objq.prev != NULL); |
6024 | |
6025 | old_queue = vm_purgeable_object_remove(object); |
6026 | assert(old_queue); |
6027 | if (object->purgeable_when_ripe) { |
6028 | vm_page_lock_queues(); |
6029 | vm_purgeable_token_delete_first(old_queue); |
6030 | vm_page_unlock_queues(); |
6031 | } |
6032 | } |
6033 | |
6034 | if (old_state == VM_PURGABLE_NONVOLATILE) { |
6035 | /* |
6036 | * This object's pages were previously accounted as |
6037 | * "non-volatile" and now need to be accounted as |
6038 | * "volatile". |
6039 | */ |
6040 | vm_purgeable_accounting(object, |
6041 | VM_PURGABLE_NONVOLATILE); |
6042 | /* |
6043 | * Set to VM_PURGABLE_EMPTY because the pages are no |
6044 | * longer accounted in the "non-volatile" ledger |
6045 | * and are also not accounted for in |
6046 | * "vm_page_purgeable_count". |
6047 | */ |
6048 | object->purgable = VM_PURGABLE_EMPTY; |
6049 | } |
6050 | |
6051 | (void) vm_object_purge(object, 0); |
6052 | assert(object->purgable == VM_PURGABLE_EMPTY); |
6053 | |
6054 | break; |
6055 | } |
6056 | |
6057 | *state = old_state; |
6058 | |
6059 | vm_object_lock_assert_exclusive(object); |
6060 | |
6061 | return KERN_SUCCESS; |
6062 | } |
6063 | |
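/*
 *	Routine:	vm_object_get_page_counts
 *	Purpose:
 *		Count the resident (and optionally dirty) pages in the given
 *		range of a VM object.  Depending on which is cheaper, either
 *		the object's resident page list is walked or each offset in
 *		the range is looked up individually.
 *	Conditions:
 *		The object must be locked (exclusive).
 */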
6064 | kern_return_t |
6065 | vm_object_get_page_counts( |
6066 | vm_object_t object, |
6067 | vm_object_offset_t offset, |
6068 | vm_object_size_t size, |
6069 | unsigned int *resident_page_count, |
6070 | unsigned int *dirty_page_count) |
6071 | { |
6072 | |
6073 | kern_return_t kr = KERN_SUCCESS; |
6074 | boolean_t count_dirty_pages = FALSE; |
6075 | vm_page_t p = VM_PAGE_NULL; |
6076 | unsigned int local_resident_count = 0; |
6077 | unsigned int local_dirty_count = 0; |
6078 | vm_object_offset_t cur_offset = 0; |
6079 | vm_object_offset_t end_offset = 0; |
6080 | |
6081 | if (object == VM_OBJECT_NULL) |
6082 | return KERN_INVALID_ARGUMENT; |
6083 | |
6084 | |
6085 | cur_offset = offset; |
6086 | |
6087 | end_offset = offset + size; |
6088 | |
6089 | vm_object_lock_assert_exclusive(object); |
6090 | |
6091 | if (dirty_page_count != NULL) { |
6092 | |
6093 | count_dirty_pages = TRUE; |
6094 | } |
6095 | |
6096 | if (resident_page_count != NULL && count_dirty_pages == FALSE) { |
6097 | /* |
6098 | * Fast path when: |
6099 | * - we only want the resident page count, and, |
6100 | * - the entire object is exactly covered by the request. |
6101 | */ |
6102 | if (offset == 0 && (object->vo_size == size)) { |
6103 | |
6104 | *resident_page_count = object->resident_page_count; |
6105 | goto out; |
6106 | } |
6107 | } |
6108 | |
6109 | if (object->resident_page_count <= (size >> PAGE_SHIFT)) { |
6110 | |
6111 | vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { |
6112 | |
6113 | if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) { |
6114 | |
6115 | local_resident_count++; |
6116 | |
6117 | if (count_dirty_pages) { |
6118 | |
6119 | if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) { |
6120 | |
6121 | local_dirty_count++; |
6122 | } |
6123 | } |
6124 | } |
6125 | } |
6126 | } else { |
6127 | |
6128 | for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) { |
6129 | |
6130 | p = vm_page_lookup(object, cur_offset); |
6131 | |
6132 | if (p != VM_PAGE_NULL) { |
6133 | |
6134 | local_resident_count++; |
6135 | |
6136 | if (count_dirty_pages) { |
6137 | |
6138 | if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) { |
6139 | |
6140 | local_dirty_count++; |
6141 | } |
6142 | } |
6143 | } |
6144 | } |
6145 | |
6146 | } |
6147 | |
6148 | if (resident_page_count != NULL) { |
6149 | *resident_page_count = local_resident_count; |
6150 | } |
6151 | |
6152 | if (dirty_page_count != NULL) { |
6153 | *dirty_page_count = local_dirty_count; |
6154 | } |
6155 | |
6156 | out: |
6157 | return kr; |
6158 | } |
6159 | |
6160 | |
6161 | #if TASK_SWAPPER |
6162 | /* |
6163 | * vm_object_res_deallocate |
6164 | * |
6165 | * (recursively) decrement residence counts on vm objects and their shadows. |
6166 | * Called from vm_object_deallocate and when swapping out an object. |
6167 | * |
6168 | * The object is locked, and remains locked throughout the function, |
6169 | * even as we iterate down the shadow chain. Locks on intermediate objects |
6170 | * will be dropped, but not the original object. |
6171 | * |
6172 | * NOTE: this function used to use recursion, rather than iteration. |
6173 | */ |
6174 | |
6175 | __private_extern__ void |
6176 | vm_object_res_deallocate( |
6177 | vm_object_t object) |
6178 | { |
6179 | vm_object_t orig_object = object; |
6180 | /* |
6181 | * Object is locked so it can be called directly |
6182 | * from vm_object_deallocate. Original object is never |
6183 | * unlocked. |
6184 | */ |
6185 | assert(object->res_count > 0); |
6186 | while (--object->res_count == 0) { |
6187 | assert(object->ref_count >= object->res_count); |
6188 | vm_object_deactivate_all_pages(object); |
6189 | /* iterate on shadow, if present */ |
6190 | if (object->shadow != VM_OBJECT_NULL) { |
6191 | vm_object_t tmp_object = object->shadow; |
6192 | vm_object_lock(tmp_object); |
6193 | if (object != orig_object) |
6194 | vm_object_unlock(object); |
6195 | object = tmp_object; |
6196 | assert(object->res_count > 0); |
6197 | } else |
6198 | break; |
6199 | } |
6200 | if (object != orig_object) |
6201 | vm_object_unlock(object); |
6202 | } |
6203 | |
6204 | /* |
6205 | * vm_object_res_reference |
6206 | * |
6207 | * Internal function to increment residence count on a vm object |
6208 | * and its shadows. It is called only from vm_object_reference, and |
6209 | * when swapping in a vm object, via vm_map_swap. |
6210 | * |
6211 | * The object is locked, and remains locked throughout the function, |
6212 | * even as we iterate down the shadow chain. Locks on intermediate objects |
6213 | * will be dropped, but not the original object. |
6214 | * |
6215 | * NOTE: this function used to use recursion, rather than iteration. |
6216 | */ |
6217 | |
6218 | __private_extern__ void |
6219 | vm_object_res_reference( |
6220 | vm_object_t object) |
6221 | { |
6222 | vm_object_t orig_object = object; |
6223 | /* |
6224 | * Object is locked, so this can be called directly |
6225 | * from vm_object_reference. This lock is never released. |
6226 | */ |
6227 | while ((++object->res_count == 1) && |
6228 | (object->shadow != VM_OBJECT_NULL)) { |
6229 | vm_object_t tmp_object = object->shadow; |
6230 | |
6231 | assert(object->ref_count >= object->res_count); |
6232 | vm_object_lock(tmp_object); |
6233 | if (object != orig_object) |
6234 | vm_object_unlock(object); |
6235 | object = tmp_object; |
6236 | } |
6237 | if (object != orig_object) |
6238 | vm_object_unlock(object); |
6239 | assert(orig_object->ref_count >= orig_object->res_count); |
6240 | } |
6241 | #endif /* TASK_SWAPPER */ |
6242 | |
6243 | /* |
6244 | * vm_object_reference: |
6245 | * |
6246 | * Gets another reference to the given object. |
6247 | */ |
6248 | #ifdef vm_object_reference |
6249 | #undef vm_object_reference |
6250 | #endif |
6251 | __private_extern__ void |
6252 | vm_object_reference( |
6253 | vm_object_t object) |
6254 | { |
6255 | if (object == VM_OBJECT_NULL) |
6256 | return; |
6257 | |
6258 | vm_object_lock(object); |
6259 | assert(object->ref_count > 0); |
6260 | vm_object_reference_locked(object); |
6261 | vm_object_unlock(object); |
6262 | } |
6263 | |
6264 | /* |
6265 | * vm_object_transpose |
6266 | * |
6267 | * This routine takes two VM objects of the same size and exchanges |
6268 | * their backing store. |
6269 | * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE |
6270 | * and UPL_BLOCK_ACCESS if they are referenced anywhere. |
6271 | * |
6272 | * The VM objects must not be locked by caller. |
6273 | */ |
6274 | unsigned int vm_object_transpose_count = 0; |
6275 | kern_return_t |
6276 | vm_object_transpose( |
6277 | vm_object_t object1, |
6278 | vm_object_t object2, |
6279 | vm_object_size_t transpose_size) |
6280 | { |
6281 | vm_object_t tmp_object; |
6282 | kern_return_t retval; |
6283 | boolean_t object1_locked, object2_locked; |
6284 | vm_page_t page; |
6285 | vm_object_offset_t page_offset; |
6286 | |
6287 | tmp_object = VM_OBJECT_NULL; |
6288 | object1_locked = FALSE; object2_locked = FALSE; |
6289 | |
6290 | if (object1 == object2 || |
6291 | object1 == VM_OBJECT_NULL || |
6292 | object2 == VM_OBJECT_NULL) { |
6293 | /* |
6294 | * If the 2 VM objects are the same, there's |
6295 | * no point in exchanging their backing store. |
6296 | */ |
6297 | retval = KERN_INVALID_VALUE; |
6298 | goto done; |
6299 | } |
6300 | |
6301 | /* |
6302 | * Since we need to lock both objects at the same time, |
6303 | * make sure we always lock them in the same order to |
6304 | * avoid deadlocks. |
6305 | */ |
6306 | if (object1 > object2) { |
6307 | tmp_object = object1; |
6308 | object1 = object2; |
6309 | object2 = tmp_object; |
6310 | } |
6311 | |
6312 | /* |
6313 | * Allocate a temporary VM object to hold object1's contents |
6314 | * while we copy object2 to object1. |
6315 | */ |
6316 | tmp_object = vm_object_allocate(transpose_size); |
6317 | vm_object_lock(tmp_object); |
6318 | tmp_object->can_persist = FALSE; |
6319 | |
6320 | |
6321 | /* |
6322 | * Grab control of the 1st VM object. |
6323 | */ |
6324 | vm_object_lock(object1); |
6325 | object1_locked = TRUE; |
6326 | if (!object1->alive || object1->terminating || |
6327 | object1->copy || object1->shadow || object1->shadowed || |
6328 | object1->purgable != VM_PURGABLE_DENY) { |
6329 | /* |
6330 | * We don't deal with copy or shadow objects (yet). |
6331 | */ |
6332 | retval = KERN_INVALID_VALUE; |
6333 | goto done; |
6334 | } |
6335 | /* |
6336 | * We're about to mess with the object's backing store and |
6337 | * taking a "paging_in_progress" reference wouldn't be enough |
6338 | * to prevent any paging activity on this object, so the caller should |
6339 | * have "quiesced" the objects beforehand, via a UPL operation with |
6340 | * UPL_SET_IO_WIRE (to make sure all the pages are there and wired) |
6341 | * and UPL_BLOCK_ACCESS (to mark the pages "busy"). |
6342 | * |
6343 | * Wait for any paging operation to complete (but only paging, not |
6344 | * other kind of activities not linked to the pager). After we're |
6345 | * statisfied that there's no more paging in progress, we keep the |
6346 | * object locked, to guarantee that no one tries to access its pager. |
6347 | */ |
6348 | vm_object_paging_only_wait(object1, THREAD_UNINT); |
6349 | |
6350 | /* |
6351 | * Same as above for the 2nd object... |
6352 | */ |
6353 | vm_object_lock(object2); |
6354 | object2_locked = TRUE; |
6355 | if (! object2->alive || object2->terminating || |
6356 | object2->copy || object2->shadow || object2->shadowed || |
6357 | object2->purgable != VM_PURGABLE_DENY) { |
6358 | retval = KERN_INVALID_VALUE; |
6359 | goto done; |
6360 | } |
6361 | vm_object_paging_only_wait(object2, THREAD_UNINT); |
6362 | |
6363 | |
6364 | if (object1->vo_size != object2->vo_size || |
6365 | object1->vo_size != transpose_size) { |
6366 | /* |
6367 | * If the 2 objects don't have the same size, we can't |
6368 | * exchange their backing stores or one would overflow. |
6369 | * If their size doesn't match the caller's |
6370 | * "transpose_size", we can't do it either because the |
6371 | * transpose operation will affect the entire span of |
6372 | * the objects. |
6373 | */ |
6374 | retval = KERN_INVALID_VALUE; |
6375 | goto done; |
6376 | } |
6377 | |
6378 | |
6379 | /* |
6380 | * Transpose the lists of resident pages. |
6381 | * This also updates the resident_page_count and the memq_hint. |
6382 | */ |
6383 | if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) { |
6384 | /* |
6385 | * No pages in object1, just transfer pages |
6386 | * from object2 to object1. No need to go through |
6387 | * an intermediate object. |
6388 | */ |
6389 | while (!vm_page_queue_empty(&object2->memq)) { |
6390 | page = (vm_page_t) vm_page_queue_first(&object2->memq); |
6391 | vm_page_rename(page, object1, page->vmp_offset); |
6392 | } |
6393 | assert(vm_page_queue_empty(&object2->memq)); |
6394 | } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) { |
6395 | /* |
6396 | * No pages in object2, just transfer pages |
6397 | * from object1 to object2. No need to go through |
6398 | * an intermediate object. |
6399 | */ |
6400 | while (!vm_page_queue_empty(&object1->memq)) { |
6401 | page = (vm_page_t) vm_page_queue_first(&object1->memq); |
6402 | vm_page_rename(page, object2, page->vmp_offset); |
6403 | } |
6404 | assert(vm_page_queue_empty(&object1->memq)); |
6405 | } else { |
6406 | /* transfer object1's pages to tmp_object */ |
6407 | while (!vm_page_queue_empty(&object1->memq)) { |
6408 | page = (vm_page_t) vm_page_queue_first(&object1->memq); |
6409 | page_offset = page->vmp_offset; |
6410 | vm_page_remove(page, TRUE); |
6411 | page->vmp_offset = page_offset; |
6412 | vm_page_queue_enter(&tmp_object->memq, page, vm_page_t, vmp_listq); |
6413 | } |
6414 | assert(vm_page_queue_empty(&object1->memq)); |
6415 | /* transfer object2's pages to object1 */ |
6416 | while (!vm_page_queue_empty(&object2->memq)) { |
6417 | page = (vm_page_t) vm_page_queue_first(&object2->memq); |
6418 | vm_page_rename(page, object1, page->vmp_offset); |
6419 | } |
6420 | assert(vm_page_queue_empty(&object2->memq)); |
6421 | /* transfer tmp_object's pages to object2 */ |
6422 | while (!vm_page_queue_empty(&tmp_object->memq)) { |
6423 | page = (vm_page_t) vm_page_queue_first(&tmp_object->memq); |
6424 | vm_page_queue_remove(&tmp_object->memq, page, |
6425 | vm_page_t, vmp_listq); |
6426 | vm_page_insert(page, object2, page->vmp_offset); |
6427 | } |
6428 | assert(vm_page_queue_empty(&tmp_object->memq)); |
6429 | } |
6430 | |
6431 | #define __TRANSPOSE_FIELD(field) \ |
6432 | MACRO_BEGIN \ |
6433 | tmp_object->field = object1->field; \ |
6434 | object1->field = object2->field; \ |
6435 | object2->field = tmp_object->field; \ |
6436 | MACRO_END |
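
	/*
	 * The fields below are exchanged between object1 and object2 with
	 * __TRANSPOSE_FIELD(), using tmp_object as scratch space.
	 */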
6437 | |
6438 | /* "Lock" refers to the object not its contents */ |
6439 | /* "size" should be identical */ |
6440 | assert(object1->vo_size == object2->vo_size); |
6441 | /* "memq_hint" was updated above when transposing pages */ |
6442 | /* "ref_count" refers to the object not its contents */ |
6443 | assert(object1->ref_count >= 1); |
6444 | assert(object2->ref_count >= 1); |
6445 | #if TASK_SWAPPER |
6446 | /* "res_count" refers to the object not its contents */ |
6447 | #endif |
6448 | /* "resident_page_count" was updated above when transposing pages */ |
6449 | /* "wired_page_count" was updated above when transposing pages */ |
6450 | #if ! VM_TAG_ACTIVE_UPDATE |
6451 | /* "wired_objq" was dealt with along with "wired_page_count" */ |
6452 | #endif /* ! VM_TAG_ACTIVE_UPDATE */ |
6453 | /* "reusable_page_count" was updated above when transposing pages */ |
6454 | /* there should be no "copy" */ |
6455 | assert(!object1->copy); |
6456 | assert(!object2->copy); |
6457 | /* there should be no "shadow" */ |
6458 | assert(!object1->shadow); |
6459 | assert(!object2->shadow); |
6460 | __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */ |
6461 | __TRANSPOSE_FIELD(pager); |
6462 | __TRANSPOSE_FIELD(paging_offset); |
6463 | __TRANSPOSE_FIELD(pager_control); |
6464 | /* update the memory_objects' pointers back to the VM objects */ |
6465 | if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) { |
6466 | memory_object_control_collapse(object1->pager_control, |
6467 | object1); |
6468 | } |
6469 | if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) { |
6470 | memory_object_control_collapse(object2->pager_control, |
6471 | object2); |
6472 | } |
6473 | __TRANSPOSE_FIELD(copy_strategy); |
6474 | /* "paging_in_progress" refers to the object not its contents */ |
6475 | assert(!object1->paging_in_progress); |
6476 | assert(!object2->paging_in_progress); |
6477 | assert(object1->activity_in_progress); |
6478 | assert(object2->activity_in_progress); |
6479 | /* "all_wanted" refers to the object not its contents */ |
6480 | __TRANSPOSE_FIELD(pager_created); |
6481 | __TRANSPOSE_FIELD(pager_initialized); |
6482 | __TRANSPOSE_FIELD(pager_ready); |
6483 | __TRANSPOSE_FIELD(pager_trusted); |
6484 | __TRANSPOSE_FIELD(can_persist); |
6485 | __TRANSPOSE_FIELD(internal); |
6486 | __TRANSPOSE_FIELD(private); |
6487 | __TRANSPOSE_FIELD(pageout); |
6488 | /* "alive" should be set */ |
6489 | assert(object1->alive); |
6490 | assert(object2->alive); |
6491 | /* "purgeable" should be non-purgeable */ |
6492 | assert(object1->purgable == VM_PURGABLE_DENY); |
6493 | assert(object2->purgable == VM_PURGABLE_DENY); |
6494 | /* "shadowed" refers to the the object not its contents */ |
6495 | __TRANSPOSE_FIELD(purgeable_when_ripe); |
6496 | __TRANSPOSE_FIELD(true_share); |
6497 | /* "terminating" should not be set */ |
6498 | assert(!object1->terminating); |
6499 | assert(!object2->terminating); |
6500 | /* transfer "named" reference if needed */ |
6501 | if (object1->named && !object2->named) { |
6502 | assert(object1->ref_count >= 2); |
6503 | assert(object2->ref_count >= 1); |
6504 | object1->ref_count--; |
6505 | object2->ref_count++; |
6506 | } else if (!object1->named && object2->named) { |
6507 | assert(object1->ref_count >= 1); |
6508 | assert(object2->ref_count >= 2); |
6509 | object1->ref_count++; |
6510 | object2->ref_count--; |
6511 | } |
6512 | __TRANSPOSE_FIELD(named); |
6513 | /* "shadow_severed" refers to the object not its contents */ |
6514 | __TRANSPOSE_FIELD(phys_contiguous); |
6515 | __TRANSPOSE_FIELD(nophyscache); |
6516 | /* "cached_list.next" points to transposed object */ |
6517 | object1->cached_list.next = (queue_entry_t) object2; |
6518 | object2->cached_list.next = (queue_entry_t) object1; |
6519 | /* "cached_list.prev" should be NULL */ |
6520 | assert(object1->cached_list.prev == NULL); |
6521 | assert(object2->cached_list.prev == NULL); |
6522 | __TRANSPOSE_FIELD(last_alloc); |
6523 | __TRANSPOSE_FIELD(sequential); |
6524 | __TRANSPOSE_FIELD(pages_created); |
6525 | __TRANSPOSE_FIELD(pages_used); |
6526 | __TRANSPOSE_FIELD(scan_collisions); |
6527 | __TRANSPOSE_FIELD(cow_hint); |
6528 | __TRANSPOSE_FIELD(wimg_bits); |
6529 | __TRANSPOSE_FIELD(set_cache_attr); |
6530 | __TRANSPOSE_FIELD(code_signed); |
6531 | object1->transposed = TRUE; |
6532 | object2->transposed = TRUE; |
6533 | __TRANSPOSE_FIELD(mapping_in_progress); |
6534 | __TRANSPOSE_FIELD(volatile_empty); |
6535 | __TRANSPOSE_FIELD(volatile_fault); |
6536 | __TRANSPOSE_FIELD(all_reusable); |
6537 | assert(object1->blocked_access); |
6538 | assert(object2->blocked_access); |
6539 | __TRANSPOSE_FIELD(set_cache_attr); |
6540 | assert(!object1->object_is_shared_cache); |
6541 | assert(!object2->object_is_shared_cache); |
6542 | /* ignore purgeable_queue_type and purgeable_queue_group */ |
6543 | assert(!object1->io_tracking); |
6544 | assert(!object2->io_tracking); |
6545 | #if VM_OBJECT_ACCESS_TRACKING |
6546 | assert(!object1->access_tracking); |
6547 | assert(!object2->access_tracking); |
6548 | #endif /* VM_OBJECT_ACCESS_TRACKING */ |
6549 | __TRANSPOSE_FIELD(no_tag_update); |
6550 | #if CONFIG_SECLUDED_MEMORY |
6551 | assert(!object1->eligible_for_secluded); |
6552 | assert(!object2->eligible_for_secluded); |
6553 | assert(!object1->can_grab_secluded); |
6554 | assert(!object2->can_grab_secluded); |
6555 | #else /* CONFIG_SECLUDED_MEMORY */ |
6556 | assert(object1->__object3_unused_bits == 0); |
6557 | assert(object2->__object3_unused_bits == 0); |
6558 | #endif /* CONFIG_SECLUDED_MEMORY */ |
6559 | assert(object1->__object2_unused_bits == 0); |
6560 | assert(object2->__object2_unused_bits == 0); |
6561 | #if UPL_DEBUG |
6562 | /* "uplq" refers to the object not its contents (see upl_transpose()) */ |
6563 | #endif |
6564 | assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL)); |
6565 | assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL)); |
6566 | assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL)); |
6567 | assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL)); |
6568 | |
6569 | #undef __TRANSPOSE_FIELD |
6570 | |
6571 | retval = KERN_SUCCESS; |
6572 | |
6573 | done: |
6574 | /* |
6575 | * Cleanup. |
6576 | */ |
6577 | if (tmp_object != VM_OBJECT_NULL) { |
6578 | vm_object_unlock(tmp_object); |
6579 | /* |
6580 | * Re-initialize the temporary object to avoid |
6581 | * deallocating a real pager. |
6582 | */ |
6583 | _vm_object_allocate(transpose_size, tmp_object); |
6584 | vm_object_deallocate(tmp_object); |
6585 | tmp_object = VM_OBJECT_NULL; |
6586 | } |
6587 | |
6588 | if (object1_locked) { |
6589 | vm_object_unlock(object1); |
6590 | object1_locked = FALSE; |
6591 | } |
6592 | if (object2_locked) { |
6593 | vm_object_unlock(object2); |
6594 | object2_locked = FALSE; |
6595 | } |
6596 | |
6597 | vm_object_transpose_count++; |
6598 | |
6599 | return retval; |
6600 | } |
6601 | |
6602 | |
6603 | /* |
6604 | * vm_object_cluster_size |
6605 | * |
6606 | * Determine how big a cluster we should issue an I/O for... |
6607 | * |
6608 | * Inputs: *start == offset of page needed |
6609 | * *length == maximum cluster pager can handle |
6610 | * Outputs: *start == beginning offset of cluster |
6611 | * *length == length of cluster to try |
6612 | * |
6613 | * The original *start will be encompassed by the cluster |
6614 | * |
6615 | */ |
6616 | extern int speculative_reads_disabled; |
6617 | |
6618 | /* |
6619 | * Try to always keep these values an even multiple of PAGE_SIZE. We use these values |
6620 | * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to |
6621 | * always be page-aligned. The derivation could involve operations (e.g. division) |
6622 | * that could give us non-page-size aligned values if we start out with values that |
6623 | * are odd multiples of PAGE_SIZE. |
6624 | */ |
6625 | #if CONFIG_EMBEDDED |
6626 | unsigned int preheat_max_bytes = (1024 * 512); |
6627 | #else /* CONFIG_EMBEDDED */ |
6628 | unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES; |
6629 | #endif /* CONFIG_EMBEDDED */ |
6630 | unsigned int preheat_min_bytes = (1024 * 32); |
6631 | |
6632 | |
6633 | __private_extern__ void |
6634 | vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, |
6635 | vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming) |
6636 | { |
6637 | vm_size_t pre_heat_size; |
6638 | vm_size_t tail_size; |
6639 | vm_size_t head_size; |
6640 | vm_size_t max_length; |
6641 | vm_size_t cluster_size; |
6642 | vm_object_offset_t object_size; |
6643 | vm_object_offset_t orig_start; |
6644 | vm_object_offset_t target_start; |
6645 | vm_object_offset_t offset; |
6646 | vm_behavior_t behavior; |
6647 | boolean_t look_behind = TRUE; |
6648 | boolean_t look_ahead = TRUE; |
6649 | boolean_t isSSD = FALSE; |
6650 | uint32_t throttle_limit; |
6651 | int sequential_run; |
6652 | int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; |
6653 | vm_size_t max_ph_size; |
6654 | vm_size_t min_ph_size; |
6655 | |
6656 | assert( !(*length & PAGE_MASK)); |
6657 | assert( !(*start & PAGE_MASK_64)); |
6658 | |
6659 | /* |
	 * remember maximum length of run requested
6661 | */ |
6662 | max_length = *length; |
6663 | /* |
6664 | * we'll always return a cluster size of at least |
6665 | * 1 page, since the original fault must always |
6666 | * be processed |
6667 | */ |
6668 | *length = PAGE_SIZE; |
6669 | *io_streaming = 0; |
6670 | |
6671 | if (speculative_reads_disabled || fault_info == NULL) { |
6672 | /* |
6673 | * no cluster... just fault the page in |
6674 | */ |
6675 | return; |
6676 | } |
6677 | orig_start = *start; |
6678 | target_start = orig_start; |
6679 | cluster_size = round_page(fault_info->cluster_size); |
6680 | behavior = fault_info->behavior; |
6681 | |
6682 | vm_object_lock(object); |
6683 | |
6684 | if (object->pager == MEMORY_OBJECT_NULL) |
6685 | goto out; /* pager is gone for this object, nothing more to do */ |
6686 | |
6687 | vnode_pager_get_isSSD(object->pager, &isSSD); |
6688 | |
6689 | min_ph_size = round_page(preheat_min_bytes); |
6690 | max_ph_size = round_page(preheat_max_bytes); |
6691 | |
6692 | #if !CONFIG_EMBEDDED |
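	/*
	 * Scale the preheat window down for SSD-backed objects and keep the
	 * adjusted values page aligned.
	 */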
6693 | if (isSSD) { |
6694 | min_ph_size /= 2; |
6695 | max_ph_size /= 8; |
6696 | |
6697 | if (min_ph_size & PAGE_MASK_64) { |
6698 | min_ph_size = trunc_page(min_ph_size); |
6699 | } |
6700 | |
6701 | if (max_ph_size & PAGE_MASK_64) { |
6702 | max_ph_size = trunc_page(max_ph_size); |
6703 | } |
6704 | } |
6705 | #endif /* !CONFIG_EMBEDDED */ |
6706 | |
6707 | if (min_ph_size < PAGE_SIZE) |
6708 | min_ph_size = PAGE_SIZE; |
6709 | |
6710 | if (max_ph_size < PAGE_SIZE) |
6711 | max_ph_size = PAGE_SIZE; |
6712 | else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) |
6713 | max_ph_size = MAX_UPL_TRANSFER_BYTES; |
6714 | |
6715 | if (max_length > max_ph_size) |
6716 | max_length = max_ph_size; |
6717 | |
6718 | if (max_length <= PAGE_SIZE) |
6719 | goto out; |
6720 | |
6721 | if (object->internal) |
6722 | object_size = object->vo_size; |
6723 | else |
6724 | vnode_pager_get_object_size(object->pager, &object_size); |
6725 | |
6726 | object_size = round_page_64(object_size); |
6727 | |
6728 | if (orig_start >= object_size) { |
6729 | /* |
6730 | * fault occurred beyond the EOF... |
6731 | * we need to punt w/o changing the |
6732 | * starting offset |
6733 | */ |
6734 | goto out; |
6735 | } |
6736 | if (object->pages_used > object->pages_created) { |
6737 | /* |
6738 | * must have wrapped our 32 bit counters |
6739 | * so reset |
6740 | */ |
6741 | object->pages_used = object->pages_created = 0; |
6742 | } |
6743 | if ((sequential_run = object->sequential)) { |
6744 | if (sequential_run < 0) { |
6745 | sequential_behavior = VM_BEHAVIOR_RSEQNTL; |
6746 | sequential_run = 0 - sequential_run; |
6747 | } else { |
6748 | sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; |
6749 | } |
6750 | |
6751 | } |
6752 | switch (behavior) { |
6753 | |
6754 | default: |
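		/* unrecognized behavior: treat it as VM_BEHAVIOR_DEFAULT and fall through */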
6755 | behavior = VM_BEHAVIOR_DEFAULT; |
6756 | |
6757 | case VM_BEHAVIOR_DEFAULT: |
6758 | if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) |
6759 | goto out; |
6760 | |
6761 | if (sequential_run >= (3 * PAGE_SIZE)) { |
6762 | pre_heat_size = sequential_run + PAGE_SIZE; |
6763 | |
6764 | if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) |
6765 | look_behind = FALSE; |
6766 | else |
6767 | look_ahead = FALSE; |
6768 | |
6769 | *io_streaming = 1; |
6770 | } else { |
6771 | |
6772 | if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) { |
6773 | /* |
6774 | * prime the pump |
6775 | */ |
6776 | pre_heat_size = min_ph_size; |
6777 | } else { |
6778 | /* |
6779 | * Linear growth in PH size: The maximum size is max_length... |
				 * this calculation will result in a size that is neither a
6781 | * power of 2 nor a multiple of PAGE_SIZE... so round |
6782 | * it up to the nearest PAGE_SIZE boundary |
6783 | */ |
6784 | pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created; |
6785 | |
6786 | if (pre_heat_size < min_ph_size) |
6787 | pre_heat_size = min_ph_size; |
6788 | else |
6789 | pre_heat_size = round_page(pre_heat_size); |
6790 | } |
6791 | } |
6792 | break; |
6793 | |
6794 | case VM_BEHAVIOR_RANDOM: |
6795 | if ((pre_heat_size = cluster_size) <= PAGE_SIZE) |
6796 | goto out; |
6797 | break; |
6798 | |
6799 | case VM_BEHAVIOR_SEQUENTIAL: |
6800 | if ((pre_heat_size = cluster_size) == 0) |
6801 | pre_heat_size = sequential_run + PAGE_SIZE; |
6802 | look_behind = FALSE; |
6803 | *io_streaming = 1; |
6804 | |
6805 | break; |
6806 | |
6807 | case VM_BEHAVIOR_RSEQNTL: |
6808 | if ((pre_heat_size = cluster_size) == 0) |
6809 | pre_heat_size = sequential_run + PAGE_SIZE; |
6810 | look_ahead = FALSE; |
6811 | *io_streaming = 1; |
6812 | |
6813 | break; |
6814 | |
6815 | } |
6816 | throttle_limit = (uint32_t) max_length; |
6817 | assert(throttle_limit == max_length); |
6818 | |
6819 | if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) { |
6820 | if (max_length > throttle_limit) |
6821 | max_length = throttle_limit; |
6822 | } |
6823 | if (pre_heat_size > max_length) |
6824 | pre_heat_size = max_length; |
6825 | |
6826 | if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) { |
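		/*
		 * When free memory is running low, scale the speculative
		 * cluster back before committing to the I/O.
		 */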
6827 | |
6828 | unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count; |
6829 | |
6830 | if (consider_free < vm_page_throttle_limit) { |
6831 | pre_heat_size = trunc_page(pre_heat_size / 16); |
6832 | } else if (consider_free < vm_page_free_target) { |
6833 | pre_heat_size = trunc_page(pre_heat_size / 4); |
6834 | } |
6835 | |
6836 | if (pre_heat_size < min_ph_size) |
6837 | pre_heat_size = min_ph_size; |
6838 | } |
6839 | if (look_ahead == TRUE) { |
6840 | if (look_behind == TRUE) { |
6841 | /* |
			 * if we get here it's due to a random access...
6843 | * so we want to center the original fault address |
6844 | * within the cluster we will issue... make sure |
6845 | * to calculate 'head_size' as a multiple of PAGE_SIZE... |
6846 | * 'pre_heat_size' is a multiple of PAGE_SIZE but not |
6847 | * necessarily an even number of pages so we need to truncate |
6848 | * the result to a PAGE_SIZE boundary |
6849 | */ |
6850 | head_size = trunc_page(pre_heat_size / 2); |
6851 | |
6852 | if (target_start > head_size) |
6853 | target_start -= head_size; |
6854 | else |
6855 | target_start = 0; |
6856 | |
6857 | /* |
6858 | * 'target_start' at this point represents the beginning offset |
6859 | * of the cluster we are considering... 'orig_start' will be in |
6860 | * the center of this cluster if we didn't have to clip the start |
6861 | * due to running into the start of the file |
6862 | */ |
6863 | } |
6864 | if ((target_start + pre_heat_size) > object_size) |
6865 | pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start)); |
6866 | /* |
		 * at this point calculate the number of pages beyond the original fault
6868 | * address that we want to consider... this is guaranteed not to extend beyond |
6869 | * the current EOF... |
6870 | */ |
6871 | assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start)); |
6872 | tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE; |
6873 | } else { |
6874 | if (pre_heat_size > target_start) { |
6875 | /* |
			 * since pre_heat_size is always smaller than 2^32,
			 * if it is larger than target_start (a 64 bit value)
6878 | * it is safe to clip target_start to 32 bits |
6879 | */ |
6880 | pre_heat_size = (vm_size_t) target_start; |
6881 | } |
6882 | tail_size = 0; |
6883 | } |
6884 | assert( !(target_start & PAGE_MASK_64)); |
6885 | assert( !(pre_heat_size & PAGE_MASK_64)); |
6886 | |
6887 | if (pre_heat_size <= PAGE_SIZE) |
6888 | goto out; |
6889 | |
6890 | if (look_behind == TRUE) { |
6891 | /* |
6892 | * take a look at the pages before the original |
6893 | * faulting offset... recalculate this in case |
6894 | * we had to clip 'pre_heat_size' above to keep |
6895 | * from running past the EOF. |
6896 | */ |
6897 | head_size = pre_heat_size - tail_size - PAGE_SIZE; |
6898 | |
6899 | for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) { |
6900 | /* |
6901 | * don't poke below the lowest offset |
6902 | */ |
6903 | if (offset < fault_info->lo_offset) |
6904 | break; |
6905 | /* |
6906 | * for external objects or internal objects w/o a pager, |
6907 | * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN |
6908 | */ |
6909 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) { |
6910 | break; |
6911 | } |
6912 | if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { |
6913 | /* |
6914 | * don't bridge resident pages |
6915 | */ |
6916 | break; |
6917 | } |
6918 | *start = offset; |
6919 | *length += PAGE_SIZE; |
6920 | } |
6921 | } |
6922 | if (look_ahead == TRUE) { |
6923 | for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) { |
6924 | /* |
6925 | * don't poke above the highest offset |
6926 | */ |
6927 | if (offset >= fault_info->hi_offset) |
6928 | break; |
6929 | assert(offset < object_size); |
6930 | |
6931 | /* |
6932 | * for external objects or internal objects w/o a pager, |
6933 | * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN |
6934 | */ |
6935 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) { |
6936 | break; |
6937 | } |
6938 | if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { |
6939 | /* |
6940 | * don't bridge resident pages |
6941 | */ |
6942 | break; |
6943 | } |
6944 | *length += PAGE_SIZE; |
6945 | } |
6946 | } |
6947 | out: |
6948 | if (*length > max_length) |
6949 | *length = max_length; |
6950 | |
6951 | vm_object_unlock(object); |
6952 | |
6953 | DTRACE_VM1(clustersize, vm_size_t, *length); |
6954 | } |
6955 | |
6956 | |
6957 | /* |
6958 | * Allow manipulation of individual page state. This is actually part of |
6959 | * the UPL regimen but takes place on the VM object rather than on a UPL |
6960 | */ |
6961 | |
6962 | kern_return_t |
6963 | vm_object_page_op( |
6964 | vm_object_t object, |
6965 | vm_object_offset_t offset, |
6966 | int ops, |
6967 | ppnum_t *phys_entry, |
6968 | int *flags) |
6969 | { |
6970 | vm_page_t dst_page; |
6971 | |
6972 | vm_object_lock(object); |
6973 | |
6974 | if(ops & UPL_POP_PHYSICAL) { |
6975 | if(object->phys_contiguous) { |
6976 | if (phys_entry) { |
6977 | *phys_entry = (ppnum_t) |
6978 | (object->vo_shadow_offset >> PAGE_SHIFT); |
6979 | } |
6980 | vm_object_unlock(object); |
6981 | return KERN_SUCCESS; |
6982 | } else { |
6983 | vm_object_unlock(object); |
6984 | return KERN_INVALID_OBJECT; |
6985 | } |
6986 | } |
6987 | if(object->phys_contiguous) { |
6988 | vm_object_unlock(object); |
6989 | return KERN_INVALID_OBJECT; |
6990 | } |
6991 | |
6992 | while(TRUE) { |
6993 | if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) { |
6994 | vm_object_unlock(object); |
6995 | return KERN_FAILURE; |
6996 | } |
6997 | |
6998 | /* Sync up on getting the busy bit */ |
6999 | if((dst_page->vmp_busy || dst_page->vmp_cleaning) && |
7000 | (((ops & UPL_POP_SET) && |
7001 | (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) { |
7002 | /* someone else is playing with the page, we will */ |
7003 | /* have to wait */ |
7004 | PAGE_SLEEP(object, dst_page, THREAD_UNINT); |
7005 | continue; |
7006 | } |
7007 | |
7008 | if (ops & UPL_POP_DUMP) { |
7009 | if (dst_page->vmp_pmapped == TRUE) |
7010 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); |
7011 | |
7012 | VM_PAGE_FREE(dst_page); |
7013 | break; |
7014 | } |
7015 | |
7016 | if (flags) { |
7017 | *flags = 0; |
7018 | |
7019 | /* Get the condition of flags before requested ops */ |
7020 | /* are undertaken */ |
7021 | |
7022 | if(dst_page->vmp_dirty) *flags |= UPL_POP_DIRTY; |
7023 | if(dst_page->vmp_free_when_done) *flags |= UPL_POP_PAGEOUT; |
7024 | if(dst_page->vmp_precious) *flags |= UPL_POP_PRECIOUS; |
7025 | if(dst_page->vmp_absent) *flags |= UPL_POP_ABSENT; |
7026 | if(dst_page->vmp_busy) *flags |= UPL_POP_BUSY; |
7027 | } |
7028 | |
7029 | /* The caller should have made a call either contingent with */ |
7030 | /* or prior to this call to set UPL_POP_BUSY */ |
7031 | if(ops & UPL_POP_SET) { |
7032 | /* The protection granted with this assert will */ |
7033 | /* not be complete. If the caller violates the */ |
7034 | /* convention and attempts to change page state */ |
7035 | /* without first setting busy we may not see it */ |
7036 | /* because the page may already be busy. However */ |
7037 | /* if such violations occur we will assert sooner */ |
7038 | /* or later. */ |
7039 | assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY)); |
7040 | if (ops & UPL_POP_DIRTY) { |
7041 | SET_PAGE_DIRTY(dst_page, FALSE); |
7042 | } |
7043 | if (ops & UPL_POP_PAGEOUT) dst_page->vmp_free_when_done = TRUE; |
7044 | if (ops & UPL_POP_PRECIOUS) dst_page->vmp_precious = TRUE; |
7045 | if (ops & UPL_POP_ABSENT) dst_page->vmp_absent = TRUE; |
7046 | if (ops & UPL_POP_BUSY) dst_page->vmp_busy = TRUE; |
7047 | } |
7048 | |
7049 | if(ops & UPL_POP_CLR) { |
7050 | assert(dst_page->vmp_busy); |
7051 | if (ops & UPL_POP_DIRTY) dst_page->vmp_dirty = FALSE; |
7052 | if (ops & UPL_POP_PAGEOUT) dst_page->vmp_free_when_done = FALSE; |
7053 | if (ops & UPL_POP_PRECIOUS) dst_page->vmp_precious = FALSE; |
7054 | if (ops & UPL_POP_ABSENT) dst_page->vmp_absent = FALSE; |
7055 | if (ops & UPL_POP_BUSY) { |
7056 | dst_page->vmp_busy = FALSE; |
7057 | PAGE_WAKEUP(dst_page); |
7058 | } |
7059 | } |
7060 | if (phys_entry) { |
7061 | /* |
7062 | * The physical page number will remain valid |
7063 | * only if the page is kept busy. |
7064 | */ |
7065 | assert(dst_page->vmp_busy); |
7066 | *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page); |
7067 | } |
7068 | |
7069 | break; |
7070 | } |
7071 | |
7072 | vm_object_unlock(object); |
7073 | return KERN_SUCCESS; |
7074 | |
7075 | } |
7076 | |
7077 | /* |
7078 | * vm_object_range_op offers performance enhancement over |
7079 | * vm_object_page_op for page_op functions which do not require page |
7080 | * level state to be returned from the call. Page_op was created to provide |
7081 | * a low-cost alternative to page manipulation via UPLs when only a single |
7082 | * page was involved. The range_op call establishes the ability in the _op |
7083 | * family of functions to work on multiple pages where the lack of page level |
7084 | * state handling allows the caller to avoid the overhead of the upl structures. |
7085 | */ |
7086 | |
7087 | kern_return_t |
7088 | vm_object_range_op( |
7089 | vm_object_t object, |
7090 | vm_object_offset_t offset_beg, |
7091 | vm_object_offset_t offset_end, |
7092 | int ops, |
7093 | uint32_t *range) |
7094 | { |
7095 | vm_object_offset_t offset; |
7096 | vm_page_t dst_page; |
7097 | |
7098 | if (offset_end - offset_beg > (uint32_t) -1) { |
7099 | /* range is too big and would overflow "*range" */ |
7100 | return KERN_INVALID_ARGUMENT; |
7101 | } |
7102 | if (object->resident_page_count == 0) { |
7103 | if (range) { |
7104 | if (ops & UPL_ROP_PRESENT) { |
7105 | *range = 0; |
7106 | } else { |
7107 | *range = (uint32_t) (offset_end - offset_beg); |
7108 | assert(*range == (offset_end - offset_beg)); |
7109 | } |
7110 | } |
7111 | return KERN_SUCCESS; |
7112 | } |
7113 | vm_object_lock(object); |
7114 | |
7115 | if (object->phys_contiguous) { |
7116 | vm_object_unlock(object); |
7117 | return KERN_INVALID_OBJECT; |
7118 | } |
7119 | |
7120 | offset = offset_beg & ~PAGE_MASK_64; |
7121 | |
7122 | while (offset < offset_end) { |
7123 | dst_page = vm_page_lookup(object, offset); |
7124 | if (dst_page != VM_PAGE_NULL) { |
7125 | if (ops & UPL_ROP_DUMP) { |
7126 | if (dst_page->vmp_busy || dst_page->vmp_cleaning) { |
7127 | /* |
7128 | * someone else is playing with the |
7129 | * page, we will have to wait |
7130 | */ |
7131 | PAGE_SLEEP(object, dst_page, THREAD_UNINT); |
7132 | /* |
					 * need to look the page up again since its
7134 | * state may have changed while we slept |
7135 | * it might even belong to a different object |
7136 | * at this point |
7137 | */ |
7138 | continue; |
7139 | } |
7140 | if (dst_page->vmp_laundry) |
7141 | vm_pageout_steal_laundry(dst_page, FALSE); |
7142 | |
7143 | if (dst_page->vmp_pmapped == TRUE) |
7144 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); |
7145 | |
7146 | VM_PAGE_FREE(dst_page); |
7147 | |
7148 | } else if ((ops & UPL_ROP_ABSENT) |
7149 | && (!dst_page->vmp_absent || dst_page->vmp_busy)) { |
7150 | break; |
7151 | } |
7152 | } else if (ops & UPL_ROP_PRESENT) |
7153 | break; |
7154 | |
7155 | offset += PAGE_SIZE; |
7156 | } |
7157 | vm_object_unlock(object); |
7158 | |
7159 | if (range) { |
7160 | if (offset > offset_end) |
7161 | offset = offset_end; |
7162 | if(offset > offset_beg) { |
7163 | *range = (uint32_t) (offset - offset_beg); |
7164 | assert(*range == (offset - offset_beg)); |
7165 | } else { |
7166 | *range = 0; |
7167 | } |
7168 | } |
7169 | return KERN_SUCCESS; |
7170 | } |
7171 | |
7172 | /* |
7173 | * Used to point a pager directly to a range of memory (when the pager may be associated |
7174 | * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently |
7175 | * expect that the virtual address will denote the start of a range that is physically contiguous. |
7176 | */ |
kern_return_t pager_map_to_phys_contiguous(
7178 | memory_object_control_t object, |
7179 | memory_object_offset_t offset, |
7180 | addr64_t base_vaddr, |
7181 | vm_size_t size) |
7182 | { |
7183 | ppnum_t page_num; |
7184 | boolean_t clobbered_private; |
7185 | kern_return_t retval; |
	vm_object_t pager_object;
7187 | |
7188 | page_num = pmap_find_phys(kernel_pmap, base_vaddr); |
7189 | |
7190 | if (!page_num) { |
7191 | retval = KERN_FAILURE; |
7192 | goto out; |
7193 | } |
7194 | |
7195 | pager_object = memory_object_control_to_vm_object(object); |
7196 | |
7197 | if (!pager_object) { |
7198 | retval = KERN_FAILURE; |
7199 | goto out; |
7200 | } |
7201 | |
7202 | clobbered_private = pager_object->private; |
7203 | if (pager_object->private != TRUE) { |
7204 | vm_object_lock(pager_object); |
7205 | pager_object->private = TRUE; |
7206 | vm_object_unlock(pager_object); |
7207 | } |
7208 | retval = vm_object_populate_with_private(pager_object, offset, page_num, size); |
7209 | |
7210 | if (retval != KERN_SUCCESS) { |
7211 | if (pager_object->private != clobbered_private) { |
7212 | vm_object_lock(pager_object); |
7213 | pager_object->private = clobbered_private; |
7214 | vm_object_unlock(pager_object); |
7215 | } |
7216 | } |
7217 | |
7218 | out: |
7219 | return retval; |
7220 | } |
7221 | |
7222 | uint32_t scan_object_collision = 0; |
7223 | |
7224 | void |
7225 | vm_object_lock(vm_object_t object) |
7226 | { |
7227 | if (object == vm_pageout_scan_wants_object) { |
7228 | scan_object_collision++; |
7229 | mutex_pause(2); |
7230 | } |
7231 | lck_rw_lock_exclusive(&object->Lock); |
7232 | #if DEVELOPMENT || DEBUG |
7233 | object->Lock_owner = current_thread(); |
7234 | #endif |
7235 | } |
7236 | |
7237 | boolean_t |
7238 | vm_object_lock_avoid(vm_object_t object) |
7239 | { |
7240 | if (object == vm_pageout_scan_wants_object) { |
7241 | scan_object_collision++; |
7242 | return TRUE; |
7243 | } |
7244 | return FALSE; |
7245 | } |
7246 | |
7247 | boolean_t |
7248 | _vm_object_lock_try(vm_object_t object) |
7249 | { |
7250 | boolean_t retval; |
7251 | |
7252 | retval = lck_rw_try_lock_exclusive(&object->Lock); |
7253 | #if DEVELOPMENT || DEBUG |
7254 | if (retval == TRUE) |
7255 | object->Lock_owner = current_thread(); |
7256 | #endif |
7257 | return (retval); |
7258 | } |
7259 | |
7260 | boolean_t |
7261 | vm_object_lock_try(vm_object_t object) |
7262 | { |
7263 | /* |
7264 | * Called from hibernate path so check before blocking. |
7265 | */ |
7266 | if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level()==0) { |
7267 | mutex_pause(2); |
7268 | } |
7269 | return _vm_object_lock_try(object); |
7270 | } |
7271 | |
7272 | void |
7273 | vm_object_lock_shared(vm_object_t object) |
7274 | { |
7275 | if (vm_object_lock_avoid(object)) { |
7276 | mutex_pause(2); |
7277 | } |
7278 | lck_rw_lock_shared(&object->Lock); |
7279 | } |
7280 | |
7281 | boolean_t |
7282 | vm_object_lock_yield_shared(vm_object_t object) |
7283 | { |
	boolean_t retval = FALSE, force_yield = FALSE;
7285 | |
7286 | vm_object_lock_assert_shared(object); |
7287 | |
7288 | force_yield = vm_object_lock_avoid(object); |
7289 | |
7290 | retval = lck_rw_lock_yield_shared(&object->Lock, force_yield); |
7291 | |
7292 | return (retval); |
7293 | } |
7294 | |
7295 | boolean_t |
7296 | vm_object_lock_try_shared(vm_object_t object) |
7297 | { |
7298 | if (vm_object_lock_avoid(object)) { |
7299 | mutex_pause(2); |
7300 | } |
7301 | return (lck_rw_try_lock_shared(&object->Lock)); |
7302 | } |
7303 | |
7304 | boolean_t |
7305 | vm_object_lock_upgrade(vm_object_t object) |
{
	boolean_t retval;
7307 | |
7308 | retval = lck_rw_lock_shared_to_exclusive(&object->Lock); |
7309 | #if DEVELOPMENT || DEBUG |
7310 | if (retval == TRUE) |
7311 | object->Lock_owner = current_thread(); |
7312 | #endif |
7313 | return (retval); |
7314 | } |
7315 | |
7316 | void |
7317 | vm_object_unlock(vm_object_t object) |
7318 | { |
7319 | #if DEVELOPMENT || DEBUG |
7320 | if (object->Lock_owner) { |
7321 | if (object->Lock_owner != current_thread()) |
7322 | panic("vm_object_unlock: not owner - %p\n" , object); |
7323 | object->Lock_owner = 0; |
7324 | } |
7325 | #endif |
7326 | lck_rw_done(&object->Lock); |
7327 | } |
7328 | |
7329 | |
7330 | unsigned int vm_object_change_wimg_mode_count = 0; |
7331 | |
7332 | /* |
7333 | * The object must be locked |
7334 | */ |
7335 | void |
7336 | vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode) |
7337 | { |
7338 | vm_page_t p; |
7339 | |
7340 | vm_object_lock_assert_exclusive(object); |
7341 | |
7342 | vm_object_paging_wait(object, THREAD_UNINT); |
7343 | |
7344 | vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { |
7345 | |
7346 | if (!p->vmp_fictitious) |
7347 | pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode); |
7348 | } |
7349 | if (wimg_mode == VM_WIMG_USE_DEFAULT) |
7350 | object->set_cache_attr = FALSE; |
7351 | else |
7352 | object->set_cache_attr = TRUE; |
7353 | |
7354 | object->wimg_bits = wimg_mode; |
7355 | |
7356 | vm_object_change_wimg_mode_count++; |
7357 | } |
7358 | |
7359 | #if CONFIG_FREEZE |
7360 | |
7361 | /* |
7362 | * This routine does the "relocation" of previously |
7363 | * compressed pages belonging to this object that are |
7364 | * residing in a number of compressed segments into |
7365 | * a set of compressed segments dedicated to hold |
7366 | * compressed pages belonging to this object. |
7367 | */ |
7368 | |
7369 | extern void *freezer_chead; |
7370 | extern char *freezer_compressor_scratch_buf; |
7371 | extern int c_freezer_compression_count; |
7372 | extern AbsoluteTime c_freezer_last_yield_ts; |
7373 | |
7374 | #define MAX_FREE_BATCH 32 |
7375 | #define FREEZER_DUTY_CYCLE_ON_MS 5 |
7376 | #define FREEZER_DUTY_CYCLE_OFF_MS 5 |
7377 | |
7378 | static int c_freezer_should_yield(void); |
7379 | |
7380 | |
7381 | static int |
7382 | c_freezer_should_yield() |
7383 | { |
7384 | AbsoluteTime cur_time; |
7385 | uint64_t nsecs; |
7386 | |
7387 | assert(c_freezer_last_yield_ts); |
7388 | clock_get_uptime(&cur_time); |
7389 | |
7390 | SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts); |
7391 | absolutetime_to_nanoseconds(cur_time, &nsecs); |
7392 | |
7393 | if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) |
7394 | return (1); |
7395 | return (0); |
7396 | } |
7397 | |
7398 | |
7399 | void |
7400 | vm_object_compressed_freezer_done() |
7401 | { |
7402 | vm_compressor_finished_filling(&freezer_chead); |
7403 | } |
7404 | |
7405 | |
7406 | void |
7407 | vm_object_compressed_freezer_pageout( |
7408 | vm_object_t object) |
7409 | { |
7410 | vm_page_t p; |
7411 | vm_page_t local_freeq = NULL; |
7412 | int local_freed = 0; |
7413 | kern_return_t retval = KERN_SUCCESS; |
7414 | int obj_resident_page_count_snapshot = 0; |
7415 | |
7416 | assert(object != VM_OBJECT_NULL); |
7417 | assert(object->internal); |
7418 | |
7419 | vm_object_lock(object); |
7420 | |
7421 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
7422 | |
7423 | if (!object->pager_initialized) { |
7424 | |
7425 | vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); |
7426 | |
7427 | if (!object->pager_initialized) |
7428 | vm_object_compressor_pager_create(object); |
7429 | } |
7430 | |
7431 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
7432 | vm_object_unlock(object); |
7433 | return; |
7434 | } |
7435 | } |
7436 | |
7437 | if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { |
7438 | vm_object_offset_t curr_offset = 0; |
7439 | |
7440 | /* |
7441 | * Go through the object and make sure that any |
7442 | * previously compressed pages are relocated into |
7443 | * a compressed segment associated with our "freezer_chead". |
7444 | */ |
7445 | while (curr_offset < object->vo_size) { |
7446 | |
7447 | curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset); |
7448 | |
7449 | if (curr_offset == (vm_object_offset_t) -1) |
7450 | break; |
7451 | |
7452 | retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead); |
7453 | |
7454 | if (retval != KERN_SUCCESS) |
7455 | break; |
7456 | |
7457 | curr_offset += PAGE_SIZE_64; |
7458 | } |
7459 | } |
7460 | |
7461 | /* |
7462 | * We can't hold the object lock while heading down into the compressed pager |
7463 | * layer because we might need the kernel map lock down there to allocate new |
7464 | * compressor data structures. And if this same object is mapped in the kernel |
7465 | * and there's a fault on it, then that thread will want the object lock while |
7466 | * holding the kernel map lock. |
7467 | * |
7468 | * Since we are going to drop/grab the object lock repeatedly, we must make sure |
7469 | * we won't be stuck in an infinite loop if the same page(s) keep getting |
7470 | * decompressed. So we grab a snapshot of the number of pages in the object and |
7471 | * we won't process any more than that number of pages. |
7472 | */ |
7473 | |
7474 | obj_resident_page_count_snapshot = object->resident_page_count; |
7475 | |
7476 | vm_object_activity_begin(object); |
7477 | |
7478 | while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq)) { |
7479 | |
7480 | p = (vm_page_t)vm_page_queue_first(&object->memq); |
7481 | |
7482 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0); |
7483 | |
7484 | vm_page_lockspin_queues(); |
7485 | |
7486 | if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || p->vmp_error || VM_PAGE_WIRED(p)) { |
7487 | |
7488 | vm_page_unlock_queues(); |
7489 | |
7490 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0); |
7491 | |
7492 | vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); |
7493 | vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); |
7494 | |
7495 | continue; |
7496 | } |
7497 | |
7498 | if (p->vmp_pmapped == TRUE) { |
7499 | int refmod_state, pmap_flags; |
7500 | |
7501 | if (p->vmp_dirty || p->vmp_precious) { |
7502 | pmap_flags = PMAP_OPTIONS_COMPRESSOR; |
7503 | } else { |
7504 | pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; |
7505 | } |
7506 | |
7507 | refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL); |
7508 | if (refmod_state & VM_MEM_MODIFIED) { |
7509 | SET_PAGE_DIRTY(p, FALSE); |
7510 | } |
7511 | } |
7512 | |
7513 | if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) { |
7514 | /* |
7515 | * Clean and non-precious page. |
7516 | */ |
7517 | vm_page_unlock_queues(); |
7518 | VM_PAGE_FREE(p); |
7519 | |
7520 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0); |
7521 | continue; |
7522 | } |
7523 | |
7524 | if (p->vmp_laundry) |
7525 | vm_pageout_steal_laundry(p, TRUE); |
7526 | |
7527 | vm_page_queues_remove(p, TRUE); |
7528 | |
7529 | vm_page_unlock_queues(); |
7530 | |
7531 | |
7532 | /* |
7533 | * In case the compressor fails to compress this page, we need it at |
7534 | * the back of the object memq so that we don't keep trying to process it. |
7535 | * Make the move here while we have the object lock held. |
7536 | */ |
7537 | |
7538 | vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); |
7539 | vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); |
7540 | |
7541 | /* |
7542 | * Grab an activity_in_progress here for vm_pageout_compress_page() to consume. |
7543 | * |
7544 | * Mark the page busy so no one messes with it while we have the object lock dropped. |
7545 | */ |
7546 | p->vmp_busy = TRUE; |
7547 | |
7548 | vm_object_activity_begin(object); |
7549 | |
7550 | vm_object_unlock(object); |
7551 | |
7552 | if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p) == KERN_SUCCESS) { |
7553 | /* |
7554 | * page has already been un-tabled from the object via 'vm_page_remove' |
7555 | */ |
7556 | p->vmp_snext = local_freeq; |
7557 | local_freeq = p; |
7558 | local_freed++; |
7559 | |
7560 | if (local_freed >= MAX_FREE_BATCH) { |
7561 | |
7562 | OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); |
7563 | |
7564 | vm_page_free_list(local_freeq, TRUE); |
7565 | |
7566 | local_freeq = NULL; |
7567 | local_freed = 0; |
7568 | } |
7569 | c_freezer_compression_count++; |
7570 | } |
7571 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0); |
7572 | |
7573 | if (local_freed == 0 && c_freezer_should_yield()) { |
7574 | |
7575 | thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); |
7576 | clock_get_uptime(&c_freezer_last_yield_ts); |
7577 | } |
7578 | |
7579 | vm_object_lock(object); |
7580 | } |
7581 | |
7582 | if (local_freeq) { |
7583 | OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); |
7584 | |
7585 | vm_page_free_list(local_freeq, TRUE); |
7586 | |
7587 | local_freeq = NULL; |
7588 | local_freed = 0; |
7589 | } |
7590 | |
7591 | vm_object_activity_end(object); |
7592 | |
7593 | vm_object_unlock(object); |
7594 | |
7595 | if (c_freezer_should_yield()) { |
7596 | |
7597 | thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); |
7598 | clock_get_uptime(&c_freezer_last_yield_ts); |
7599 | } |
7600 | } |
7601 | |
7602 | #endif /* CONFIG_FREEZE */ |
7603 | |
7604 | |
7605 | void |
7606 | vm_object_pageout( |
7607 | vm_object_t object) |
7608 | { |
7609 | vm_page_t p, next; |
7610 | struct vm_pageout_queue *iq; |
7611 | |
7612 | if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) |
7613 | return; |
7614 | |
7615 | iq = &vm_pageout_queue_internal; |
7616 | |
7617 | assert(object != VM_OBJECT_NULL ); |
7618 | |
7619 | vm_object_lock(object); |
7620 | |
7621 | if (!object->internal || |
7622 | object->terminating || |
7623 | !object->alive) { |
7624 | vm_object_unlock(object); |
7625 | return; |
7626 | } |
7627 | |
7628 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
7629 | |
7630 | if (!object->pager_initialized) { |
7631 | |
7632 | vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); |
7633 | |
7634 | if (!object->pager_initialized) |
7635 | vm_object_compressor_pager_create(object); |
7636 | } |
7637 | |
7638 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
7639 | vm_object_unlock(object); |
7640 | return; |
7641 | } |
7642 | } |
7643 | |
7644 | ReScan: |
7645 | next = (vm_page_t)vm_page_queue_first(&object->memq); |
7646 | |
7647 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) { |
7648 | p = next; |
7649 | next = (vm_page_t)vm_page_queue_next(&next->vmp_listq); |
7650 | |
7651 | assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q); |
7652 | |
7653 | if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) || |
7654 | p->vmp_cleaning || |
7655 | p->vmp_laundry || |
7656 | p->vmp_busy || |
7657 | p->vmp_absent || |
7658 | p->vmp_error || |
7659 | p->vmp_fictitious || |
7660 | VM_PAGE_WIRED(p)) { |
7661 | /* |
7662 | * Page is already being cleaned or can't be cleaned. |
7663 | */ |
7664 | continue; |
7665 | } |
7666 | if (vm_compressor_low_on_space()) { |
7667 | break; |
7668 | } |
7669 | |
7670 | /* Throw to the pageout queue */ |
7671 | |
7672 | vm_page_lockspin_queues(); |
7673 | |
7674 | if (VM_PAGE_Q_THROTTLED(iq)) { |
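			/*
			 * The internal pageout queue is full: wait for the pageout
			 * thread to drain it, then rescan since we have to drop the
			 * object lock while we wait.
			 */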
7675 | |
7676 | iq->pgo_draining = TRUE; |
7677 | |
7678 | assert_wait((event_t) (&iq->pgo_laundry + 1), |
7679 | THREAD_INTERRUPTIBLE); |
7680 | vm_page_unlock_queues(); |
7681 | vm_object_unlock(object); |
7682 | |
7683 | thread_block(THREAD_CONTINUE_NULL); |
7684 | |
7685 | vm_object_lock(object); |
7686 | goto ReScan; |
7687 | } |
7688 | |
7689 | assert(!p->vmp_fictitious); |
7690 | assert(!p->vmp_busy); |
7691 | assert(!p->vmp_absent); |
7692 | assert(!p->vmp_unusual); |
7693 | assert(!p->vmp_error); |
7694 | assert(!VM_PAGE_WIRED(p)); |
7695 | assert(!p->vmp_cleaning); |
7696 | |
7697 | if (p->vmp_pmapped == TRUE) { |
7698 | int refmod_state; |
7699 | int pmap_options; |
7700 | |
7701 | /* |
7702 | * Tell pmap the page should be accounted |
7703 | * for as "compressed" if it's been modified. |
7704 | */ |
7705 | pmap_options = |
7706 | PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; |
7707 | if (p->vmp_dirty || p->vmp_precious) { |
7708 | /* |
7709 | * We already know it's been modified, |
7710 | * so tell pmap to account for it |
7711 | * as "compressed". |
7712 | */ |
7713 | pmap_options = PMAP_OPTIONS_COMPRESSOR; |
7714 | } |
7715 | refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), |
7716 | pmap_options, |
7717 | NULL); |
7718 | if (refmod_state & VM_MEM_MODIFIED) { |
7719 | SET_PAGE_DIRTY(p, FALSE); |
7720 | } |
7721 | } |
7722 | |
7723 | if (!p->vmp_dirty && !p->vmp_precious) { |
7724 | vm_page_unlock_queues(); |
7725 | VM_PAGE_FREE(p); |
7726 | continue; |
7727 | } |
7728 | vm_page_queues_remove(p, TRUE); |
7729 | |
7730 | vm_pageout_cluster(p); |
7731 | |
7732 | vm_page_unlock_queues(); |
7733 | } |
7734 | vm_object_unlock(object); |
7735 | } |
7736 | |
7737 | |
7738 | #if CONFIG_IOSCHED |
7739 | void |
7740 | vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio) |
7741 | { |
7742 | io_reprioritize_req_t req; |
7743 | struct vnode *devvp = NULL; |
7744 | |
7745 | if(vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) |
7746 | return; |
7747 | |
7748 | /* |
7749 | * Create the request for I/O reprioritization. |
7750 | * We use the noblock variant of zalloc because we're holding the object |
7751 | * lock here and we could cause a deadlock in low memory conditions. |
7752 | */ |
7753 | req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone); |
7754 | if (req == NULL) |
7755 | return; |
7756 | req->blkno = blkno; |
7757 | req->len = len; |
7758 | req->priority = prio; |
7759 | req->devvp = devvp; |
7760 | |
7761 | /* Insert request into the reprioritization list */ |
7762 | IO_REPRIORITIZE_LIST_LOCK(); |
7763 | queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); |
7764 | IO_REPRIORITIZE_LIST_UNLOCK(); |
7765 | |
7766 | /* Wakeup reprioritize thread */ |
7767 | IO_REPRIO_THREAD_WAKEUP(); |
7768 | |
7769 | return; |
7770 | } |
7771 | |
7772 | void |
7773 | vm_decmp_upl_reprioritize(upl_t upl, int prio) |
7774 | { |
7775 | int offset; |
7776 | vm_object_t object; |
7777 | io_reprioritize_req_t req; |
7778 | struct vnode *devvp = NULL; |
7779 | uint64_t blkno; |
7780 | uint32_t len; |
7781 | upl_t io_upl; |
7782 | uint64_t *io_upl_reprio_info; |
7783 | int io_upl_size; |
7784 | |
7785 | if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) |
7786 | return; |
7787 | |
7788 | /* |
	 * We don't want to perform any allocations with the upl lock held since that might
7790 | * result in a deadlock. If the system is low on memory, the pageout thread would |
7791 | * try to pageout stuff and might wait on this lock. If we are waiting for the memory to |
7792 | * be freed up by the pageout thread, it would be a deadlock. |
7793 | */ |
7794 | |
7795 | |
7796 | /* First step is just to get the size of the upl to find out how big the reprio info is */ |
7797 | if(!upl_try_lock(upl)) |
7798 | return; |
7799 | |
7800 | if (upl->decmp_io_upl == NULL) { |
7801 | /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */ |
7802 | upl_unlock(upl); |
7803 | return; |
7804 | } |
7805 | |
7806 | io_upl = upl->decmp_io_upl; |
7807 | assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0); |
7808 | io_upl_size = io_upl->size; |
7809 | upl_unlock(upl); |
7810 | |
7811 | /* Now perform the allocation */ |
7812 | io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); |
7813 | if (io_upl_reprio_info == NULL) |
7814 | return; |
7815 | |
7816 | /* Now again take the lock, recheck the state and grab out the required info */ |
7817 | if(!upl_try_lock(upl)) |
7818 | goto out; |
7819 | |
7820 | if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) { |
7821 | /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */ |
7822 | upl_unlock(upl); |
7823 | goto out; |
7824 | } |
7825 | memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); |
7826 | |
7827 | /* Get the VM object for this UPL */ |
7828 | if (io_upl->flags & UPL_SHADOWED) { |
7829 | object = io_upl->map_object->shadow; |
7830 | } else { |
7831 | object = io_upl->map_object; |
7832 | } |
7833 | |
7834 | /* Get the dev vnode ptr for this object */ |
7835 | if(!object || !object->pager || |
7836 | vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) { |
7837 | upl_unlock(upl); |
7838 | goto out; |
7839 | } |
7840 | |
7841 | upl_unlock(upl); |
7842 | |
7843 | /* Now we have all the information needed to do the expedite */ |
7844 | |
7845 | offset = 0; |
7846 | while (offset < io_upl_size) { |
7847 | blkno = io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK; |
7848 | len = (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK; |
7849 | |
7850 | /* |
7851 | * This implementation may cause some spurious expedites due to the |
		 * fact that we don't clean up the blkno & len from the upl_reprio_info
7853 | * even after the I/O is complete. |
7854 | */ |
7855 | |
7856 | if (blkno != 0 && len != 0) { |
7857 | /* Create the request for I/O reprioritization */ |
7858 | req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone); |
7859 | assert(req != NULL); |
7860 | req->blkno = blkno; |
7861 | req->len = len; |
7862 | req->priority = prio; |
7863 | req->devvp = devvp; |
7864 | |
7865 | /* Insert request into the reprioritization list */ |
7866 | IO_REPRIORITIZE_LIST_LOCK(); |
7867 | queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); |
7868 | IO_REPRIORITIZE_LIST_UNLOCK(); |
7869 | |
7870 | offset += len; |
7871 | } else { |
7872 | offset += PAGE_SIZE; |
7873 | } |
7874 | } |
7875 | |
7876 | /* Wakeup reprioritize thread */ |
7877 | IO_REPRIO_THREAD_WAKEUP(); |
7878 | |
7879 | out: |
7880 | kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); |
7881 | return; |
7882 | } |
7883 | |
7884 | void |
7885 | vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m) |
7886 | { |
7887 | upl_t upl; |
7888 | upl_page_info_t *pl; |
7889 | unsigned int i, num_pages; |
7890 | int cur_tier; |
7891 | |
7892 | cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO); |
7893 | |
7894 | /* |
7895 | Scan through all UPLs associated with the object to find the |
7896 | UPL containing the contended page. |
7897 | */ |
7898 | queue_iterate(&o->uplq, upl, upl_t, uplq) { |
7899 | if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) |
7900 | continue; |
7901 | pl = UPL_GET_INTERNAL_PAGE_LIST(upl); |
7902 | num_pages = (upl->size / PAGE_SIZE); |
7903 | |
7904 | /* |
7905 | For each page in the UPL page list, see if it matches the contended |
7906 | page and was issued as a low prio I/O. |
7907 | */ |
7908 | for(i=0; i < num_pages; i++) { |
7909 | if(UPL_PAGE_PRESENT(pl,i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) { |
7910 | if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) { |
7911 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), |
7912 | VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0); |
7913 | vm_decmp_upl_reprioritize(upl, cur_tier); |
7914 | break; |
7915 | } |
7916 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), |
7917 | upl->upl_reprio_info[i], upl->upl_priority, 0); |
7918 | if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) |
7919 | vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier); |
7920 | break; |
7921 | } |
7922 | } |
7923 | /* Check if we found any hits */ |
7924 | if (i != num_pages) |
7925 | break; |
7926 | } |
7927 | |
7928 | return; |
7929 | } |
7930 | |
7931 | wait_result_t |
7932 | vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible) |
7933 | { |
7934 | wait_result_t ret; |
7935 | |
7936 | KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0); |
7937 | |
7938 | if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) { |
7939 | /* |
7940 | Indicates page is busy due to an I/O. Issue a reprioritize request if necessary. |
7941 | */ |
7942 | vm_page_handle_prio_inversion(o,m); |
7943 | } |
7944 | m->vmp_wanted = TRUE; |
7945 | ret = thread_sleep_vm_object(o, m, interruptible); |
7946 | KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0); |
7947 | return ret; |
7948 | } |
7949 | |
7950 | static void |
7951 | io_reprioritize_thread(void *param __unused, wait_result_t wr __unused) |
7952 | { |
7953 | io_reprioritize_req_t req = NULL; |
7954 | |
7955 | while(1) { |
7956 | |
7957 | IO_REPRIORITIZE_LIST_LOCK(); |
7958 | if (queue_empty(&io_reprioritize_list)) { |
7959 | IO_REPRIORITIZE_LIST_UNLOCK(); |
7960 | break; |
7961 | } |
7962 | |
7963 | queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); |
7964 | IO_REPRIORITIZE_LIST_UNLOCK(); |
7965 | |
7966 | vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority); |
7967 | zfree(io_reprioritize_req_zone, req); |
7968 | } |
7969 | |
7970 | IO_REPRIO_THREAD_CONTINUATION(); |
7971 | } |
7972 | #endif |
7973 | |
7974 | #if VM_OBJECT_ACCESS_TRACKING |
7975 | void |
7976 | vm_object_access_tracking( |
7977 | vm_object_t object, |
7978 | int *access_tracking_p, |
7979 | uint32_t *access_tracking_reads_p, |
7980 | uint32_t *access_tracking_writes_p) |
7981 | { |
7982 | int access_tracking; |
7983 | |
7984 | access_tracking = !!*access_tracking_p; |
7985 | |
7986 | vm_object_lock(object); |
7987 | *access_tracking_p = object->access_tracking; |
7988 | if (access_tracking_reads_p) { |
7989 | *access_tracking_reads_p = object->access_tracking_reads; |
7990 | } |
7991 | if (access_tracking_writes_p) { |
7992 | *access_tracking_writes_p = object->access_tracking_writes; |
7993 | } |
7994 | object->access_tracking = access_tracking; |
7995 | object->access_tracking_reads = 0; |
7996 | object->access_tracking_writes = 0; |
7997 | vm_object_unlock(object); |
7998 | |
7999 | if (access_tracking) { |
8000 | vm_object_pmap_protect_options(object, |
8001 | 0, |
8002 | object->vo_size, |
8003 | PMAP_NULL, |
8004 | 0, |
8005 | VM_PROT_NONE, |
8006 | 0); |
8007 | } |
8008 | } |
8009 | #endif /* VM_OBJECT_ACCESS_TRACKING */ |
8010 | |
8011 | void |
8012 | vm_object_ledger_tag_ledgers( |
8013 | vm_object_t object, |
8014 | int *ledger_idx_volatile, |
8015 | int *ledger_idx_nonvolatile, |
8016 | int *ledger_idx_volatile_compressed, |
8017 | int *ledger_idx_nonvolatile_compressed, |
	boolean_t *do_footprint)
8019 | { |
8020 | assert(object->shadow == VM_OBJECT_NULL); |
8021 | |
8022 | switch (object->vo_ledger_tag) { |
8023 | case VM_OBJECT_LEDGER_TAG_NONE: |
8024 | /* regular purgeable memory */ |
8025 | assert(object->purgable != VM_PURGABLE_DENY); |
8026 | *ledger_idx_volatile = task_ledgers.purgeable_volatile; |
8027 | *ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile; |
8028 | *ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed; |
8029 | *ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed; |
8030 | *do_footprint = TRUE; |
8031 | break; |
8032 | case VM_OBJECT_LEDGER_TAG_NETWORK: |
8033 | *ledger_idx_volatile = task_ledgers.network_volatile; |
8034 | *ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed; |
8035 | *ledger_idx_nonvolatile = task_ledgers.network_nonvolatile; |
8036 | *ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed; |
8037 | *do_footprint = FALSE; |
8038 | break; |
8039 | case VM_OBJECT_LEDGER_TAG_MEDIA: |
8040 | default: |
8041 | panic("%s: object %p has unsupported ledger_tag %d\n" , |
8042 | __FUNCTION__, object, object->vo_ledger_tag); |
8043 | } |
8044 | } |
8045 | |
8046 | kern_return_t |
8047 | vm_object_ownership_change( |
8048 | vm_object_t object, |
8049 | int new_ledger_tag, |
8050 | task_t new_owner, |
8051 | boolean_t task_objq_locked) |
8052 | { |
8053 | int old_ledger_tag; |
8054 | task_t old_owner; |
8055 | int resident_count, wired_count; |
8056 | unsigned int compressed_count; |
8057 | int ledger_idx_volatile; |
8058 | int ledger_idx_nonvolatile; |
8059 | int ledger_idx_volatile_compressed; |
8060 | int ledger_idx_nonvolatile_compressed; |
8061 | int ledger_idx; |
8062 | int ledger_idx_compressed; |
	boolean_t do_footprint;
8064 | |
8065 | vm_object_lock_assert_exclusive(object); |
8066 | assert(object->internal); |
8067 | |
8068 | old_ledger_tag = object->vo_ledger_tag; |
8069 | old_owner = VM_OBJECT_OWNER(object); |
8070 | |
8071 | resident_count = object->resident_page_count - object->wired_page_count; |
8072 | wired_count = object->wired_page_count; |
8073 | compressed_count = vm_compressor_pager_get_count(object->pager); |
8074 | |
8075 | /* |
8076 | * Deal with the old owner and/or ledger tag, if needed. |
8077 | */ |
8078 | if (old_owner != TASK_NULL && |
8079 | ((old_owner != new_owner) /* new owner ... */ |
8080 | || /* ... or ... */ |
8081 | (old_ledger_tag && /* ... new ledger */ |
8082 | old_ledger_tag != new_ledger_tag))) { |
8083 | /* |
8084 | * Take this object off of the old owner's ledgers. |
8085 | */ |
8086 | vm_object_ledger_tag_ledgers(object, |
8087 | &ledger_idx_volatile, |
8088 | &ledger_idx_nonvolatile, |
8089 | &ledger_idx_volatile_compressed, |
8090 | &ledger_idx_nonvolatile_compressed, |
8091 | &do_footprint); |
8092 | if (object->purgable == VM_PURGABLE_VOLATILE || |
8093 | object->purgable == VM_PURGABLE_EMPTY) { |
8094 | ledger_idx = ledger_idx_volatile; |
8095 | ledger_idx_compressed = ledger_idx_volatile_compressed; |
8096 | } else { |
8097 | ledger_idx = ledger_idx_nonvolatile; |
8098 | ledger_idx_compressed = ledger_idx_nonvolatile_compressed; |
8099 | } |
8100 | if (resident_count) { |
8101 | /* |
			 * Adjust the appropriate old owner's ledgers by the
8103 | * number of resident pages. |
8104 | */ |
8105 | ledger_debit(old_owner->ledger, |
8106 | ledger_idx, |
8107 | ptoa_64(resident_count)); |
8108 | /* adjust old owner's footprint */ |
8109 | if (do_footprint && |
8110 | object->purgable != VM_PURGABLE_VOLATILE && |
8111 | object->purgable != VM_PURGABLE_EMPTY) { |
8112 | ledger_debit(old_owner->ledger, |
8113 | task_ledgers.phys_footprint, |
8114 | ptoa_64(resident_count)); |
8115 | } |
8116 | } |
8117 | if (wired_count) { |
8118 | /* wired pages are always nonvolatile */ |
8119 | ledger_debit(old_owner->ledger, |
8120 | ledger_idx_nonvolatile, |
8121 | ptoa_64(wired_count)); |
8122 | if (do_footprint) { |
8123 | ledger_debit(old_owner->ledger, |
8124 | task_ledgers.phys_footprint, |
8125 | ptoa_64(wired_count)); |
8126 | } |
8127 | } |
8128 | if (compressed_count) { |
8129 | /* |
8130 | * Adjust the appropriate old owner's ledgers |
8131 | * by the number of compressed pages. |
8132 | */ |
8133 | ledger_debit(old_owner->ledger, |
8134 | ledger_idx_compressed, |
8135 | ptoa_64(compressed_count)); |
8136 | if (do_footprint && |
8137 | object->purgable != VM_PURGABLE_VOLATILE && |
8138 | object->purgable != VM_PURGABLE_EMPTY) { |
8139 | ledger_debit(old_owner->ledger, |
8140 | task_ledgers.phys_footprint, |
8141 | ptoa_64(compressed_count)); |
8142 | } |
8143 | } |
8144 | if (old_owner != new_owner) { |
8145 | /* remove object from old_owner's list of owned objects */ |
8146 | DTRACE_VM2(object_owner_remove, |
8147 | vm_object_t, object, |
8148 | task_t, new_owner); |
8149 | if (!task_objq_locked) { |
8150 | task_objq_lock(old_owner); |
8151 | } |
8152 | queue_remove(&old_owner->task_objq, object, |
8153 | vm_object_t, task_objq); |
8154 | switch (object->purgable) { |
8155 | case VM_PURGABLE_NONVOLATILE: |
8156 | case VM_PURGABLE_EMPTY: |
8157 | vm_purgeable_nonvolatile_owner_update(old_owner, |
8158 | -1); |
8159 | break; |
8160 | case VM_PURGABLE_VOLATILE: |
8161 | vm_purgeable_volatile_owner_update(old_owner, |
8162 | -1); |
8163 | break; |
8164 | default: |
8165 | break; |
8166 | } |
8167 | if (!task_objq_locked) { |
8168 | task_objq_unlock(old_owner); |
8169 | } |
8170 | } |
8171 | } |
8172 | |
8173 | /* |
8174 | * Switch to new ledger tag and/or owner. |
8175 | */ |
8176 | object->vo_ledger_tag = new_ledger_tag; |
8177 | object->vo_owner = new_owner; |
8178 | |
8179 | if (new_owner == VM_OBJECT_OWNER_DISOWNED) { |
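		/* a disowned object is charged to the kernel's ledgers from here on */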
8180 | assert(old_owner != kernel_task); |
8181 | new_owner = kernel_task; |
8182 | } |
8183 | |
8184 | /* |
8185 | * Deal with the new owner and/or ledger tag, if needed. |
8186 | */ |
8187 | if (new_owner != TASK_NULL && |
8188 | ((new_owner != old_owner) /* new owner ... */ |
8189 | || /* ... or ... */ |
8190 | (new_ledger_tag && /* ... new ledger */ |
8191 | new_ledger_tag != old_ledger_tag))) { |
8192 | /* |
8193 | * Add this object to the new owner's ledgers. |
8194 | */ |
8195 | vm_object_ledger_tag_ledgers(object, |
8196 | &ledger_idx_volatile, |
8197 | &ledger_idx_nonvolatile, |
8198 | &ledger_idx_volatile_compressed, |
8199 | &ledger_idx_nonvolatile_compressed, |
8200 | &do_footprint); |
8201 | if (object->purgable == VM_PURGABLE_VOLATILE || |
8202 | object->purgable == VM_PURGABLE_EMPTY) { |
8203 | ledger_idx = ledger_idx_volatile; |
8204 | ledger_idx_compressed = ledger_idx_volatile_compressed; |
8205 | } else { |
8206 | ledger_idx = ledger_idx_nonvolatile; |
8207 | ledger_idx_compressed = ledger_idx_nonvolatile_compressed; |
8208 | } |
8209 | if (resident_count) { |
8210 | /* |
			 * Adjust the appropriate new owner's ledgers by the
8212 | * number of resident pages. |
8213 | */ |
8214 | ledger_credit(new_owner->ledger, |
8215 | ledger_idx, |
8216 | ptoa_64(resident_count)); |
8217 | /* adjust new owner's footprint */ |
8218 | if (do_footprint && |
8219 | object->purgable != VM_PURGABLE_VOLATILE && |
8220 | object->purgable != VM_PURGABLE_EMPTY) { |
8221 | ledger_credit(new_owner->ledger, |
8222 | task_ledgers.phys_footprint, |
8223 | ptoa_64(resident_count)); |
8224 | } |
8225 | } |
8226 | if (wired_count) { |
8227 | /* wired pages are always nonvolatile */ |
8228 | ledger_credit(new_owner->ledger, |
8229 | ledger_idx_nonvolatile, |
8230 | ptoa_64(wired_count)); |
8231 | if (do_footprint) { |
8232 | ledger_credit(new_owner->ledger, |
8233 | task_ledgers.phys_footprint, |
8234 | ptoa_64(wired_count)); |
8235 | } |
8236 | } |
8237 | if (compressed_count) { |
8238 | /* |
8239 | * Adjust the new owner's ledgers by the number of |
8240 | * compressed pages. |
8241 | */ |
8242 | ledger_credit(new_owner->ledger, |
8243 | ledger_idx_compressed, |
8244 | ptoa_64(compressed_count)); |
8245 | if (do_footprint && |
8246 | object->purgable != VM_PURGABLE_VOLATILE && |
8247 | object->purgable != VM_PURGABLE_EMPTY) { |
8248 | ledger_credit(new_owner->ledger, |
8249 | task_ledgers.phys_footprint, |
8250 | ptoa_64(compressed_count)); |
8251 | } |
8252 | } |
8253 | if (new_owner != old_owner) { |
8254 | /* add object to new_owner's list of owned objects */ |
8255 | DTRACE_VM2(object_owner_add, |
8256 | vm_object_t, object, |
8257 | task_t, new_owner); |
8258 | task_objq_lock(new_owner); |
8259 | queue_enter(&new_owner->task_objq, object, |
8260 | vm_object_t, task_objq); |
8261 | switch (object->purgable) { |
8262 | case VM_PURGABLE_NONVOLATILE: |
8263 | case VM_PURGABLE_EMPTY: |
8264 | vm_purgeable_nonvolatile_owner_update(new_owner, |
8265 | +1); |
8266 | break; |
8267 | case VM_PURGABLE_VOLATILE: |
8268 | vm_purgeable_volatile_owner_update(new_owner, |
8269 | +1); |
8270 | break; |
8271 | default: |
8272 | break; |
8273 | } |
8274 | task_objq_unlock(new_owner); |
8275 | } |
8276 | } |
8277 | |
8278 | return KERN_SUCCESS; |
8279 | } |
8280 | |