1 | /* |
2 | * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | /* |
57 | */ |
58 | /* |
59 | * File: vm/vm_page.h |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
61 | * Date: 1985 |
62 | * |
63 | * Resident memory system definitions. |
64 | */ |
65 | |
66 | #ifndef _VM_VM_PAGE_H_ |
67 | #define _VM_VM_PAGE_H_ |
68 | |
69 | #include <debug.h> |
70 | #include <vm/vm_options.h> |
71 | #include <mach/boolean.h> |
72 | #include <mach/vm_prot.h> |
73 | #include <mach/vm_param.h> |
74 | |
75 | |
76 | #if defined(__LP64__) |
77 | |
78 | /* |
79 | * in order to make the size of a vm_page_t 64 bytes (cache line size for both arm64 and x86_64) |
80 | * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate |
81 | * vm_page_t's from doesn't span more then 256 Gbytes, we're safe. There are live tests in the |
82 | * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack |
83 | * pointers from the 2 ends of these spaces |
84 | */ |
85 | typedef uint32_t vm_page_packed_t; |
86 | |
87 | struct vm_page_packed_queue_entry { |
88 | vm_page_packed_t next; /* next element */ |
89 | vm_page_packed_t prev; /* previous element */ |
90 | }; |
91 | |
92 | typedef struct vm_page_packed_queue_entry *vm_page_queue_t; |
93 | typedef struct vm_page_packed_queue_entry vm_page_queue_head_t; |
94 | typedef struct vm_page_packed_queue_entry vm_page_queue_chain_t; |
95 | typedef struct vm_page_packed_queue_entry *vm_page_queue_entry_t; |
96 | |
97 | typedef vm_page_packed_t vm_page_object_t; |
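
/*
 * A worked sketch of the packing arithmetic (illustrative only): a 64-byte
 * aligned kernel pointer has 6 zero low bits, so shifting it right by
 * VM_PACKED_POINTER_SHIFT (6) lets a 32-bit value span 2^(32+6) bytes,
 * i.e. 256 GB, above VM_MIN_KERNEL_AND_KEXT_ADDRESS.  vm_page_pack_ptr()
 * below implements the actual encoding and also special-cases pointers into
 * the vm_pages[] array by storing an array index instead.
 */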
98 | |
99 | #else |
100 | |
101 | /* |
102 | * we can't do the packing trick on 32 bit architectures, so |
103 | * just turn the macros into noops. |
104 | */ |
105 | typedef struct vm_page *vm_page_packed_t; |
106 | |
107 | #define vm_page_queue_t queue_t |
108 | #define vm_page_queue_head_t queue_head_t |
109 | #define vm_page_queue_chain_t queue_chain_t |
110 | #define vm_page_queue_entry_t queue_entry_t |
111 | |
112 | #define vm_page_object_t vm_object_t |
113 | #endif |
114 | |
115 | |
116 | #include <vm/vm_object.h> |
117 | #include <kern/queue.h> |
118 | #include <kern/locks.h> |
119 | |
120 | #include <kern/macro_help.h> |
121 | #include <libkern/OSAtomic.h> |
122 | |
123 | |
124 | |
125 | #define VM_PAGE_COMPRESSOR_COUNT (compressor_object->resident_page_count) |
126 | |
127 | /* |
128 | * Management of resident (logical) pages. |
129 | * |
130 | * A small structure is kept for each resident |
131 | * page, indexed by page number. Each structure |
132 | * is an element of several lists: |
133 | * |
134 | * A hash table bucket used to quickly |
135 | * perform object/offset lookups |
136 | * |
137 | * A list of all pages for a given object, |
138 | * so they can be quickly deactivated at |
139 | * time of deallocation. |
140 | * |
141 | * An ordered list of pages due for pageout. |
142 | * |
143 | * In addition, the structure contains the object |
144 | * and offset to which this page belongs (for pageout), |
145 | * and sundry status bits. |
146 | * |
147 | * Fields in this structure are locked either by the lock on the |
148 | * object that the page belongs to (O) or by the lock on the page |
149 | * queues (P). [Some fields require that both locks be held to |
150 | * change that field; holding either lock is sufficient to read.] |
151 | */ |
152 | |
153 | #define VM_PAGE_NULL ((vm_page_t) 0) |
154 | |
155 | extern char vm_page_inactive_states[]; |
156 | extern char vm_page_pageable_states[]; |
157 | extern char vm_page_non_speculative_pageable_states[]; |
158 | extern char vm_page_active_or_inactive_states[]; |
159 | |
160 | |
161 | #define VM_PAGE_INACTIVE(m) (vm_page_inactive_states[m->vmp_q_state]) |
162 | #define VM_PAGE_PAGEABLE(m) (vm_page_pageable_states[m->vmp_q_state]) |
163 | #define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m) (vm_page_non_speculative_pageable_states[m->vmp_q_state]) |
164 | #define VM_PAGE_ACTIVE_OR_INACTIVE(m) (vm_page_active_or_inactive_states[m->vmp_q_state]) |
165 | |
166 | |
167 | #define VM_PAGE_NOT_ON_Q 0 /* page is not present on any queue, nor is it wired... mainly a transient state */ |
168 | #define VM_PAGE_IS_WIRED 1 /* page is currently wired */ |
169 | #define VM_PAGE_USED_BY_COMPRESSOR 2 /* page is in use by the compressor to hold compressed data */ |
170 | #define VM_PAGE_ON_FREE_Q 3 /* page is on the main free queue */ |
171 | #define VM_PAGE_ON_FREE_LOCAL_Q 4 /* page is on one of the per-CPU free queues */ |
172 | #define VM_PAGE_ON_FREE_LOPAGE_Q 5 /* page is on the lopage pool free list */ |
173 | #define VM_PAGE_ON_THROTTLED_Q 6 /* page is on the throttled queue... we stash anonymous pages here when not paging */ |
174 | #define VM_PAGE_ON_PAGEOUT_Q 7 /* page is on one of the pageout queues (internal/external) awaiting processing */ |
175 | #define VM_PAGE_ON_SPECULATIVE_Q 8 /* page is on one of the speculative queues */ |
176 | #define VM_PAGE_ON_ACTIVE_LOCAL_Q 9 /* page has recently been created and is being held in one of the per-CPU local queues */ |
177 | #define VM_PAGE_ON_ACTIVE_Q 10 /* page is in global active queue */ |
178 | #define VM_PAGE_ON_INACTIVE_INTERNAL_Q 11 /* page is on the inactive internal queue a.k.a. anonymous queue */ |
#define VM_PAGE_ON_INACTIVE_EXTERNAL_Q	12	/* page is on the inactive external queue a.k.a. file backed queue */
180 | #define VM_PAGE_ON_INACTIVE_CLEANED_Q 13 /* page has been cleaned to a backing file and is ready to be stolen */ |
181 | #define VM_PAGE_ON_SECLUDED_Q 14 /* page is on secluded queue */ |
182 | #define VM_PAGE_Q_STATE_LAST_VALID_VALUE 14 /* we currently use 4 bits for the state... don't let this go beyond 15 */ |
183 | |
184 | #define VM_PAGE_Q_STATE_ARRAY_SIZE (VM_PAGE_Q_STATE_LAST_VALID_VALUE+1) |
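
/*
 * Example (sketch): the VM_PAGE_* predicates above are simple table lookups
 * indexed by the 4-bit vmp_q_state field, e.g.
 *
 *	if (VM_PAGE_PAGEABLE(m))	\* vm_page_pageable_states[m->vmp_q_state] *\
 *		vm_page_queues_remove(m, TRUE);
 */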
185 | |
186 | |
187 | /* |
188 | * The structure itself. See the block comment above for what (O) and (P) mean. |
189 | */ |
190 | #define vmp_pageq vmp_q_un.vmp_q_pageq |
191 | #define vmp_snext vmp_q_un.vmp_q_snext |
192 | |
193 | struct vm_page { |
194 | union { |
195 | vm_page_queue_chain_t vmp_q_pageq; /* queue info for FIFO queue or free list (P) */ |
196 | struct vm_page *vmp_q_snext; |
197 | } vmp_q_un; |
198 | |
199 | vm_page_queue_chain_t vmp_listq; /* all pages in same object (O) */ |
200 | |
201 | #if CONFIG_BACKGROUND_QUEUE |
202 | vm_page_queue_chain_t vmp_backgroundq; /* anonymous pages in the background pool (P) */ |
203 | #endif |
204 | |
205 | vm_object_offset_t vmp_offset; /* offset into that object (O,P) */ |
206 | vm_page_object_t vmp_object; /* which object am I in (O&P) */ |
207 | |
208 | /* |
209 | * The following word of flags is always protected by the "page queues" lock. |
210 | * |
211 | * We use 'vmp_wire_count' to store the local queue id if local queues are enabled. |
212 | * See the comments at 'vm_page_queues_remove' as to why this is safe to do. |
213 | */ |
214 | #define vmp_local_id vmp_wire_count |
215 | unsigned int vmp_wire_count:16, /* how many wired down maps use me? (O&P) */ |
216 | vmp_q_state:4, /* which q is the page on (P) */ |
217 | vmp_in_background:1, |
218 | vmp_on_backgroundq:1, |
219 | vmp_gobbled:1, /* page used internally (P) */ |
220 | vmp_laundry:1, /* page is being cleaned now (P)*/ |
221 | vmp_no_cache:1, /* page is not to be cached and should */ |
222 | /* be reused ahead of other pages (P) */ |
223 | vmp_private:1, /* Page should not be returned to the free list (P) */ |
224 | vmp_reference:1, /* page has been used (P) */ |
225 | vmp_unused_page_bits:5; |
226 | |
227 | /* |
228 | * MUST keep the 2 32 bit words used as bit fields |
229 | * separated since the compiler has a nasty habit |
230 | * of using 64 bit loads and stores on them as |
231 | * if they were a single 64 bit field... since |
232 | * they are protected by 2 different locks, this |
233 | * is a real problem |
234 | */ |
235 | vm_page_packed_t vmp_next_m; /* VP bucket link (O) */ |
236 | |
237 | /* |
238 | * The following word of flags is protected by the "VM object" lock. |
239 | * |
240 | * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the |
241 | * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function. |
242 | * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro. |
243 | * It's also ok to modify them behind just the VM object "exclusive" lock. |
244 | */ |
245 | unsigned int vmp_busy:1, /* page is in transit (O) */ |
246 | vmp_wanted:1, /* someone is waiting for page (O) */ |
247 | vmp_tabled:1, /* page is in VP table (O) */ |
248 | vmp_hashed:1, /* page is in vm_page_buckets[] (O) + the bucket lock */ |
249 | vmp_fictitious:1, /* Physical page doesn't exist (O) */ |
250 | vmp_clustered:1, /* page is not the faulted page (O) or (O-shared AND pmap_page) */ |
251 | vmp_pmapped:1, /* page has at some time been entered into a pmap (O) or */ |
252 | /* (O-shared AND pmap_page) */ |
253 | vmp_xpmapped:1, /* page has been entered with execute permission (O) or */ |
254 | /* (O-shared AND pmap_page) */ |
255 | vmp_wpmapped:1, /* page has been entered at some point into a pmap for write (O) */ |
256 | vmp_free_when_done:1, /* page is to be freed once cleaning is completed (O) */ |
257 | vmp_absent:1, /* Data has been requested, but is not yet available (O) */ |
258 | vmp_error:1, /* Data manager was unable to provide data due to error (O) */ |
259 | vmp_dirty:1, /* Page must be cleaned (O) */ |
260 | vmp_cleaning:1, /* Page clean has begun (O) */ |
261 | vmp_precious:1, /* Page is precious; data must be returned even if clean (O) */ |
262 | vmp_overwriting:1, /* Request to unlock has been made without having data. (O) */ |
263 | /* [See vm_fault_page_overwrite] */ |
264 | vmp_restart:1, /* Page was pushed higher in shadow chain by copy_call-related pagers */ |
265 | /* start again at top of chain */ |
266 | vmp_unusual:1, /* Page is absent, error, restart or page locked */ |
267 | vmp_cs_validated:1, /* code-signing: page was checked */ |
268 | vmp_cs_tainted:1, /* code-signing: page is tainted */ |
269 | vmp_cs_nx:1, /* code-signing: page is nx */ |
270 | vmp_reusable:1, |
271 | vmp_lopage:1, |
272 | vmp_written_by_kernel:1, /* page was written by kernel (i.e. decompressed) */ |
273 | vmp_unused_object_bits:8; |
274 | |
275 | #if !defined(__arm__) && !defined(__arm64__) |
276 | ppnum_t vmp_phys_page; /* Physical page number of the page */ |
277 | #endif |
278 | }; |
279 | |
280 | |
281 | typedef struct vm_page *vm_page_t; |
extern vm_page_t	vm_pages;
extern vm_page_t	vm_page_array_beginning_addr;
extern vm_page_t	vm_page_array_ending_addr;
extern vm_page_t	vm_page_array_boundary;		/* referenced by the clump macros below */
285 | |
286 | |
287 | #if defined(__arm__) || defined(__arm64__) |
288 | |
289 | extern unsigned int vm_first_phys_ppnum; |
290 | |
291 | struct vm_page_with_ppnum { |
292 | struct vm_page vm_page_wo_ppnum; |
293 | |
294 | ppnum_t vmp_phys_page; |
295 | }; |
296 | typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t; |
297 | |
298 | |
299 | static inline ppnum_t VM_PAGE_GET_PHYS_PAGE(vm_page_t m) |
300 | { |
301 | if (m >= vm_page_array_beginning_addr && m < vm_page_array_ending_addr) |
302 | return ((ppnum_t)((uintptr_t)(m - vm_page_array_beginning_addr) + vm_first_phys_ppnum)); |
303 | else |
304 | return (((vm_page_with_ppnum_t)m)->vmp_phys_page); |
305 | } |
306 | |
307 | #define VM_PAGE_SET_PHYS_PAGE(m, ppnum) \ |
308 | MACRO_BEGIN \ |
309 | if ((m) < vm_page_array_beginning_addr || (m) >= vm_page_array_ending_addr) \ |
310 | ((vm_page_with_ppnum_t)(m))->vmp_phys_page = ppnum; \ |
311 | assert(ppnum == VM_PAGE_GET_PHYS_PAGE(m)); \ |
312 | MACRO_END |
313 | |
314 | #define VM_PAGE_GET_COLOR(m) (VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask) |
315 | |
316 | #else /* defined(__arm__) || defined(__arm64__) */ |
317 | |
318 | |
319 | struct vm_page_with_ppnum { |
320 | struct vm_page vm_page_with_ppnum; |
321 | }; |
322 | typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t; |
323 | |
324 | |
325 | #define VM_PAGE_GET_PHYS_PAGE(page) (page)->vmp_phys_page |
326 | #define VM_PAGE_SET_PHYS_PAGE(page, ppnum) \ |
327 | MACRO_BEGIN \ |
328 | (page)->vmp_phys_page = ppnum; \ |
329 | MACRO_END |
330 | |
331 | #define VM_PAGE_GET_CLUMP(m) ((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift) |
332 | #define VM_PAGE_GET_COLOR(m) ((VM_PAGE_GET_CLUMP(m)) & vm_color_mask) |
333 | |
334 | #endif /* defined(__arm__) || defined(__arm64__) */ |
335 | |
336 | |
337 | |
338 | #if defined(__LP64__) |
339 | |
340 | #define VM_VPLQ_ALIGNMENT 128 |
341 | #define VM_PACKED_POINTER_ALIGNMENT 64 /* must be a power of 2 */ |
342 | #define VM_PACKED_POINTER_SHIFT 6 |
343 | |
344 | #define VM_PACKED_FROM_VM_PAGES_ARRAY 0x80000000 |
345 | |
346 | static inline vm_page_packed_t vm_page_pack_ptr(uintptr_t p) |
347 | { |
348 | vm_page_packed_t packed_ptr; |
349 | |
350 | if (!p) |
351 | return ((vm_page_packed_t)0); |
352 | |
353 | if (p >= (uintptr_t)(vm_page_array_beginning_addr) && p < (uintptr_t)(vm_page_array_ending_addr)) { |
354 | packed_ptr = ((vm_page_packed_t)(((vm_page_t)p - vm_page_array_beginning_addr))); |
355 | assert(! (packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY)); |
356 | packed_ptr |= VM_PACKED_FROM_VM_PAGES_ARRAY; |
357 | return packed_ptr; |
358 | } |
359 | |
360 | assert((p & (VM_PACKED_POINTER_ALIGNMENT - 1)) == 0); |
361 | |
362 | packed_ptr = ((vm_page_packed_t)(((uintptr_t)(p - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)) >> VM_PACKED_POINTER_SHIFT)); |
363 | assert(packed_ptr != 0); |
364 | assert(! (packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY)); |
365 | return packed_ptr; |
366 | } |
367 | |
368 | |
369 | static inline uintptr_t vm_page_unpack_ptr(uintptr_t p) |
370 | { |
371 | if (!p) |
372 | return ((uintptr_t)0); |
373 | |
374 | if (p & VM_PACKED_FROM_VM_PAGES_ARRAY) |
375 | return ((uintptr_t)(&vm_pages[(uint32_t)(p & ~VM_PACKED_FROM_VM_PAGES_ARRAY)])); |
376 | return (((p << VM_PACKED_POINTER_SHIFT) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)); |
377 | } |
378 | |
379 | |
380 | #define VM_PAGE_PACK_PTR(p) vm_page_pack_ptr((uintptr_t)(p)) |
381 | #define VM_PAGE_UNPACK_PTR(p) vm_page_unpack_ptr((uintptr_t)(p)) |
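
/*
 * Round-trip usage sketch (assuming 'm' is either a vm_pages[] element or a
 * 64-byte aligned pointer above VM_MIN_KERNEL_AND_KEXT_ADDRESS):
 *
 *	vm_page_t		m = ...;
 *	vm_page_packed_t	packed = VM_PAGE_PACK_PTR(m);
 *
 *	assert((vm_page_t)VM_PAGE_UNPACK_PTR(packed) == m);
 */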
382 | |
383 | #define VM_PAGE_OBJECT(p) ((vm_object_t)(VM_PAGE_UNPACK_PTR(p->vmp_object))) |
384 | #define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o))) |
385 | |
386 | |
387 | #define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \ |
388 | MACRO_BEGIN \ |
389 | (p)->vmp_snext = 0; \ |
390 | MACRO_END |
391 | |
392 | |
393 | #define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) VM_PAGE_PACK_PTR(p) |
394 | |
395 | |
396 | static __inline__ void |
397 | vm_page_enqueue_tail( |
398 | vm_page_queue_t que, |
399 | vm_page_queue_entry_t elt) |
400 | { |
401 | vm_page_queue_entry_t old_tail; |
402 | |
403 | old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev); |
404 | elt->next = VM_PAGE_PACK_PTR(que); |
405 | elt->prev = que->prev; |
406 | old_tail->next = VM_PAGE_PACK_PTR(elt); |
407 | que->prev = VM_PAGE_PACK_PTR(elt); |
408 | } |
409 | |
410 | |
411 | static __inline__ void |
412 | vm_page_remque( |
413 | vm_page_queue_entry_t elt) |
414 | { |
415 | vm_page_queue_entry_t next_elt, prev_elt; |
416 | |
417 | next_elt = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(elt->next); |
418 | |
419 | /* next_elt may equal prev_elt (and the queue head) if elt was the only element */ |
420 | prev_elt = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(elt->prev); |
421 | |
422 | next_elt->prev = VM_PAGE_PACK_PTR(prev_elt); |
423 | prev_elt->next = VM_PAGE_PACK_PTR(next_elt); |
424 | |
425 | elt->next = 0; |
426 | elt->prev = 0; |
427 | } |
428 | |
429 | |
430 | /* |
431 | * Macro: vm_page_queue_init |
432 | * Function: |
433 | * Initialize the given queue. |
434 | * Header: |
435 | * void vm_page_queue_init(q) |
436 | * vm_page_queue_t q; \* MODIFIED *\ |
437 | */ |
438 | #define vm_page_queue_init(q) \ |
439 | MACRO_BEGIN \ |
440 | assert((((uintptr_t)q) & (VM_PACKED_POINTER_ALIGNMENT-1)) == 0); \ |
441 | assert((VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR((uintptr_t)q))) == (uintptr_t)q); \ |
442 | (q)->next = VM_PAGE_PACK_PTR(q); \ |
443 | (q)->prev = VM_PAGE_PACK_PTR(q); \ |
444 | MACRO_END |
445 | |
446 | |
447 | /* |
448 | * Macro: vm_page_queue_enter |
449 | * Function: |
450 | * Insert a new element at the tail of the queue. |
451 | * Header: |
452 | * void vm_page_queue_enter(q, elt, type, field) |
 *		vm_page_queue_t q;
454 | * <type> elt; |
455 | * <type> is what's in our queue |
456 | * <field> is the chain field in (*<type>) |
457 | * Note: |
458 | * This should only be used with Method 2 queue iteration (element chains) |
459 | */ |
460 | #define vm_page_queue_enter(head, elt, type, field) \ |
461 | MACRO_BEGIN \ |
462 | vm_page_queue_entry_t __prev; \ |
463 | \ |
464 | __prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->prev)); \ |
465 | if ((head) == __prev) { \ |
466 | (head)->next = VM_PAGE_PACK_PTR(elt); \ |
467 | } \ |
468 | else { \ |
469 | ((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(elt); \ |
470 | } \ |
471 | (elt)->field.prev = VM_PAGE_PACK_PTR(__prev); \ |
472 | (elt)->field.next = VM_PAGE_PACK_PTR(head); \ |
473 | (head)->prev = VM_PAGE_PACK_PTR(elt); \ |
474 | MACRO_END |
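
/*
 * Usage sketch for the macros above (hypothetical private queue; real
 * callers hold the page queues lock or the owning object's lock):
 *
 *	vm_page_queue_head_t	q;
 *
 *	vm_page_queue_init(&q);
 *	vm_page_queue_enter(&q, mem, vm_page_t, vmp_pageq);
 */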
475 | |
476 | |
477 | /* |
478 | * These are helper macros for vm_page_queue_enter_clump to assist |
479 | * with conditional compilation (release / debug / development) |
480 | */ |
481 | #if DEVELOPMENT || DEBUG |
482 | |
483 | #define __DEBUG_CHECK_BUDDIES(__check, __prev, __p, field) \ |
484 | MACRO_BEGIN \ |
485 | if(__check) { /* if first forward buddy.. */ \ |
486 | if(__prev) { /* ..and if a backward buddy was found, verify link consistency */ \ |
487 | assert(__p == (vm_page_t) VM_PAGE_UNPACK_PTR(__prev->next)); \ |
488 | assert(__prev == (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__p->field.prev)); \ |
489 | } \ |
490 | __check=0; \ |
491 | } \ |
492 | MACRO_END |
493 | |
494 | #define __DEBUG_VERIFY_LINKS(__i, __first, __n_free, __last_next) \ |
495 | MACRO_BEGIN \ |
496 | vm_page_queue_entry_t __tmp; \ |
497 | for(__i=0, __tmp=__first; __i<__n_free; __i++) \ |
498 | __tmp=(vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__tmp->next); \ |
499 | assert(__tmp == __last_next); \ |
500 | MACRO_END |
501 | |
502 | #define __DEBUG_STAT_INCREMENT_INRANGE vm_clump_inrange++ |
503 | #define __DEBUG_STAT_INCREMENT_INSERTS vm_clump_inserts++ |
504 | #define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free) vm_clump_promotes+=__n_free |
505 | |
506 | #else |
507 | |
508 | #define __DEBUG_CHECK_BUDDIES(__check, __prev, __p, field) __check=1 |
509 | #define __DEBUG_VERIFY_LINKS(__i, __first, __n_free, __last_next) |
510 | #define __DEBUG_STAT_INCREMENT_INRANGE |
511 | #define __DEBUG_STAT_INCREMENT_INSERTS |
512 | #define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free) |
513 | |
514 | #endif /* if DEVELOPMENT || DEBUG */ |
515 | |
516 | /* |
517 | * Macro: vm_page_queue_enter_clump |
518 | * Function: |
519 | * Insert a new element into the free queue and clump pages within the same 16K boundary together |
520 | * |
521 | * Header: |
522 | * void vm_page_queue_enter_clump(q, elt, type, field) |
 *		vm_page_queue_t q;
524 | * <type> elt; |
525 | * <type> is what's in our queue |
526 | * <field> is the chain field in (*<type>) |
527 | * Note: |
528 | * This should only be used with Method 2 queue iteration (element chains) |
529 | */ |
530 | #if defined(__x86_64__) |
531 | #define vm_page_queue_enter_clump(head, elt, type, field) \ |
532 | MACRO_BEGIN \ |
533 | ppnum_t __clump_num; \ |
534 | unsigned int __i, __n, __n_free=1, __check=1; \ |
535 | vm_page_queue_entry_t __prev=0, __next, __last, __last_next, __first, __first_prev, __head_next; \ |
536 | vm_page_t __p; \ |
537 | \ |
538 | /* if elt is part of vm_pages[] */ \ |
539 | if((elt) >= vm_page_array_beginning_addr && (elt) < vm_page_array_boundary) { \ |
540 | __first = __last = (vm_page_queue_entry_t) (elt); \ |
541 | __clump_num = VM_PAGE_GET_CLUMP(elt); \ |
542 | __n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask; \ |
543 | /* scan backward looking for a buddy page */ \ |
544 | for(__i=0, __p=(elt)-1; __i<__n && __p>=vm_page_array_beginning_addr; __i++, __p--) { \ |
545 | if(__p->vmp_q_state == VM_PAGE_ON_FREE_Q && __clump_num == VM_PAGE_GET_CLUMP(__p)) { \ |
546 | if(__prev == 0) __prev = (vm_page_queue_entry_t) __p; \ |
547 | __first = (vm_page_queue_entry_t) __p; \ |
548 | __n_free++; \ |
549 | } \ |
550 | } \ |
551 | /* scan forward looking for a buddy page */ \ |
552 | for(__i=__n+1, __p=(elt)+1; __i<vm_clump_size && __p<vm_page_array_boundary; __i++, __p++) { \ |
553 | if(__p->vmp_q_state == VM_PAGE_ON_FREE_Q && __clump_num == VM_PAGE_GET_CLUMP(__p)) { \ |
554 | __DEBUG_CHECK_BUDDIES(__check, __prev, __p, field); \ |
555 | if(__prev == 0) __prev = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__p->field.prev); \ |
556 | __last = (vm_page_queue_entry_t) __p; \ |
557 | __n_free++; \ |
558 | } \ |
559 | } \ |
560 | __DEBUG_STAT_INCREMENT_INRANGE; \ |
561 | } \ |
562 | /* if elt is not part of vm_pages or if 1st page in clump, insert at tail */ \ |
563 | if(__prev == 0) __prev = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR((head)->prev); \ |
564 | \ |
565 | /* insert the element */ \ |
566 | __next = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__prev->next); \ |
567 | (elt)->field.next = __prev->next; \ |
568 | (elt)->field.prev = __next->prev; \ |
569 | __prev->next = __next->prev = VM_PAGE_PACK_PTR(elt); \ |
570 | __DEBUG_STAT_INCREMENT_INSERTS; \ |
571 | \ |
572 | /* check if clump needs to be promoted to head */ \ |
573 | if(__n_free >= vm_clump_promote_threshold && __n_free > 1) { \ |
574 | __first_prev = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__first->prev); \ |
575 | if(__first_prev != (head)) { /* if not at head already */ \ |
576 | __last_next = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__last->next); \ |
577 | /* verify that the links within the clump are consistent */ \ |
578 | __DEBUG_VERIFY_LINKS(__i, __first, __n_free, __last_next); \ |
579 | /* promote clump to head */ \ |
580 | __first_prev->next = __last->next; \ |
581 | __last_next->prev = __first->prev; \ |
582 | __first->prev = VM_PAGE_PACK_PTR(head); \ |
583 | __last->next = (head)->next; \ |
584 | __head_next = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR((head)->next); \ |
585 | __head_next->prev = VM_PAGE_PACK_PTR(__last); \ |
586 | (head)->next = VM_PAGE_PACK_PTR(__first); \ |
587 | __DEBUG_STAT_INCREMENT_PROMOTES(__n_free); \ |
588 | } \ |
589 | } \ |
590 | MACRO_END |
591 | #endif |
592 | |
593 | /* |
594 | * Macro: vm_page_queue_enter_first |
595 | * Function: |
596 | * Insert a new element at the head of the queue. |
597 | * Header: |
 *		void vm_page_queue_enter_first(q, elt, type, field)
 *		vm_page_queue_t q;
600 | * <type> elt; |
601 | * <type> is what's in our queue |
602 | * <field> is the chain field in (*<type>) |
603 | * Note: |
604 | * This should only be used with Method 2 queue iteration (element chains) |
605 | */ |
606 | #define vm_page_queue_enter_first(head, elt, type, field) \ |
607 | MACRO_BEGIN \ |
608 | vm_page_queue_entry_t __next; \ |
609 | \ |
610 | __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->next)); \ |
611 | if ((head) == __next) { \ |
612 | (head)->prev = VM_PAGE_PACK_PTR(elt); \ |
613 | } \ |
614 | else { \ |
615 | ((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(elt); \ |
616 | } \ |
617 | (elt)->field.next = VM_PAGE_PACK_PTR(__next); \ |
618 | (elt)->field.prev = VM_PAGE_PACK_PTR(head); \ |
619 | (head)->next = VM_PAGE_PACK_PTR(elt); \ |
620 | MACRO_END |
621 | |
622 | |
623 | /* |
624 | * Macro: vm_page_queue_remove |
625 | * Function: |
626 | * Remove an arbitrary item from the queue. |
627 | * Header: |
628 | * void vm_page_queue_remove(q, qe, type, field) |
629 | * arguments as in vm_page_queue_enter |
630 | * Note: |
631 | * This should only be used with Method 2 queue iteration (element chains) |
632 | */ |
633 | #define vm_page_queue_remove(head, elt, type, field) \ |
634 | MACRO_BEGIN \ |
635 | vm_page_queue_entry_t __next, __prev; \ |
636 | \ |
637 | __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.next)); \ |
638 | __prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.prev)); \ |
639 | \ |
640 | if ((head) == __next) \ |
641 | (head)->prev = VM_PAGE_PACK_PTR(__prev); \ |
642 | else \ |
643 | ((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(__prev); \ |
644 | \ |
645 | if ((head) == __prev) \ |
646 | (head)->next = VM_PAGE_PACK_PTR(__next); \ |
647 | else \ |
648 | ((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(__next); \ |
649 | \ |
650 | (elt)->field.next = 0; \ |
651 | (elt)->field.prev = 0; \ |
652 | MACRO_END |
653 | |
654 | |
655 | /* |
656 | * Macro: vm_page_queue_remove_first |
657 | * Function: |
658 | * Remove and return the entry at the head of |
659 | * the queue. |
660 | * Header: |
661 | * vm_page_queue_remove_first(head, entry, type, field) |
662 | * entry is returned by reference |
663 | * Note: |
664 | * This should only be used with Method 2 queue iteration (element chains) |
665 | */ |
666 | #define vm_page_queue_remove_first(head, entry, type, field) \ |
667 | MACRO_BEGIN \ |
668 | vm_page_queue_entry_t __next; \ |
669 | \ |
670 | (entry) = (type)(void *) VM_PAGE_UNPACK_PTR(((head)->next)); \ |
671 | __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((entry)->field.next)); \ |
672 | \ |
673 | if ((head) == __next) \ |
674 | (head)->prev = VM_PAGE_PACK_PTR(head); \ |
675 | else \ |
676 | ((type)(void *)(__next))->field.prev = VM_PAGE_PACK_PTR(head); \ |
677 | (head)->next = VM_PAGE_PACK_PTR(__next); \ |
678 | \ |
679 | (entry)->field.next = 0; \ |
680 | (entry)->field.prev = 0; \ |
681 | MACRO_END |
682 | |
683 | |
684 | /* |
685 | * Macro: vm_page_queue_remove_first_with_clump |
686 | * Function: |
687 | * Remove and return the entry at the head of the free queue |
688 | * end is set to 1 to indicate that we just returned the last page in a clump |
689 | * |
690 | * Header: |
691 | * vm_page_queue_remove_first_with_clump(head, entry, type, field, end) |
692 | * entry is returned by reference |
693 | * end is returned by reference |
694 | * Note: |
695 | * This should only be used with Method 2 queue iteration (element chains) |
696 | */ |
697 | #if defined(__x86_64__) |
698 | #define vm_page_queue_remove_first_with_clump(head, entry, type, field, end) \ |
699 | MACRO_BEGIN \ |
700 | vm_page_queue_entry_t __next; \ |
701 | \ |
702 | (entry) = (type)(void *) VM_PAGE_UNPACK_PTR(((head)->next)); \ |
703 | __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((entry)->field.next)); \ |
704 | \ |
705 | (end)=0; \ |
706 | if ((head) == __next) { \ |
707 | (head)->prev = VM_PAGE_PACK_PTR(head); \ |
708 | (end)=1; \ |
709 | } \ |
710 | else { \ |
711 | ((type)(void *)(__next))->field.prev = VM_PAGE_PACK_PTR(head); \ |
712 | if(VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(((type)(void *)(__next)))) (end)=1; \ |
713 | } \ |
714 | (head)->next = VM_PAGE_PACK_PTR(__next); \ |
715 | \ |
716 | (entry)->field.next = 0; \ |
717 | (entry)->field.prev = 0; \ |
718 | \ |
719 | MACRO_END |
720 | #endif |
721 | |
722 | /* |
723 | * Macro: vm_page_queue_end |
724 | * Function: |
725 | * Tests whether a new entry is really the end of |
726 | * the queue. |
727 | * Header: |
728 | * boolean_t vm_page_queue_end(q, qe) |
729 | * vm_page_queue_t q; |
730 | * vm_page_queue_entry_t qe; |
731 | */ |
732 | #define vm_page_queue_end(q, qe) ((q) == (qe)) |
733 | |
734 | |
735 | /* |
736 | * Macro: vm_page_queue_empty |
737 | * Function: |
738 | * Tests whether a queue is empty. |
739 | * Header: |
740 | * boolean_t vm_page_queue_empty(q) |
741 | * vm_page_queue_t q; |
742 | */ |
743 | #define vm_page_queue_empty(q) vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q))) |
744 | |
745 | |
746 | |
747 | /* |
748 | * Macro: vm_page_queue_first |
749 | * Function: |
 *	Returns the first entry in the queue.
 * Header:
 *	uintptr_t vm_page_queue_first(q)
753 | * vm_page_queue_t q; \* IN *\ |
754 | */ |
755 | #define vm_page_queue_first(q) (VM_PAGE_UNPACK_PTR((q)->next)) |
756 | |
757 | |
758 | |
759 | /* |
760 | * Macro: vm_page_queue_last |
761 | * Function: |
762 | * Returns the last entry in the queue. |
763 | * Header: |
 *	vm_page_queue_entry_t vm_page_queue_last(q)
 *	vm_page_queue_t q;	\* IN *\
766 | */ |
767 | #define vm_page_queue_last(q) (VM_PAGE_UNPACK_PTR((q)->prev)) |
768 | |
769 | |
770 | |
771 | /* |
772 | * Macro: vm_page_queue_next |
773 | * Function: |
774 | * Returns the entry after an item in the queue. |
775 | * Header: |
 *	uintptr_t vm_page_queue_next(qc)
777 | * vm_page_queue_t qc; |
778 | */ |
779 | #define vm_page_queue_next(qc) (VM_PAGE_UNPACK_PTR((qc)->next)) |
780 | |
781 | |
782 | |
783 | /* |
784 | * Macro: vm_page_queue_prev |
785 | * Function: |
786 | * Returns the entry before an item in the queue. |
787 | * Header: |
 *	uintptr_t vm_page_queue_prev(qc)
789 | * vm_page_queue_t qc; |
790 | */ |
791 | #define vm_page_queue_prev(qc) (VM_PAGE_UNPACK_PTR((qc)->prev)) |
792 | |
793 | |
794 | |
795 | /* |
796 | * Macro: vm_page_queue_iterate |
797 | * Function: |
 *	Iterate over each item in the queue.
799 | * Generates a 'for' loop, setting elt to |
800 | * each item in turn (by reference). |
801 | * Header: |
802 | * vm_page_queue_iterate(q, elt, type, field) |
 *		vm_page_queue_t q;
804 | * <type> elt; |
805 | * <type> is what's in our queue |
806 | * <field> is the chain field in (*<type>) |
807 | * Note: |
808 | * This should only be used with Method 2 queue iteration (element chains) |
809 | */ |
810 | #define vm_page_queue_iterate(head, elt, type, field) \ |
811 | for ((elt) = (type)(void *) vm_page_queue_first(head); \ |
812 | !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \ |
813 | (elt) = (type)(void *) vm_page_queue_next(&(elt)->field)) |
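
/*
 * Iteration sketch (page queues lock held; vmp_pageq is the chain field used
 * by the global queues):
 *
 *	vm_page_t	mem;
 *
 *	vm_page_queue_iterate(&vm_page_queue_active, mem, vm_page_t, vmp_pageq) {
 *		\* examine mem... do not free it while iterating *\
 *	}
 */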
814 | |
815 | #else |
816 | |
817 | #define VM_VPLQ_ALIGNMENT 128 |
818 | #define VM_PACKED_POINTER_ALIGNMENT 4 |
819 | #define VM_PACKED_POINTER_SHIFT 0 |
820 | |
821 | #define VM_PACKED_FROM_VM_PAGES_ARRAY 0 |
822 | |
823 | #define VM_PAGE_PACK_PTR(p) (p) |
824 | #define VM_PAGE_UNPACK_PTR(p) ((uintptr_t)(p)) |
825 | |
826 | #define VM_PAGE_OBJECT(p) (vm_object_t)(p->vmp_object) |
827 | #define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o))) |
828 | |
829 | |
830 | #define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \ |
831 | MACRO_BEGIN \ |
832 | (p)->vmp_pageq.next = 0; \ |
833 | (p)->vmp_pageq.prev = 0; \ |
834 | MACRO_END |
835 | |
836 | #define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) ((queue_entry_t)(p)) |
837 | |
838 | #define vm_page_remque remque |
839 | #define vm_page_enqueue_tail enqueue_tail |
840 | #define vm_page_queue_init queue_init |
841 | #define vm_page_queue_enter queue_enter |
842 | #define vm_page_queue_enter_first queue_enter_first |
843 | #define vm_page_queue_remove queue_remove |
844 | #define vm_page_queue_remove_first queue_remove_first |
845 | #define vm_page_queue_end queue_end |
846 | #define vm_page_queue_empty queue_empty |
847 | #define vm_page_queue_first queue_first |
848 | #define vm_page_queue_last queue_last |
849 | #define vm_page_queue_next queue_next |
850 | #define vm_page_queue_prev queue_prev |
851 | #define vm_page_queue_iterate queue_iterate |
852 | |
853 | #endif |
854 | |
855 | |
856 | |
857 | /* |
858 | * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q |
859 | * represents a set of aging bins that are 'protected'... |
860 | * |
861 | * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have |
862 | * not yet been 'claimed' but have been aged out of the protective bins |
863 | * this occurs in vm_page_speculate when it advances to the next bin |
864 | * and discovers that it is still occupied... at that point, all of the |
865 | * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q. the pages |
866 | * in that bin are all guaranteed to have reached at least the maximum age |
867 | * we allow for a protected page... they can be older if there is no |
868 | * memory pressure to pull them from the bin, or there are no new speculative pages |
869 | * being generated to push them out. |
870 | * this list is the one that vm_pageout_scan will prefer when looking |
871 | * for pages to move to the underweight free list |
872 | * |
873 | * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS |
874 | * defines the amount of time a speculative page is normally |
875 | * allowed to live in the 'protected' state (i.e. not available |
876 | * to be stolen if vm_pageout_scan is running and looking for |
877 | * pages)... however, if the total number of speculative pages |
878 | * in the protected state exceeds our limit (defined in vm_pageout.c) |
879 | * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then |
880 | * vm_pageout_scan is allowed to steal pages from the protected |
881 | * bucket even if they are underage. |
882 | * |
883 | * vm_pageout_scan is also allowed to pull pages from a protected |
884 | * bin if the bin has reached the "age of consent" we've set |
885 | */ |
886 | #define VM_PAGE_MAX_SPECULATIVE_AGE_Q 10 |
887 | #define VM_PAGE_MIN_SPECULATIVE_AGE_Q 1 |
888 | #define VM_PAGE_SPECULATIVE_AGED_Q 0 |
889 | |
890 | #define VM_PAGE_SPECULATIVE_Q_AGE_MS 500 |
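
/*
 * With the defaults above, a speculative page is protected for roughly
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * == 10 * 500 ms == 5 seconds before it ages into
 * VM_PAGE_SPECULATIVE_AGED_Q, though vm_pageout_scan may steal protected
 * pages earlier under the pressure conditions described above.
 */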
891 | |
892 | struct vm_speculative_age_q { |
893 | /* |
894 | * memory queue for speculative pages via clustered pageins |
895 | */ |
896 | vm_page_queue_head_t age_q; |
897 | mach_timespec_t age_ts; |
898 | } __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); |
899 | |
900 | |
901 | |
902 | extern |
903 | struct vm_speculative_age_q vm_page_queue_speculative[]; |
904 | |
905 | extern int speculative_steal_index; |
906 | extern int speculative_age_index; |
907 | extern unsigned int vm_page_speculative_q_age_ms; |
908 | |
909 | |
910 | typedef struct vm_locks_array { |
911 | char pad __attribute__ ((aligned (64))); |
912 | lck_mtx_t vm_page_queue_lock2 __attribute__ ((aligned (64))); |
913 | lck_mtx_t vm_page_queue_free_lock2 __attribute__ ((aligned (64))); |
914 | char pad2 __attribute__ ((aligned (64))); |
915 | } vm_locks_array_t; |
916 | |
917 | |
918 | #if CONFIG_BACKGROUND_QUEUE |
919 | extern void vm_page_assign_background_state(vm_page_t mem); |
920 | extern void vm_page_update_background_state(vm_page_t mem); |
921 | extern void vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first); |
922 | extern void vm_page_remove_from_backgroundq(vm_page_t mem); |
923 | #endif |
924 | |
925 | #define VM_PAGE_WIRED(m) ((m)->vmp_q_state == VM_PAGE_IS_WIRED) |
926 | #define NEXT_PAGE(m) ((m)->vmp_snext) |
927 | #define NEXT_PAGE_PTR(m) (&(m)->vmp_snext) |
928 | |
929 | /* |
930 | * XXX The unusual bit should not be necessary. Most of the bit |
931 | * XXX fields above really want to be masks. |
932 | */ |
933 | |
934 | /* |
935 | * For debugging, this macro can be defined to perform |
936 | * some useful check on a page structure. |
937 | * INTENTIONALLY left as a no-op so that the |
938 | * current call-sites can be left intact for future uses. |
939 | */ |
940 | |
941 | #define VM_PAGE_CHECK(mem) \ |
942 | MACRO_BEGIN \ |
943 | MACRO_END |
944 | |
945 | /* Page coloring: |
946 | * |
947 | * The free page list is actually n lists, one per color, |
948 | * where the number of colors is a function of the machine's |
949 | * cache geometry set at system initialization. To disable |
950 | * coloring, set vm_colors to 1 and vm_color_mask to 0. |
951 | * The boot-arg "colors" may be used to override vm_colors. |
952 | * Note that there is little harm in having more colors than needed. |
953 | */ |
954 | |
955 | #define MAX_COLORS 128 |
956 | #define DEFAULT_COLORS 32 |
957 | |
958 | extern |
959 | unsigned int vm_colors; /* must be in range 1..MAX_COLORS */ |
960 | extern |
961 | unsigned int vm_color_mask; /* must be (vm_colors-1) */ |
962 | extern |
963 | unsigned int vm_cache_geometry_colors; /* optimal #colors based on cache geometry */ |
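
/*
 * Example: with vm_colors == 32 (so vm_color_mask == 31), a page whose
 * physical page number is 0x12345 lands on free-list color
 * (0x12345 & 31) == 5.  On x86_64 the clump number is used in place of the
 * raw page number; see VM_PAGE_GET_COLOR above.
 */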
964 | |
965 | /* |
966 | * Wired memory is a very limited resource and we can't let users exhaust it |
967 | * and deadlock the entire system. We enforce the following limits: |
968 | * |
969 | * vm_user_wire_limit (default: all memory minus vm_global_no_user_wire_amount) |
970 | * how much memory can be user-wired in one user task |
971 | * |
972 | * vm_global_user_wire_limit (default: same as vm_user_wire_limit) |
973 | * how much memory can be user-wired in all user tasks |
974 | * |
975 | * vm_global_no_user_wire_amount (default: VM_NOT_USER_WIREABLE) |
976 | * how much memory must remain user-unwired at any time |
977 | */ |
978 | #define VM_NOT_USER_WIREABLE (64*1024*1024) /* 64MB */ |
979 | extern |
980 | vm_map_size_t vm_user_wire_limit; |
981 | extern |
982 | vm_map_size_t vm_global_user_wire_limit; |
983 | extern |
984 | vm_map_size_t vm_global_no_user_wire_amount; |
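
/*
 * Example (sketch): on a machine with 8 GB of physical memory and the
 * default vm_global_no_user_wire_amount of 64 MB, vm_user_wire_limit and
 * vm_global_user_wire_limit both default to 8 GB - 64 MB, so at least
 * 64 MB can never be user-wired.
 */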
985 | |
986 | /* |
987 | * Each pageable resident page falls into one of three lists: |
988 | * |
989 | * free |
990 | * Available for allocation now. The free list is |
991 | * actually an array of lists, one per color. |
992 | * inactive |
993 | * Not referenced in any map, but still has an |
994 | * object/offset-page mapping, and may be dirty. |
995 | * This is the list of pages that should be |
996 | * paged out next. There are actually two |
997 | * inactive lists, one for pages brought in from |
998 | * disk or other backing store, and another |
999 | * for "zero-filled" pages. See vm_pageout_scan() |
1000 | * for the distinction and usage. |
1001 | * active |
1002 | * A list of pages which have been placed in |
1003 | * at least one physical map. This list is |
1004 | * ordered, in LRU-like fashion. |
1005 | */ |
1006 | |
1007 | |
1008 | #define VPL_LOCK_SPIN 1 |
1009 | |
1010 | struct vpl { |
1011 | vm_page_queue_head_t vpl_queue; |
1012 | unsigned int vpl_count; |
1013 | unsigned int vpl_internal_count; |
1014 | unsigned int vpl_external_count; |
1015 | #ifdef VPL_LOCK_SPIN |
1016 | lck_spin_t vpl_lock; |
1017 | #else |
1018 | lck_mtx_t vpl_lock; |
1019 | lck_mtx_ext_t vpl_lock_ext; |
1020 | #endif |
1021 | }; |
1022 | |
1023 | struct vplq { |
1024 | union { |
1025 | char cache_line_pad[VM_VPLQ_ALIGNMENT]; |
1026 | struct vpl vpl; |
1027 | } vpl_un; |
1028 | }; |
1029 | extern |
1030 | unsigned int vm_page_local_q_count; |
1031 | extern |
1032 | struct vplq *vm_page_local_q; |
1033 | extern |
1034 | unsigned int vm_page_local_q_soft_limit; |
1035 | extern |
1036 | unsigned int vm_page_local_q_hard_limit; |
1037 | extern |
1038 | vm_locks_array_t vm_page_locks; |
1039 | |
1040 | extern |
1041 | vm_page_queue_head_t vm_lopage_queue_free; /* low memory free queue */ |
1042 | extern |
1043 | vm_page_queue_head_t vm_page_queue_active; /* active memory queue */ |
1044 | extern |
1045 | vm_page_queue_head_t vm_page_queue_inactive; /* inactive memory queue for normal pages */ |
1046 | #if CONFIG_SECLUDED_MEMORY |
1047 | extern |
1048 | vm_page_queue_head_t vm_page_queue_secluded; /* reclaimable pages secluded for Camera */ |
1049 | #endif /* CONFIG_SECLUDED_MEMORY */ |
1050 | extern |
1051 | vm_page_queue_head_t vm_page_queue_cleaned; /* clean-queue inactive memory */ |
1052 | extern |
1053 | vm_page_queue_head_t vm_page_queue_anonymous; /* inactive memory queue for anonymous pages */ |
1054 | extern |
1055 | vm_page_queue_head_t vm_page_queue_throttled; /* memory queue for throttled pageout pages */ |
1056 | |
1057 | extern |
1058 | queue_head_t vm_objects_wired; |
1059 | extern |
1060 | lck_spin_t vm_objects_wired_lock; |
1061 | |
1062 | #if CONFIG_BACKGROUND_QUEUE |
1063 | |
1064 | #define VM_PAGE_BACKGROUND_TARGET_MAX 50000 |
1065 | |
1066 | #define VM_PAGE_BG_DISABLED 0 |
1067 | #define VM_PAGE_BG_LEVEL_1 1 |
1068 | |
1069 | extern |
1070 | vm_page_queue_head_t vm_page_queue_background; |
1071 | extern |
1072 | uint64_t vm_page_background_promoted_count; |
1073 | extern |
1074 | uint32_t vm_page_background_count; |
1075 | extern |
1076 | uint32_t vm_page_background_target; |
1077 | extern |
1078 | uint32_t vm_page_background_internal_count; |
1079 | extern |
1080 | uint32_t vm_page_background_external_count; |
1081 | extern |
1082 | uint32_t vm_page_background_mode; |
1083 | extern |
1084 | uint32_t vm_page_background_exclude_external; |
1085 | |
1086 | #endif |
1087 | |
1088 | extern |
1089 | vm_offset_t first_phys_addr; /* physical address for first_page */ |
1090 | extern |
1091 | vm_offset_t last_phys_addr; /* physical address for last_page */ |
1092 | |
1093 | extern |
1094 | unsigned int vm_page_free_count; /* How many pages are free? (sum of all colors) */ |
1095 | extern |
1096 | unsigned int vm_page_active_count; /* How many pages are active? */ |
1097 | extern |
1098 | unsigned int vm_page_inactive_count; /* How many pages are inactive? */ |
1099 | #if CONFIG_SECLUDED_MEMORY |
1100 | extern |
1101 | unsigned int vm_page_secluded_count; /* How many pages are secluded? */ |
1102 | extern |
1103 | unsigned int vm_page_secluded_count_free; |
1104 | extern |
1105 | unsigned int vm_page_secluded_count_inuse; |
1106 | #endif /* CONFIG_SECLUDED_MEMORY */ |
1107 | extern |
1108 | unsigned int vm_page_cleaned_count; /* How many pages are in the clean queue? */ |
1109 | extern |
1110 | unsigned int vm_page_throttled_count;/* How many inactives are throttled */ |
1111 | extern |
1112 | unsigned int vm_page_speculative_count; /* How many speculative pages are unclaimed? */ |
1113 | extern unsigned int vm_page_pageable_internal_count; |
1114 | extern unsigned int vm_page_pageable_external_count; |
1115 | extern |
1116 | unsigned int vm_page_xpmapped_external_count; /* How many pages are mapped executable? */ |
1117 | extern |
1118 | unsigned int vm_page_external_count; /* How many pages are file-backed? */ |
1119 | extern |
1120 | unsigned int vm_page_internal_count; /* How many pages are anonymous? */ |
1121 | extern |
1122 | unsigned int vm_page_wire_count; /* How many pages are wired? */ |
1123 | extern |
1124 | unsigned int vm_page_wire_count_initial; /* How many pages wired at startup */ |
1125 | extern |
1126 | unsigned int vm_page_free_target; /* How many do we want free? */ |
1127 | extern |
1128 | unsigned int vm_page_free_min; /* When to wakeup pageout */ |
1129 | extern |
1130 | unsigned int vm_page_throttle_limit; /* When to throttle new page creation */ |
1131 | extern |
1132 | unsigned int vm_page_inactive_target;/* How many do we want inactive? */ |
1133 | #if CONFIG_SECLUDED_MEMORY |
1134 | extern |
1135 | unsigned int vm_page_secluded_target;/* How many do we want secluded? */ |
1136 | #endif /* CONFIG_SECLUDED_MEMORY */ |
1137 | extern |
1138 | unsigned int vm_page_anonymous_min; /* When it's ok to pre-clean */ |
1139 | extern |
1140 | unsigned int vm_page_free_reserved; /* How many pages reserved to do pageout */ |
1141 | extern |
1142 | unsigned int vm_page_gobble_count; |
1143 | extern |
unsigned int	vm_page_stolen_count;	/* Count of stolen pages not accounted in zones */
1145 | |
1146 | |
1147 | #if DEVELOPMENT || DEBUG |
1148 | extern |
1149 | unsigned int vm_page_speculative_used; |
1150 | #endif |
1151 | |
1152 | extern |
1153 | unsigned int vm_page_purgeable_count;/* How many pages are purgeable now ? */ |
1154 | extern |
1155 | unsigned int vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */ |
1156 | extern |
1157 | uint64_t vm_page_purged_count; /* How many pages got purged so far ? */ |
1158 | |
1159 | extern unsigned int vm_page_free_wanted; |
1160 | /* how many threads are waiting for memory */ |
1161 | |
1162 | extern unsigned int vm_page_free_wanted_privileged; |
1163 | /* how many VM privileged threads are waiting for memory */ |
1164 | #if CONFIG_SECLUDED_MEMORY |
1165 | extern unsigned int vm_page_free_wanted_secluded; |
1166 | /* how many threads are waiting for secluded memory */ |
1167 | #endif /* CONFIG_SECLUDED_MEMORY */ |
1168 | |
1169 | extern const ppnum_t vm_page_fictitious_addr; |
1170 | /* (fake) phys_addr of fictitious pages */ |
1171 | |
1172 | extern const ppnum_t vm_page_guard_addr; |
1173 | /* (fake) phys_addr of guard pages */ |
1174 | |
1175 | |
1176 | extern boolean_t vm_page_deactivate_hint; |
1177 | |
1178 | extern int vm_compressor_mode; |
1179 | |
1180 | /* |
1181 | 0 = all pages avail ( default. ) |
1182 | 1 = disable high mem ( cap max pages to 4G) |
1183 | 2 = prefer himem |
1184 | */ |
1185 | extern int vm_himemory_mode; |
1186 | |
1187 | extern boolean_t vm_lopage_needed; |
1188 | extern uint32_t vm_lopage_free_count; |
1189 | extern uint32_t vm_lopage_free_limit; |
1190 | extern uint32_t vm_lopage_lowater; |
1191 | extern boolean_t vm_lopage_refill; |
1192 | extern uint64_t max_valid_dma_address; |
1193 | extern ppnum_t max_valid_low_ppnum; |
1194 | |
1195 | /* |
1196 | * Prototypes for functions exported by this module. |
1197 | */ |
1198 | extern void vm_page_bootstrap( |
1199 | vm_offset_t *startp, |
1200 | vm_offset_t *endp); |
1201 | |
1202 | extern void vm_page_module_init(void); |
1203 | |
1204 | extern void vm_page_init_local_q(void); |
1205 | |
1206 | extern void vm_page_create( |
1207 | ppnum_t start, |
1208 | ppnum_t end); |
1209 | |
1210 | extern vm_page_t kdp_vm_page_lookup( |
1211 | vm_object_t object, |
1212 | vm_object_offset_t offset); |
1213 | |
1214 | extern vm_page_t vm_page_lookup( |
1215 | vm_object_t object, |
1216 | vm_object_offset_t offset); |
1217 | |
1218 | extern vm_page_t vm_page_grab_fictitious(void); |
1219 | |
1220 | extern vm_page_t vm_page_grab_guard(void); |
1221 | |
1222 | extern void vm_page_release_fictitious( |
1223 | vm_page_t page); |
1224 | |
1225 | extern void vm_page_more_fictitious(void); |
1226 | |
1227 | extern int vm_pool_low(void); |
1228 | |
1229 | extern vm_page_t vm_page_grab(void); |
1230 | extern vm_page_t vm_page_grab_options(int flags); |
1231 | #if CONFIG_SECLUDED_MEMORY |
1232 | #define VM_PAGE_GRAB_SECLUDED 0x00000001 |
1233 | #endif /* CONFIG_SECLUDED_MEMORY */ |
1234 | |
1235 | extern vm_page_t vm_page_grablo(void); |
1236 | |
1237 | extern void vm_page_release( |
1238 | vm_page_t page, |
1239 | boolean_t page_queues_locked); |
1240 | |
1241 | extern boolean_t vm_page_wait( |
1242 | int interruptible ); |
1243 | |
1244 | extern vm_page_t vm_page_alloc( |
1245 | vm_object_t object, |
1246 | vm_object_offset_t offset); |
1247 | |
1248 | extern vm_page_t vm_page_alloc_guard( |
1249 | vm_object_t object, |
1250 | vm_object_offset_t offset); |
1251 | |
1252 | extern void vm_page_init( |
1253 | vm_page_t page, |
1254 | ppnum_t phys_page, |
1255 | boolean_t lopage); |
1256 | |
1257 | extern void vm_page_free( |
1258 | vm_page_t page); |
1259 | |
1260 | extern void vm_page_free_unlocked( |
1261 | vm_page_t page, |
1262 | boolean_t remove_from_hash); |
1263 | |
1264 | extern void vm_page_balance_inactive( |
1265 | int max_to_move); |
1266 | |
1267 | extern void vm_page_activate( |
1268 | vm_page_t page); |
1269 | |
1270 | extern void vm_page_deactivate( |
1271 | vm_page_t page); |
1272 | |
1273 | extern void vm_page_deactivate_internal( |
1274 | vm_page_t page, |
1275 | boolean_t clear_hw_reference); |
1276 | |
1277 | extern void vm_page_enqueue_cleaned(vm_page_t page); |
1278 | |
1279 | extern void vm_page_lru( |
1280 | vm_page_t page); |
1281 | |
1282 | extern void vm_page_speculate( |
1283 | vm_page_t page, |
1284 | boolean_t new); |
1285 | |
1286 | extern void vm_page_speculate_ageit( |
1287 | struct vm_speculative_age_q *aq); |
1288 | |
1289 | extern void vm_page_reactivate_all_throttled(void); |
1290 | |
1291 | extern void vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks); |
1292 | |
1293 | extern void vm_page_rename( |
1294 | vm_page_t page, |
1295 | vm_object_t new_object, |
1296 | vm_object_offset_t new_offset); |
1297 | |
1298 | extern void vm_page_insert( |
1299 | vm_page_t page, |
1300 | vm_object_t object, |
1301 | vm_object_offset_t offset); |
1302 | |
1303 | extern void vm_page_insert_wired( |
1304 | vm_page_t page, |
1305 | vm_object_t object, |
1306 | vm_object_offset_t offset, |
1307 | vm_tag_t tag); |
1308 | |
1309 | extern void vm_page_insert_internal( |
1310 | vm_page_t page, |
1311 | vm_object_t object, |
1312 | vm_object_offset_t offset, |
1313 | vm_tag_t tag, |
1314 | boolean_t queues_lock_held, |
1315 | boolean_t insert_in_hash, |
1316 | boolean_t batch_pmap_op, |
1317 | boolean_t delayed_accounting, |
1318 | uint64_t *delayed_ledger_update); |
1319 | |
1320 | extern void vm_page_replace( |
1321 | vm_page_t mem, |
1322 | vm_object_t object, |
1323 | vm_object_offset_t offset); |
1324 | |
1325 | extern void vm_page_remove( |
1326 | vm_page_t page, |
1327 | boolean_t remove_from_hash); |
1328 | |
1329 | extern void vm_page_zero_fill( |
1330 | vm_page_t page); |
1331 | |
1332 | extern void vm_page_part_zero_fill( |
1333 | vm_page_t m, |
1334 | vm_offset_t m_pa, |
1335 | vm_size_t len); |
1336 | |
1337 | extern void vm_page_copy( |
1338 | vm_page_t src_page, |
1339 | vm_page_t dest_page); |
1340 | |
1341 | extern void vm_page_part_copy( |
1342 | vm_page_t src_m, |
1343 | vm_offset_t src_pa, |
1344 | vm_page_t dst_m, |
1345 | vm_offset_t dst_pa, |
1346 | vm_size_t len); |
1347 | |
1348 | extern void vm_page_wire( |
1349 | vm_page_t page, |
1350 | vm_tag_t tag, |
1351 | boolean_t check_memorystatus); |
1352 | |
1353 | extern void vm_page_unwire( |
1354 | vm_page_t page, |
1355 | boolean_t queueit); |
1356 | |
1357 | extern void vm_set_page_size(void); |
1358 | |
1359 | extern void vm_page_gobble( |
1360 | vm_page_t page); |
1361 | |
1362 | extern void vm_page_validate_cs(vm_page_t page); |
1363 | extern void vm_page_validate_cs_mapped( |
1364 | vm_page_t page, |
1365 | const void *kaddr); |
1366 | extern void vm_page_validate_cs_mapped_slow( |
1367 | vm_page_t page, |
1368 | const void *kaddr); |
1369 | extern void vm_page_validate_cs_mapped_chunk( |
1370 | vm_page_t page, |
1371 | const void *kaddr, |
1372 | vm_offset_t chunk_offset, |
1373 | vm_size_t chunk_size, |
1374 | boolean_t *validated, |
1375 | unsigned *tainted); |
1376 | |
1377 | extern void vm_page_free_prepare_queues( |
1378 | vm_page_t page); |
1379 | |
1380 | extern void vm_page_free_prepare_object( |
1381 | vm_page_t page, |
1382 | boolean_t remove_from_hash); |
1383 | |
1384 | #if CONFIG_IOSCHED |
1385 | extern wait_result_t vm_page_sleep( |
1386 | vm_object_t object, |
1387 | vm_page_t m, |
1388 | int interruptible); |
1389 | #endif |
1390 | |
1391 | extern void vm_pressure_response(void); |
1392 | |
1393 | #if CONFIG_JETSAM |
1394 | extern void memorystatus_pages_update(unsigned int pages_avail); |
1395 | |
1396 | #define VM_CHECK_MEMORYSTATUS do { \ |
1397 | memorystatus_pages_update( \ |
1398 | vm_page_pageable_external_count + \ |
1399 | vm_page_free_count + \ |
1400 | (VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \ |
1401 | ); \ |
1402 | } while(0) |
1403 | |
1404 | #else /* CONFIG_JETSAM */ |
1405 | |
1406 | #if CONFIG_EMBEDDED |
1407 | |
1408 | #define VM_CHECK_MEMORYSTATUS do {} while(0) |
1409 | |
1410 | #else /* CONFIG_EMBEDDED */ |
1411 | |
1412 | #define VM_CHECK_MEMORYSTATUS vm_pressure_response() |
1413 | |
1414 | #endif /* CONFIG_EMBEDDED */ |
1415 | |
1416 | #endif /* CONFIG_JETSAM */ |
1417 | |
1418 | /* |
1419 | * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are |
1420 | * protected by the object lock. |
1421 | */ |
1422 | |
1423 | #if CONFIG_EMBEDDED |
1424 | #define SET_PAGE_DIRTY(m, set_pmap_modified) \ |
1425 | MACRO_BEGIN \ |
1426 | vm_page_t __page__ = (m); \ |
1427 | if (__page__->vmp_pmapped == TRUE && \ |
1428 | __page__->vmp_wpmapped == TRUE && \ |
1429 | __page__->vmp_dirty == FALSE && \ |
1430 | (set_pmap_modified)) { \ |
1431 | pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \ |
1432 | } \ |
1433 | __page__->vmp_dirty = TRUE; \ |
1434 | MACRO_END |
1435 | #else /* CONFIG_EMBEDDED */ |
1436 | #define SET_PAGE_DIRTY(m, set_pmap_modified) \ |
1437 | MACRO_BEGIN \ |
1438 | vm_page_t __page__ = (m); \ |
1439 | __page__->vmp_dirty = TRUE; \ |
1440 | MACRO_END |
1441 | #endif /* CONFIG_EMBEDDED */ |
1442 | |
1443 | #define PAGE_ASSERT_WAIT(m, interruptible) \ |
1444 | (((m)->vmp_wanted = TRUE), \ |
1445 | assert_wait((event_t) (m), (interruptible))) |
1446 | |
1447 | #if CONFIG_IOSCHED |
1448 | #define PAGE_SLEEP(o, m, interruptible) \ |
1449 | vm_page_sleep(o, m, interruptible) |
1450 | #else |
1451 | #define PAGE_SLEEP(o, m, interruptible) \ |
1452 | (((m)->vmp_wanted = TRUE), \ |
1453 | thread_sleep_vm_object((o), (m), (interruptible))) |
1454 | #endif |
1455 | |
1456 | #define PAGE_WAKEUP_DONE(m) \ |
1457 | MACRO_BEGIN \ |
1458 | (m)->vmp_busy = FALSE; \ |
1459 | if ((m)->vmp_wanted) { \ |
1460 | (m)->vmp_wanted = FALSE; \ |
1461 | thread_wakeup((event_t) (m)); \ |
1462 | } \ |
1463 | MACRO_END |
1464 | |
1465 | #define PAGE_WAKEUP(m) \ |
1466 | MACRO_BEGIN \ |
1467 | if ((m)->vmp_wanted) { \ |
1468 | (m)->vmp_wanted = FALSE; \ |
1469 | thread_wakeup((event_t) (m)); \ |
1470 | } \ |
1471 | MACRO_END |
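
/*
 * Typical busy-page protocol using the macros above (sketch; the VM object
 * lock is held on entry and PAGE_SLEEP handles blocking internally):
 *
 *	while (m->vmp_busy)
 *		PAGE_SLEEP(object, m, THREAD_UNINT);
 *	m->vmp_busy = TRUE;
 *	\* ...operate on the page... *\
 *	PAGE_WAKEUP_DONE(m);
 */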
1472 | |
1473 | #define VM_PAGE_FREE(p) \ |
1474 | MACRO_BEGIN \ |
1475 | vm_page_free_unlocked(p, TRUE); \ |
1476 | MACRO_END |
1477 | |
1478 | #define VM_PAGE_GRAB_FICTITIOUS(M) \ |
1479 | MACRO_BEGIN \ |
1480 | while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL) \ |
1481 | vm_page_more_fictitious(); \ |
1482 | MACRO_END |
1483 | |
1484 | #define VM_PAGE_WAIT() ((void)vm_page_wait(THREAD_UNINT)) |
1485 | |
1486 | #define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2) |
1487 | #define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2) |
1488 | |
1489 | #define vm_page_lock_queues() lck_mtx_lock(&vm_page_queue_lock) |
1490 | #define vm_page_trylock_queues() lck_mtx_try_lock(&vm_page_queue_lock) |
1491 | #define vm_page_unlock_queues() lck_mtx_unlock(&vm_page_queue_lock) |
1492 | |
1493 | #define vm_page_lockspin_queues() lck_mtx_lock_spin(&vm_page_queue_lock) |
1494 | #define vm_page_trylockspin_queues() lck_mtx_try_lock_spin(&vm_page_queue_lock) |
1495 | #define vm_page_lockconvert_queues() lck_mtx_convert_spin(&vm_page_queue_lock) |
1496 | |
1497 | #ifdef VPL_LOCK_SPIN |
1498 | #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr) |
1499 | #define VPL_LOCK(vpl) lck_spin_lock(vpl) |
1500 | #define VPL_UNLOCK(vpl) lck_spin_unlock(vpl) |
1501 | #else |
1502 | #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr) |
1503 | #define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl) |
1504 | #define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl) |
1505 | #endif |
1506 | |
1507 | |
1508 | #if DEVELOPMENT || DEBUG |
1509 | #define VM_PAGE_SPECULATIVE_USED_ADD() \ |
1510 | MACRO_BEGIN \ |
1511 | OSAddAtomic(1, &vm_page_speculative_used); \ |
1512 | MACRO_END |
1513 | #else |
1514 | #define VM_PAGE_SPECULATIVE_USED_ADD() |
1515 | #endif |
1516 | |
1517 | |
1518 | #define VM_PAGE_CONSUME_CLUSTERED(mem) \ |
1519 | MACRO_BEGIN \ |
1520 | ppnum_t __phys_page; \ |
1521 | __phys_page = VM_PAGE_GET_PHYS_PAGE(mem); \ |
1522 | pmap_lock_phys_page(__phys_page); \ |
1523 | if (mem->vmp_clustered) { \ |
1524 | vm_object_t o; \ |
1525 | o = VM_PAGE_OBJECT(mem); \ |
1526 | assert(o); \ |
1527 | o->pages_used++; \ |
1528 | mem->vmp_clustered = FALSE; \ |
1529 | VM_PAGE_SPECULATIVE_USED_ADD(); \ |
1530 | } \ |
1531 | pmap_unlock_phys_page(__phys_page); \ |
1532 | MACRO_END |
1533 | |
1534 | |
1535 | #define VM_PAGE_COUNT_AS_PAGEIN(mem) \ |
1536 | MACRO_BEGIN \ |
1537 | { \ |
1538 | vm_object_t o; \ |
1539 | o = VM_PAGE_OBJECT(mem); \ |
1540 | DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL); \ |
1541 | current_task()->pageins++; \ |
1542 | if (o->internal) { \ |
1543 | DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \ |
1544 | } else { \ |
1545 | DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \ |
1546 | } \ |
1547 | } \ |
1548 | MACRO_END |
1549 | |
1550 | /* adjust for stolen pages accounted elsewhere */ |
1551 | #define VM_PAGE_MOVE_STOLEN(page_count) \ |
1552 | MACRO_BEGIN \ |
1553 | vm_page_stolen_count -= (page_count); \ |
1554 | vm_page_wire_count_initial -= (page_count); \ |
1555 | MACRO_END |
1556 | |
1557 | #define DW_vm_page_unwire 0x01 |
1558 | #define DW_vm_page_wire 0x02 |
1559 | #define DW_vm_page_free 0x04 |
1560 | #define DW_vm_page_activate 0x08 |
1561 | #define DW_vm_page_deactivate_internal 0x10 |
1562 | #define DW_vm_page_speculate 0x20 |
1563 | #define DW_vm_page_lru 0x40 |
1564 | #define DW_vm_pageout_throttle_up 0x80 |
1565 | #define DW_PAGE_WAKEUP 0x100 |
1566 | #define DW_clear_busy 0x200 |
1567 | #define DW_clear_reference 0x400 |
1568 | #define DW_set_reference 0x800 |
1569 | #define DW_move_page 0x1000 |
1570 | #define DW_VM_PAGE_QUEUES_REMOVE 0x2000 |
1571 | #define DW_enqueue_cleaned 0x4000 |
1572 | #define DW_vm_phantom_cache_update 0x8000 |
1573 | |
1574 | struct vm_page_delayed_work { |
1575 | vm_page_t dw_m; |
1576 | int dw_mask; |
1577 | }; |
1578 | |
1579 | void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count); |
1580 | |
1581 | extern unsigned int vm_max_delayed_work_limit; |
1582 | |
1583 | #define DEFAULT_DELAYED_WORK_LIMIT 32 |
1584 | |
1585 | #define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit)) |
1586 | |
1587 | /* |
1588 | * vm_page_do_delayed_work may need to drop the object lock... |
1589 | * if it does, we need the pages it's looking at to |
1590 | * be held stable via the busy bit, so if busy isn't already |
1591 | * set, we need to set it and ask vm_page_do_delayed_work |
1592 | * to clear it and wakeup anyone that might have blocked on |
1593 | * it once we're done processing the page. |
1594 | */ |
1595 | |
1596 | #define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt) \ |
1597 | MACRO_BEGIN \ |
1598 | if (mem->vmp_busy == FALSE) { \ |
1599 | mem->vmp_busy = TRUE; \ |
1600 | if ( !(dwp->dw_mask & DW_vm_page_free)) \ |
1601 | dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \ |
1602 | } \ |
1603 | dwp->dw_m = mem; \ |
1604 | dwp++; \ |
1605 | dw_cnt++; \ |
1606 | MACRO_END |
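
/*
 * Batching sketch for the delayed-work mechanism (hypothetical loop; the
 * object lock is held, and VM_KERN_MEMORY_NONE stands in for a real tag):
 *
 *	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work	*dwp = &dw_array[0];
 *	int	dw_count = 0;
 *	int	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *
 *	\* for each page 'mem' of interest... *\
 *		dwp->dw_mask = DW_vm_page_activate;
 *		VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_count);
 *		if (dw_count >= dw_limit) {
 *			vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 *			dwp = &dw_array[0];
 *			dw_count = 0;
 *		}
 *
 *	if (dw_count)
 *		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 */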
1607 | |
1608 | extern vm_page_t vm_object_page_grab(vm_object_t); |
1609 | |
1610 | #if VM_PAGE_BUCKETS_CHECK |
1611 | extern void vm_page_buckets_check(void); |
1612 | #endif /* VM_PAGE_BUCKETS_CHECK */ |
1613 | |
1614 | extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq); |
1615 | extern void vm_page_remove_internal(vm_page_t page); |
1616 | extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first); |
1617 | extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first); |
1618 | extern void vm_page_check_pageable_safe(vm_page_t page); |
1619 | |
1620 | #if CONFIG_SECLUDED_MEMORY |
1621 | extern uint64_t secluded_shutoff_trigger; |
1622 | extern void start_secluded_suppression(task_t); |
1623 | extern void stop_secluded_suppression(task_t); |
1624 | #endif /* CONFIG_SECLUDED_MEMORY */ |
1625 | |
1626 | |
1627 | #endif /* _VM_VM_PAGE_H_ */ |
1628 | |