/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Resident memory system definitions.
 */

#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#include <debug.h>
#include <vm/vm_options.h>
#include <vm/vm_protos.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/memory_object_types.h> /* for VMP_CS_BITS... */


#if defined(__LP64__)

/*
 * In order to make the size of a vm_page_t 64 bytes (the cache line size for both arm64 and x86_64)
 * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate
 * vm_page_t's from doesn't span more than 256 Gbytes, we're safe. There are live tests in the
 * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
 * pointers from the 2 ends of these spaces
 */
typedef uint32_t vm_page_packed_t;

struct vm_page_packed_queue_entry {
    vm_page_packed_t next;  /* next element */
    vm_page_packed_t prev;  /* previous element */
};

typedef struct vm_page_packed_queue_entry *vm_page_queue_t;
typedef struct vm_page_packed_queue_entry vm_page_queue_head_t;
typedef struct vm_page_packed_queue_entry vm_page_queue_chain_t;
typedef struct vm_page_packed_queue_entry *vm_page_queue_entry_t;

typedef vm_page_packed_t vm_page_object_t;

#else // __LP64__

/*
 * we can't do the packing trick on 32 bit architectures
 * so just turn the macros into noops.
 */
typedef struct vm_page *vm_page_packed_t;

#define vm_page_queue_t         queue_t
#define vm_page_queue_head_t    queue_head_t
#define vm_page_queue_chain_t   queue_chain_t
#define vm_page_queue_entry_t   queue_entry_t

#define vm_page_object_t        vm_object_t
#endif // __LP64__


#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/locks.h>

#include <kern/macro_help.h>
#include <libkern/OSAtomic.h>



#define VM_PAGE_COMPRESSOR_COUNT (compressor_object->resident_page_count)

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.
 *	For example, vmp_offset below is marked (O,P): both locks must
 *	be held to modify it, but either one suffices to read it.]
 */

#define VM_PAGE_NULL            ((vm_page_t) 0)

extern char vm_page_inactive_states[];
extern char vm_page_pageable_states[];
extern char vm_page_non_speculative_pageable_states[];
extern char vm_page_active_or_inactive_states[];


#define VM_PAGE_INACTIVE(m)                     (vm_page_inactive_states[m->vmp_q_state])
#define VM_PAGE_PAGEABLE(m)                     (vm_page_pageable_states[m->vmp_q_state])
#define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m)     (vm_page_non_speculative_pageable_states[m->vmp_q_state])
#define VM_PAGE_ACTIVE_OR_INACTIVE(m)           (vm_page_active_or_inactive_states[m->vmp_q_state])


#define VM_PAGE_NOT_ON_Q                0       /* page is not present on any queue, nor is it wired... mainly a transient state */
#define VM_PAGE_IS_WIRED                1       /* page is currently wired */
#define VM_PAGE_USED_BY_COMPRESSOR      2       /* page is in use by the compressor to hold compressed data */
#define VM_PAGE_ON_FREE_Q               3       /* page is on the main free queue */
#define VM_PAGE_ON_FREE_LOCAL_Q         4       /* page is on one of the per-CPU free queues */
#define VM_PAGE_ON_FREE_LOPAGE_Q        5       /* page is on the lopage pool free list */
#define VM_PAGE_ON_THROTTLED_Q          6       /* page is on the throttled queue... we stash anonymous pages here when not paging */
#define VM_PAGE_ON_PAGEOUT_Q            7       /* page is on one of the pageout queues (internal/external) awaiting processing */
#define VM_PAGE_ON_SPECULATIVE_Q        8       /* page is on one of the speculative queues */
#define VM_PAGE_ON_ACTIVE_LOCAL_Q       9       /* page has recently been created and is being held in one of the per-CPU local queues */
#define VM_PAGE_ON_ACTIVE_Q             10      /* page is in the global active queue */
#define VM_PAGE_ON_INACTIVE_INTERNAL_Q  11      /* page is on the inactive internal queue a.k.a. anonymous queue */
#define VM_PAGE_ON_INACTIVE_EXTERNAL_Q  12      /* page is on the inactive external queue a.k.a. file backed queue */
#define VM_PAGE_ON_INACTIVE_CLEANED_Q   13      /* page has been cleaned to a backing file and is ready to be stolen */
#define VM_PAGE_ON_SECLUDED_Q           14      /* page is on the secluded queue */
#define VM_PAGE_Q_STATE_LAST_VALID_VALUE        14      /* we currently use 4 bits for the state... don't let this go beyond 15 */

#define VM_PAGE_Q_STATE_ARRAY_SIZE      (VM_PAGE_Q_STATE_LAST_VALID_VALUE+1)
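
/*
 * Usage sketch (illustrative): the state arrays above turn a queue-state
 * test into a single table lookup, e.g.
 *
 *     if (VM_PAGE_PAGEABLE(m)) {
 *         ... m is on one of the pageable queues ...
 *     }
 *
 * instead of comparing vmp_q_state against each pageable value in turn.
 */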


/*
 * The structure itself.  See the block comment above for what (O) and (P) mean.
 */
#define vmp_pageq vmp_q_un.vmp_q_pageq
#define vmp_snext vmp_q_un.vmp_q_snext

struct vm_page {
    union {
        vm_page_queue_chain_t vmp_q_pageq;  /* queue info for FIFO queue or free list (P) */
        struct vm_page       *vmp_q_snext;
    } vmp_q_un;

    vm_page_queue_chain_t vmp_listq;        /* all pages in same object (O) */

    vm_page_queue_chain_t vmp_specialq;     /* anonymous pages in the special queues (P) */
    vm_object_offset_t    vmp_offset;       /* offset into that object (O,P) */

    vm_page_object_t      vmp_object;       /* which object am I in (O&P) */

    /*
     * The following word of flags used to be protected by the "page queues" lock.
     * That's no longer true and what lock, if any, is needed may depend on the
     * value of vmp_q_state.
     *
     * We use 'vmp_wire_count' to store the local queue id if local queues are enabled.
     * See the comments at 'vm_page_queues_remove' as to why this is safe to do.
     */
#define VM_PAGE_SPECIAL_Q_EMPTY (0)
#define VM_PAGE_SPECIAL_Q_BG (1)
#define VM_PAGE_SPECIAL_Q_DONATE (2)
#define VM_PAGE_SPECIAL_Q_FG (3)
#define vmp_local_id vmp_wire_count
    unsigned int vmp_wire_count:16,         /* how many wired down maps use me? (O&P) */
        vmp_q_state:4,                      /* which q is the page on (P) */
        vmp_on_specialq:2,
        vmp_gobbled:1,                      /* page used internally (P) */
        vmp_laundry:1,                      /* page is being cleaned now (P) */
        vmp_no_cache:1,                     /* page is not to be cached and should */
                                            /* be reused ahead of other pages (P) */
        vmp_private:1,                      /* Page should not be returned to the free list (P) */
        vmp_reference:1,                    /* page has been used (P) */
        vmp_lopage:1,
        vmp_realtime:1,                     /* page used by realtime thread */
#if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
        vmp_unused_page_bits:3;
#else /* ! CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
        vmp_unmodified_ro:1,                /* Tracks if an anonymous page is modified after a decompression (O&P). */
        vmp_unused_page_bits:2;
#endif /* ! CONFIG_TRACK_UNMODIFIED_ANON_PAGES */

    /*
     * MUST keep the 2 32 bit words used as bit fields
     * separated since the compiler has a nasty habit
     * of using 64 bit loads and stores on them as
     * if they were a single 64 bit field... since
     * they are protected by 2 different locks, this
     * is a real problem
     */
    vm_page_packed_t vmp_next_m;            /* VP bucket link (O) */

    /*
     * The following word of flags is protected by the "VM object" lock.
     *
     * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the
     * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
     * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro.
     * It's also ok to modify them behind just the VM object "exclusive" lock.
     */
    unsigned int vmp_busy:1,                /* page is in transit (O) */
        vmp_wanted:1,                       /* someone is waiting for page (O) */
        vmp_tabled:1,                       /* page is in VP table (O) */
        vmp_hashed:1,                       /* page is in vm_page_buckets[] (O) + the bucket lock */
        vmp_fictitious:1,                   /* Physical page doesn't exist (O) */
        vmp_clustered:1,                    /* page is not the faulted page (O) or (O-shared AND pmap_page) */
        vmp_pmapped:1,                      /* page has at some time been entered into a pmap (O) or */
                                            /* (O-shared AND pmap_page) */
        vmp_xpmapped:1,                     /* page has been entered with execute permission (O) or */
                                            /* (O-shared AND pmap_page) */
        vmp_wpmapped:1,                     /* page has been entered at some point into a pmap for write (O) */
        vmp_free_when_done:1,               /* page is to be freed once cleaning is completed (O) */
        vmp_absent:1,                       /* Data has been requested, but is not yet available (O) */
        vmp_error:1,                        /* Data manager was unable to provide data due to error (O) */
        vmp_dirty:1,                        /* Page must be cleaned (O) */
        vmp_cleaning:1,                     /* Page clean has begun (O) */
        vmp_precious:1,                     /* Page is precious; data must be returned even if clean (O) */
        vmp_overwriting:1,                  /* Request to unlock has been made without having data. (O) */
                                            /* [See vm_fault_page_overwrite] */
        vmp_restart:1,                      /* Page was pushed higher in shadow chain by copy_call-related pagers */
                                            /* start again at top of chain */
        vmp_unusual:1,                      /* Page is absent, error, restart or page locked */
        vmp_cs_validated:VMP_CS_BITS,       /* code-signing: page was checked */
        vmp_cs_tainted:VMP_CS_BITS,         /* code-signing: page is tainted */
        vmp_cs_nx:VMP_CS_BITS,              /* code-signing: page is nx */
        vmp_reusable:1,
        vmp_written_by_kernel:1;            /* page was written by kernel (i.e. decompressed) */

#if !defined(__arm64__)
    ppnum_t vmp_phys_page;                  /* Physical page number of the page */
#endif
};
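
/*
 * Illustrative check (a sketch only, not enforced here; the live tests
 * mentioned at the top of this file are the real guard): the packing
 * above is intended to keep the structure within one 64-byte cache line
 * on LP64, which a build could assert as:
 *
 *     _Static_assert(sizeof(struct vm_page) <= 64,
 *         "struct vm_page should fit in a 64-byte cache line");
 */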

typedef struct vm_page *vm_page_t;
extern vm_page_t vm_pages;
extern vm_page_t vm_page_array_beginning_addr;
extern vm_page_t vm_page_array_ending_addr;

static inline int
VMP_CS_FOR_OFFSET(
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_phys_offset < PAGE_SIZE &&
        !(fault_phys_offset & FOURK_PAGE_MASK),
        "offset 0x%llx\n", (uint64_t)fault_phys_offset);
    return 1 << (fault_phys_offset >> FOURK_PAGE_SHIFT);
}
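
/*
 * Worked example (illustrative): each bit of the returned mask covers one
 * 4K sub-page of a 16K page, so VMP_CS_FOR_OFFSET(0x0) == 0x1,
 * VMP_CS_FOR_OFFSET(0x1000) == 0x2 and VMP_CS_FOR_OFFSET(0x3000) == 0x8.
 */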
static inline bool
VMP_CS_VALIDATED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (fault_page_size == PAGE_SIZE) {
        return p->vmp_cs_validated == VMP_CS_ALL_TRUE;
    }
    return p->vmp_cs_validated & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_TAINTED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (fault_page_size == PAGE_SIZE) {
        return p->vmp_cs_tainted != VMP_CS_ALL_FALSE;
    }
    return p->vmp_cs_tainted & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_NX(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (fault_page_size == PAGE_SIZE) {
        return p->vmp_cs_nx != VMP_CS_ALL_FALSE;
    }
    return p->vmp_cs_nx & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline void
VMP_CS_SET_VALIDATED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    boolean_t value)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (value) {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_validated = VMP_CS_ALL_TRUE;
        }
        p->vmp_cs_validated |= VMP_CS_FOR_OFFSET(fault_phys_offset);
    } else {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_validated = VMP_CS_ALL_FALSE;
        }
        p->vmp_cs_validated &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
    }
}
static inline void
VMP_CS_SET_TAINTED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    boolean_t value)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (value) {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_tainted = VMP_CS_ALL_TRUE;
        }
        p->vmp_cs_tainted |= VMP_CS_FOR_OFFSET(fault_phys_offset);
    } else {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_tainted = VMP_CS_ALL_FALSE;
        }
        p->vmp_cs_tainted &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
    }
}
static inline void
VMP_CS_SET_NX(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    boolean_t value)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (value) {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_nx = VMP_CS_ALL_TRUE;
        }
        p->vmp_cs_nx |= VMP_CS_FOR_OFFSET(fault_phys_offset);
    } else {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_nx = VMP_CS_ALL_FALSE;
        }
        p->vmp_cs_nx &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
    }
}
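
/*
 * Usage sketch (illustrative, hypothetical fault-path fragment): a 4K
 * mapping of a 16K page validates and records only its own sub-page:
 *
 *     if (!VMP_CS_VALIDATED(p, fault_page_size, fault_phys_offset)) {
 *         ... validate the 4K sub-page, then on success ...
 *         VMP_CS_SET_VALIDATED(p, fault_page_size, fault_phys_offset, TRUE);
 *     }
 */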


#if defined(__arm64__)

extern unsigned int vm_first_phys_ppnum;

struct vm_page_with_ppnum {
    struct vm_page vm_page_wo_ppnum;

    ppnum_t vmp_phys_page;
};
typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;


static inline ppnum_t
VM_PAGE_GET_PHYS_PAGE(vm_page_t m)
{
    if (m >= vm_page_array_beginning_addr && m < vm_page_array_ending_addr) {
        return (ppnum_t)((uintptr_t)(m - vm_page_array_beginning_addr) + vm_first_phys_ppnum);
    } else {
        return ((vm_page_with_ppnum_t)m)->vmp_phys_page;
    }
}
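
/*
 * Worked example (illustrative): pages inside the vm_pages[] array carry
 * no vmp_phys_page field; if vm_first_phys_ppnum were 0x800, the array
 * entry at index 4 would yield VM_PAGE_GET_PHYS_PAGE() == 0x804.
 */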

#define VM_PAGE_SET_PHYS_PAGE(m, ppnum) \
    MACRO_BEGIN \
    if ((m) < vm_page_array_beginning_addr || (m) >= vm_page_array_ending_addr) \
        ((vm_page_with_ppnum_t)(m))->vmp_phys_page = ppnum; \
    assert(ppnum == VM_PAGE_GET_PHYS_PAGE(m)); \
    MACRO_END

#define VM_PAGE_GET_COLOR(m)    (VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask)

#else /* defined(__arm64__) */


struct vm_page_with_ppnum {
    struct vm_page vm_page_with_ppnum;
};
typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;


#define VM_PAGE_GET_PHYS_PAGE(page)     (page)->vmp_phys_page
#define VM_PAGE_SET_PHYS_PAGE(page, ppnum) \
    MACRO_BEGIN \
    (page)->vmp_phys_page = ppnum; \
    MACRO_END

#define VM_PAGE_GET_CLUMP(m)    ((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift)
#define VM_PAGE_GET_COLOR(m)    ((VM_PAGE_GET_CLUMP(m)) & vm_color_mask)

#endif /* defined(__arm64__) */



#if defined(__LP64__)
/*
 * Parameters for pointer packing
 *
 *
 * VM page pointers might point to:
 *
 * 1. VM_PAGE_PACKED_ALIGNED aligned kernel globals,
 *
 * 2. VM_PAGE_PACKED_ALIGNED aligned heap allocated vm pages
 *
 * 3. entries in the vm_pages array (whose entries aren't VM_PAGE_PACKED_ALIGNED
 *    aligned).
 *
 *
 * The current scheme uses 31 bits of storage and 6 bits of shift using the
 * VM_PACK_POINTER() scheme for (1-2), and packs (3) as an index within the
 * vm_pages array, setting the top bit (VM_PAGE_PACKED_FROM_ARRAY).
 *
 * This scheme gives us a reach of 128G from VM_MIN_KERNEL_AND_KEXT_ADDRESS.
 */
#define VM_VPLQ_ALIGNMENT               128
#define VM_PAGE_PACKED_PTR_ALIGNMENT    64      /* must be a power of 2 */
#define VM_PAGE_PACKED_ALIGNED          __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT)))
#define VM_PAGE_PACKED_PTR_BITS         31
#define VM_PAGE_PACKED_PTR_SHIFT        6
#define VM_PAGE_PACKED_PTR_BASE         ((uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS)

#define VM_PAGE_PACKED_FROM_ARRAY       0x80000000
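
/*
 * Worked example (illustrative): a packed pointer for cases (1-2) stores
 * (addr - VM_PAGE_PACKED_PTR_BASE) >> VM_PAGE_PACKED_PTR_SHIFT in 31 bits,
 * so the reachable span is 2^31 * 2^6 bytes = 2^37 bytes = 128 GB beyond
 * VM_MIN_KERNEL_AND_KEXT_ADDRESS, matching the comment above.
 */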

static inline vm_page_packed_t
vm_page_pack_ptr(uintptr_t p)
{
    if (p >= (uintptr_t)vm_page_array_beginning_addr &&
        p < (uintptr_t)vm_page_array_ending_addr) {
        ptrdiff_t diff = (vm_page_t)p - vm_page_array_beginning_addr;
        assert((vm_page_t)p == &vm_pages[diff]);
        return (vm_page_packed_t)(diff | VM_PAGE_PACKED_FROM_ARRAY);
    }

    VM_ASSERT_POINTER_PACKABLE(p, VM_PAGE_PACKED_PTR);
    vm_offset_t packed = VM_PACK_POINTER(p, VM_PAGE_PACKED_PTR);
    return CAST_DOWN_EXPLICIT(vm_page_packed_t, packed);
}


static inline uintptr_t
vm_page_unpack_ptr(uintptr_t p)
{
    extern unsigned int vm_pages_count;

    if (p >= VM_PAGE_PACKED_FROM_ARRAY) {
        p &= ~VM_PAGE_PACKED_FROM_ARRAY;
        assert(p < (uintptr_t)vm_pages_count);
        return (uintptr_t)&vm_pages[p];
    }

    return VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR);
}


#define VM_PAGE_PACK_PTR(p)     vm_page_pack_ptr((uintptr_t)(p))
#define VM_PAGE_UNPACK_PTR(p)   vm_page_unpack_ptr((uintptr_t)(p))
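
/*
 * Round-trip sketch (illustrative): packing and unpacking are inverses
 * for any packable pointer, including vm_pages[] array entries:
 *
 *     vm_page_packed_t packed = VM_PAGE_PACK_PTR(mem);
 *     vm_page_t unpacked = (vm_page_t)VM_PAGE_UNPACK_PTR(packed);
 *     assert(unpacked == mem);
 */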

#define VM_OBJECT_PACK(o)       ((vm_page_object_t)VM_PACK_POINTER((uintptr_t)(o), VM_PAGE_PACKED_PTR))
#define VM_OBJECT_UNPACK(p)     ((vm_object_t)VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR))

#define VM_PAGE_OBJECT(p)       VM_OBJECT_UNPACK((p)->vmp_object)
#define VM_PAGE_PACK_OBJECT(o)  VM_OBJECT_PACK(o)


#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \
    MACRO_BEGIN \
    (p)->vmp_snext = 0; \
    MACRO_END


#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)       VM_PAGE_PACK_PTR(p)


static __inline__ void
vm_page_enqueue_tail(
    vm_page_queue_t       que,
    vm_page_queue_entry_t elt)
{
    vm_page_queue_entry_t old_tail;

    old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
    elt->next = VM_PAGE_PACK_PTR(que);
    elt->prev = que->prev;
    que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt);
}


static __inline__ void
vm_page_remque(
    vm_page_queue_entry_t elt)
{
    vm_page_queue_entry_t next;
    vm_page_queue_entry_t prev;
    vm_page_packed_t next_pck = elt->next;
    vm_page_packed_t prev_pck = elt->prev;

    next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck);

    /* next may equal prev (and the queue head) if elt was the only element */
    prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck);

    next->prev = prev_pck;
    prev->next = next_pck;

    elt->next = 0;
    elt->prev = 0;
}


/*
 *	Macro:	vm_page_queue_init
 *	Function:
 *		Initialize the given queue.
 *	Header:
 *		void vm_page_queue_init(q)
 *			vm_page_queue_t q;      \* MODIFIED *\
 */
#define vm_page_queue_init(q) \
    MACRO_BEGIN \
    VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(q), VM_PAGE_PACKED_PTR); \
    (q)->next = VM_PAGE_PACK_PTR(q); \
    (q)->prev = VM_PAGE_PACK_PTR(q); \
    MACRO_END
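
/*
 * Usage sketch (illustrative; "my_q" is a hypothetical name): a
 * VM_PAGE_PACKED_ALIGNED kernel global is case (1) of the pointer-packing
 * comment above and therefore satisfies VM_ASSERT_POINTER_PACKABLE:
 *
 *     static vm_page_queue_head_t my_q VM_PAGE_PACKED_ALIGNED;
 *
 *     vm_page_queue_init(&my_q);
 *     assert(vm_page_queue_empty(&my_q));
 */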


/*
 *	Macro:	vm_page_queue_enter
 *	Function:
 *		Insert a new element at the tail of the vm_page queue.
 *	Header:
 *		void vm_page_queue_enter(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the list field in vm_page_t
 *
 * This macro's arguments have to match the generic "queue_enter()" macro which is
 * what is used for this on 32 bit kernels.
 */
#define vm_page_queue_enter(head, elt, field) \
    MACRO_BEGIN \
    vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt); \
    vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \
    vm_page_packed_t __pck_prev = (head)->prev; \
\
    if (__pck_head == __pck_prev) { \
        (head)->next = __pck_elt; \
    } else { \
        vm_page_t __prev; \
        __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
        __prev->field.next = __pck_elt; \
    } \
    (elt)->field.prev = __pck_prev; \
    (elt)->field.next = __pck_head; \
    (head)->prev = __pck_elt; \
    MACRO_END
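
/*
 * Usage sketch (illustrative, assumes the page queues lock is held):
 *
 *     mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
 *     vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
 */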


#if defined(__x86_64__)
/*
 * These are helper macros for vm_page_queue_enter_clump to assist
 * with conditional compilation (release / debug / development)
 */
#if DEVELOPMENT || DEBUG

#define __DEBUG_CHECK_BUDDIES(__prev, __p, field) \
    MACRO_BEGIN \
    if (__prev != NULL) { \
        assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next)); \
        assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev)); \
    } \
    MACRO_END

#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next) \
    MACRO_BEGIN \
    unsigned int __i; \
    vm_page_queue_entry_t __tmp; \
    for (__i = 0, __tmp = __first; __i < __n_free; __i++) { \
        __tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \
    } \
    assert(__tmp == __last_next); \
    MACRO_END

#define __DEBUG_STAT_INCREMENT_INRANGE          vm_clump_inrange++
#define __DEBUG_STAT_INCREMENT_INSERTS          vm_clump_inserts++
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)       vm_clump_promotes += __n_free

#else

#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)
#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)
#define __DEBUG_STAT_INCREMENT_INRANGE
#define __DEBUG_STAT_INCREMENT_INSERTS
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)

#endif /* if DEVELOPMENT || DEBUG */

/*
 * Insert a new page into a free queue and clump pages within the same 16K boundary together
 */
static inline void
vm_page_queue_enter_clump(
    vm_page_queue_t head,
    vm_page_t       elt)
{
    vm_page_queue_entry_t first = NULL;     /* first page in the clump */
    vm_page_queue_entry_t last = NULL;      /* last page in the clump */
    vm_page_queue_entry_t prev = NULL;
    vm_page_queue_entry_t next;
    uint_t n_free = 1;
    extern unsigned int vm_pages_count;
    extern unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
    extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;

    /*
     * If elt is part of the vm_pages[] array, find its neighboring buddies in the array.
     */
    if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) {
        vm_page_t p;
        uint_t i;
        uint_t n;
        ppnum_t clump_num;

        first = last = (vm_page_queue_entry_t)elt;
        clump_num = VM_PAGE_GET_CLUMP(elt);
        n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask;

        /*
         * Check for preceding vm_pages[] entries in the same clump
         */
        for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) {
            if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
                if (prev == NULL) {
                    prev = (vm_page_queue_entry_t)p;
                }
                first = (vm_page_queue_entry_t)p;
                n_free++;
            }
        }

        /*
         * Check the following vm_pages[] entries in the same clump
         */
        for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) {
            if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
                if (last == (vm_page_queue_entry_t)elt) {       /* first one only */
                    __DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
                }

                if (prev == NULL) {
                    prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
                }
                last = (vm_page_queue_entry_t)p;
                n_free++;
            }
        }
        __DEBUG_STAT_INCREMENT_INRANGE;
    }

    /* if elt is not part of vm_pages or if 1st page in clump, insert at tail */
    if (prev == NULL) {
        prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
    }

    /* insert the element */
    next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
    elt->vmp_pageq.next = prev->next;
    elt->vmp_pageq.prev = next->prev;
    prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
    __DEBUG_STAT_INCREMENT_INSERTS;

    /*
     * Check if clump needs to be promoted to head.
     */
    if (n_free >= vm_clump_promote_threshold && n_free > 1) {
        vm_page_queue_entry_t first_prev;

        first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);

        /* If not at head already */
        if (first_prev != head) {
            vm_page_queue_entry_t last_next;
            vm_page_queue_entry_t head_next;

            last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);

            /* verify that the links within the clump are consistent */
            __DEBUG_VERIFY_LINKS(first, n_free, last_next);

            /* promote clump to head */
            first_prev->next = last->next;
            last_next->prev = first->prev;
            first->prev = VM_PAGE_PACK_PTR(head);
            last->next = head->next;

            head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
            head_next->prev = VM_PAGE_PACK_PTR(last);
            head->next = VM_PAGE_PACK_PTR(first);
            __DEBUG_STAT_INCREMENT_PROMOTES(n_free);
        }
    }
}
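
/*
 * Worked example (illustrative): with 4K pages clumped on 16K boundaries,
 * vm_clump_size == 4 and vm_clump_shift == 2, so the pages with physical
 * page numbers 8..11 all share clump number 2 (8 >> 2) and are kept
 * adjacent on the free queue; once enough of them are free, the whole
 * clump can be promoted to the head (see vm_clump_promote_threshold).
 */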
#endif /* defined(__x86_64__) */

/*
 *	Macro:	vm_page_queue_enter_first
 *	Function:
 *		Insert a new element at the head of the vm_page queue.
 *	Header:
 *		void vm_page_queue_enter_first(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the linkage field in vm_page
 *
 * This macro's arguments have to match the generic "queue_enter_first()" macro which is
 * what is used for this on 32 bit kernels.
 */
#define vm_page_queue_enter_first(head, elt, field) \
    MACRO_BEGIN \
    vm_page_packed_t __pck_next = (head)->next; \
    vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \
    vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt); \
\
    if (__pck_head == __pck_next) { \
        (head)->prev = __pck_elt; \
    } else { \
        vm_page_t __next; \
        __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
        __next->field.prev = __pck_elt; \
    } \
\
    (elt)->field.next = __pck_next; \
    (elt)->field.prev = __pck_head; \
    (head)->next = __pck_elt; \
    MACRO_END


/*
 *	Macro:	vm_page_queue_remove
 *	Function:
 *		Remove an arbitrary page from a vm_page queue.
 *	Header:
 *		void vm_page_queue_remove(q, qe, field)
 *			arguments as in vm_page_queue_enter
 *
 * This macro's arguments have to match the generic "queue_remove()" macro which is
 * what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove(head, elt, field) \
    MACRO_BEGIN \
    vm_page_packed_t __pck_next = (elt)->field.next; \
    vm_page_packed_t __pck_prev = (elt)->field.prev; \
    vm_page_t __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
    vm_page_t __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
\
    if ((void *)(head) == (void *)__next) { \
        (head)->prev = __pck_prev; \
    } else { \
        __next->field.prev = __pck_prev; \
    } \
\
    if ((void *)(head) == (void *)__prev) { \
        (head)->next = __pck_next; \
    } else { \
        __prev->field.next = __pck_next; \
    } \
\
    (elt)->field.next = 0; \
    (elt)->field.prev = 0; \
    MACRO_END


/*
 *	Macro:	vm_page_queue_remove_first
 *
 *	Function:
 *		Remove and return the entry at the head of a vm_page queue.
 *
 *	Header:
 *		vm_page_queue_remove_first(head, entry, field)
 *		N.B. entry is returned by reference
 *
 * This macro's arguments have to match the generic "queue_remove_first()" macro which is
 * what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove_first(head, entry, field) \
    MACRO_BEGIN \
    vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \
    vm_page_packed_t __pck_next; \
    vm_page_t __next; \
\
    (entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next); \
    __pck_next = (entry)->field.next; \
    __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
\
    if (__pck_head == __pck_next) { \
        (head)->prev = __pck_head; \
    } else { \
        __next->field.prev = __pck_head; \
    } \
\
    (head)->next = __pck_next; \
    (entry)->field.next = 0; \
    (entry)->field.prev = 0; \
    MACRO_END


#if defined(__x86_64__)
/*
 *	Macro:	vm_page_queue_remove_first_with_clump
 *	Function:
 *		Remove and return the entry at the head of the free queue;
 *		end is set to 1 to indicate that we just returned the last page in a clump.
 *
 *	Header:
 *		vm_page_queue_remove_first_with_clump(head, entry, end)
 *		entry is returned by reference
 *		end is returned by reference
 */
#define vm_page_queue_remove_first_with_clump(head, entry, end) \
    MACRO_BEGIN \
    vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \
    vm_page_packed_t __pck_next; \
    vm_page_t __next; \
\
    (entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next); \
    __pck_next = (entry)->vmp_pageq.next; \
    __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
\
    (end) = 0; \
    if (__pck_head == __pck_next) { \
        (head)->prev = __pck_head; \
        (end) = 1; \
    } else { \
        __next->vmp_pageq.prev = __pck_head; \
        if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) { \
            (end) = 1; \
        } \
    } \
\
    (head)->next = __pck_next; \
    (entry)->vmp_pageq.next = 0; \
    (entry)->vmp_pageq.prev = 0; \
    MACRO_END
#endif /* defined(__x86_64__) */

/*
 *	Macro:	vm_page_queue_end
 *	Function:
 *		Tests whether an entry is the end of the queue.
 *	Header:
 *		boolean_t vm_page_queue_end(q, qe)
 *			vm_page_queue_t q;
 *			vm_page_queue_entry_t qe;
 */
#define vm_page_queue_end(q, qe)        ((q) == (qe))


/*
 *	Macro:	vm_page_queue_empty
 *	Function:
 *		Tests whether a queue is empty.
 *	Header:
 *		boolean_t vm_page_queue_empty(q)
 *			vm_page_queue_t q;
 */
#define vm_page_queue_empty(q)          vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))



/*
 *	Macro:	vm_page_queue_first
 *	Function:
 *		Returns the first entry in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_first(q)
 *			vm_page_queue_t q;      \* IN *\
 */
#define vm_page_queue_first(q)          (VM_PAGE_UNPACK_PTR((q)->next))



/*
 *	Macro:	vm_page_queue_last
 *	Function:
 *		Returns the last entry in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_last(q)
 *			vm_page_queue_t q;      \* IN *\
 */
#define vm_page_queue_last(q)           (VM_PAGE_UNPACK_PTR((q)->prev))



/*
 *	Macro:	vm_page_queue_next
 *	Function:
 *		Returns the entry after an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_next(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_next(qc)          (VM_PAGE_UNPACK_PTR((qc)->next))



/*
 *	Macro:	vm_page_queue_prev
 *	Function:
 *		Returns the entry before an item in the queue.
 *	Header:
 *		uintptr_t vm_page_queue_prev(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_prev(qc)          (VM_PAGE_UNPACK_PTR((qc)->prev))



/*
 *	Macro:	vm_page_queue_iterate
 *	Function:
 *		Iterate over each item in a vm_page queue.
 *		Generates a 'for' loop, setting elt to
 *		each item in turn (by reference).
 *	Header:
 *		vm_page_queue_iterate(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the chain field in vm_page_t
 */
#define vm_page_queue_iterate(head, elt, field) \
    for ((elt) = (vm_page_t)vm_page_queue_first(head); \
        !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \
        (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field))
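
/*
 * Usage sketch (illustrative, assumes the page queues lock is held):
 *
 *     vm_page_t m;
 *
 *     vm_page_queue_iterate(&vm_page_queue_active, m, vmp_pageq) {
 *         ... examine m ...
 *     }
 */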

#else // __LP64__

#define VM_VPLQ_ALIGNMENT               128
#define VM_PAGE_PACKED_PTR_ALIGNMENT    sizeof(vm_offset_t)
#define VM_PAGE_PACKED_ALIGNED
#define VM_PAGE_PACKED_PTR_BITS         32
#define VM_PAGE_PACKED_PTR_SHIFT        0
#define VM_PAGE_PACKED_PTR_BASE         0

#define VM_PAGE_PACKED_FROM_ARRAY       0

#define VM_PAGE_PACK_PTR(p)     (p)
#define VM_PAGE_UNPACK_PTR(p)   ((uintptr_t)(p))

#define VM_OBJECT_PACK(o)       ((vm_page_object_t)(o))
#define VM_OBJECT_UNPACK(p)     ((vm_object_t)(p))

#define VM_PAGE_PACK_OBJECT(o)  VM_OBJECT_PACK(o)
#define VM_PAGE_OBJECT(p)       VM_OBJECT_UNPACK((p)->vmp_object)


#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \
    MACRO_BEGIN \
    (p)->vmp_pageq.next = 0; \
    (p)->vmp_pageq.prev = 0; \
    MACRO_END

#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)       ((queue_entry_t)(p))

#define vm_page_remque                          remque
#define vm_page_enqueue_tail                    enqueue_tail
#define vm_page_queue_init                      queue_init
#define vm_page_queue_enter(h, e, f)            queue_enter(h, e, vm_page_t, f)
#define vm_page_queue_enter_first(h, e, f)      queue_enter_first(h, e, vm_page_t, f)
#define vm_page_queue_remove(h, e, f)           queue_remove(h, e, vm_page_t, f)
#define vm_page_queue_remove_first(h, e, f)     queue_remove_first(h, e, vm_page_t, f)
#define vm_page_queue_end                       queue_end
#define vm_page_queue_empty                     queue_empty
#define vm_page_queue_first                     queue_first
#define vm_page_queue_last                      queue_last
#define vm_page_queue_next                      queue_next
#define vm_page_queue_prev                      queue_prev
#define vm_page_queue_iterate(h, e, f)          queue_iterate(h, e, vm_page_t, f)

#endif // __LP64__



/*
 * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
 * represents a set of aging bins that are 'protected'...
 *
 * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
 * not yet been 'claimed' but have been aged out of the protective bins
 * this occurs in vm_page_speculate when it advances to the next bin
 * and discovers that it is still occupied... at that point, all of the
 * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q. the pages
 * in that bin are all guaranteed to have reached at least the maximum age
 * we allow for a protected page... they can be older if there is no
 * memory pressure to pull them from the bin, or there are no new speculative pages
 * being generated to push them out.
 * this list is the one that vm_pageout_scan will prefer when looking
 * for pages to move to the underweight free list
 *
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * defines the amount of time a speculative page is normally
 * allowed to live in the 'protected' state (i.e. not available
 * to be stolen if vm_pageout_scan is running and looking for
 * pages)... however, if the total number of speculative pages
 * in the protected state exceeds our limit (defined in vm_pageout.c)
 * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
 * vm_pageout_scan is allowed to steal pages from the protected
 * bucket even if they are underage.
 *
 * vm_pageout_scan is also allowed to pull pages from a protected
 * bin if the bin has reached the "age of consent" we've set
 */
#define VM_PAGE_MAX_SPECULATIVE_AGE_Q   10
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q   1
#define VM_PAGE_SPECULATIVE_AGED_Q      0

#define VM_PAGE_SPECULATIVE_Q_AGE_MS    500
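
/*
 * Worked example (illustrative): with the defaults above, a speculative
 * page is normally protected for VM_PAGE_MAX_SPECULATIVE_AGE_Q *
 * VM_PAGE_SPECULATIVE_Q_AGE_MS = 10 * 500 ms = 5 seconds before its bin
 * ages out into VM_PAGE_SPECULATIVE_AGED_Q.
 */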

struct vm_speculative_age_q {
    /*
     * memory queue for speculative pages via clustered pageins
     */
    vm_page_queue_head_t age_q;
    mach_timespec_t      age_ts;
} VM_PAGE_PACKED_ALIGNED;



extern
struct vm_speculative_age_q vm_page_queue_speculative[];

extern int          speculative_steal_index;
extern int          speculative_age_index;
extern unsigned int vm_page_speculative_q_age_ms;


typedef struct vm_locks_array {
    char      pad __attribute__ ((aligned(64)));
    lck_mtx_t vm_page_queue_lock2 __attribute__ ((aligned(64)));
    lck_mtx_t vm_page_queue_free_lock2 __attribute__ ((aligned(64)));
    char      pad2 __attribute__ ((aligned(64)));
} vm_locks_array_t;


extern void vm_page_assign_special_state(vm_page_t mem, int mode);
extern void vm_page_update_special_state(vm_page_t mem);
extern void vm_page_add_to_specialq(vm_page_t mem, boolean_t first);
extern void vm_page_remove_from_specialq(vm_page_t mem);

#define VM_PAGE_WIRED(m)        ((m)->vmp_q_state == VM_PAGE_IS_WIRED)
#define NEXT_PAGE(m)            ((m)->vmp_snext)
#define NEXT_PAGE_PTR(m)        (&(m)->vmp_snext)

/*
 * XXX	The unusual bit should not be necessary.  Most of the bit
 * XXX	fields above really want to be masks.
 */

/*
 * For debugging, this macro can be defined to perform
 * some useful check on a page structure.
 * INTENTIONALLY left as a no-op so that the
 * current call-sites can be left intact for future uses.
 */

#define VM_PAGE_CHECK(mem) \
    MACRO_BEGIN \
    MACRO_END

/*
 * Page coloring:
 *
 * The free page list is actually n lists, one per color,
 * where the number of colors is a function of the machine's
 * cache geometry set at system initialization.  To disable
 * coloring, set vm_colors to 1 and vm_color_mask to 0.
 * The boot-arg "colors" may be used to override vm_colors.
 * Note that there is little harm in having more colors than needed.
 */

#define MAX_COLORS      128
#define DEFAULT_COLORS  32

extern
unsigned int    vm_colors;                      /* must be in range 1..MAX_COLORS */
extern
unsigned int    vm_color_mask;                  /* must be (vm_colors-1) */
extern
unsigned int    vm_cache_geometry_colors;       /* optimal #colors based on cache geometry */
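
/*
 * Sketch (illustrative; vm_page_queue_free is assumed here to be the
 * per-color free list array maintained in vm_resident.c, named only for
 * illustration): a page's free list is selected by its color, e.g.
 *
 *     unsigned int color = VM_PAGE_GET_COLOR(mem);
 *     vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
 */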

/*
 * Wired memory is a very limited resource and we can't let users exhaust it
 * and deadlock the entire system.  We enforce the following limits:
 *
 * vm_per_task_user_wire_limit
 *	how much memory can be user-wired in one user task
 *
 * vm_global_user_wire_limit (default: same as vm_per_task_user_wire_limit)
 *	how much memory can be user-wired in all user tasks
 *
 * These values are set to defaults based on the number of pages managed
 * by the VM system.  They can be overridden via sysctls.
 * See kmem_set_user_wire_limits for details on the default values.
 *
 * Regardless of the amount of memory in the system, we never reserve
 * more than VM_NOT_USER_WIREABLE_MAX bytes as unlockable.
 */
#if defined(__LP64__)
#define VM_NOT_USER_WIREABLE_MAX        (32ULL*1024*1024*1024)  /* 32GB */
#else
#define VM_NOT_USER_WIREABLE_MAX        (1UL*1024*1024*1024)    /* 1GB */
#endif /* __LP64__ */
extern
vm_map_size_t   vm_per_task_user_wire_limit;
extern
vm_map_size_t   vm_global_user_wire_limit;
extern
uint64_t        vm_add_wire_count_over_global_limit;
extern
uint64_t        vm_add_wire_count_over_user_limit;

/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.  The free list is
 *		actually an array of lists, one per color.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.  There are actually two
 *		inactive lists, one for pages brought in from
 *		disk or other backing store, and another
 *		for "zero-filled" pages.  See vm_pageout_scan()
 *		for the distinction and usage.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */


#define VPL_LOCK_SPIN 1

struct vpl {
    vm_page_queue_head_t vpl_queue;
    unsigned int         vpl_count;
    unsigned int         vpl_internal_count;
    unsigned int         vpl_external_count;
    lck_spin_t           vpl_lock;
};

extern
struct vpl * /* __zpercpu */ vm_page_local_q;
extern
unsigned int     vm_page_local_q_soft_limit;
extern
unsigned int     vm_page_local_q_hard_limit;
extern
vm_locks_array_t vm_page_locks;

extern
vm_page_queue_head_t    vm_lopage_queue_free;           /* low memory free queue */
extern
vm_page_queue_head_t    vm_page_queue_active;           /* active memory queue */
extern
vm_page_queue_head_t    vm_page_queue_inactive;         /* inactive memory queue for normal pages */
#if CONFIG_SECLUDED_MEMORY
extern
vm_page_queue_head_t    vm_page_queue_secluded;         /* reclaimable pages secluded for Camera */
#endif /* CONFIG_SECLUDED_MEMORY */
extern
vm_page_queue_head_t    vm_page_queue_cleaned;          /* clean-queue inactive memory */
extern
vm_page_queue_head_t    vm_page_queue_anonymous;        /* inactive memory queue for anonymous pages */
extern
vm_page_queue_head_t    vm_page_queue_throttled;        /* memory queue for throttled pageout pages */

extern
queue_head_t    vm_objects_wired;
extern
lck_spin_t      vm_objects_wired_lock;

#define VM_PAGE_DONATE_DISABLED 0
#define VM_PAGE_DONATE_ENABLED  1
extern
uint32_t        vm_page_donate_mode;
extern
bool            vm_page_donate_queue_ripe;

#define VM_PAGE_BACKGROUND_TARGET_MAX   50000
#define VM_PAGE_BG_DISABLED             0
#define VM_PAGE_BG_ENABLED              1

extern
vm_page_queue_head_t    vm_page_queue_background;
extern
uint64_t        vm_page_background_promoted_count;
extern
uint32_t        vm_page_background_count;
extern
uint32_t        vm_page_background_target;
extern
uint32_t        vm_page_background_internal_count;
extern
uint32_t        vm_page_background_external_count;
extern
uint32_t        vm_page_background_mode;
extern
uint32_t        vm_page_background_exclude_external;

extern
vm_page_queue_head_t    vm_page_queue_donate;
extern
uint32_t        vm_page_donate_count;
extern
uint32_t        vm_page_donate_target_low;
extern
uint32_t        vm_page_donate_target_high;
#define VM_PAGE_DONATE_TARGET_LOWWATER  (100)
#define VM_PAGE_DONATE_TARGET_HIGHWATER ((unsigned int)(atop_64(max_mem) / 8))

extern
vm_offset_t     first_phys_addr;                /* physical address for first_page */
extern
vm_offset_t     last_phys_addr;                 /* physical address for last_page */

extern
unsigned int    vm_page_free_count;             /* How many pages are free? (sum of all colors) */
extern
unsigned int    vm_page_active_count;           /* How many pages are active? */
extern
unsigned int    vm_page_inactive_count;         /* How many pages are inactive? */
extern
unsigned int    vm_page_kernelcache_count;      /* How many pages are used for the kernelcache? */
extern
unsigned int    vm_page_realtime_count;         /* How many pages are used by realtime threads? */
#if CONFIG_SECLUDED_MEMORY
extern
unsigned int    vm_page_secluded_count;         /* How many pages are secluded? */
extern
unsigned int    vm_page_secluded_count_free;    /* how many of them are free? */
extern
unsigned int    vm_page_secluded_count_inuse;   /* how many of them are in use? */
/*
 * We keep filling the secluded pool with new eligible pages and
 * we can overshoot our target by a lot.
 * When there's memory pressure, vm_pageout_scan() will re-balance the queues,
 * pushing the extra secluded pages to the active or free queue.
 * Since these "over target" secluded pages are actually "available", jetsam
 * should consider them as such, so make them visible to jetsam via the
 * "vm_page_secluded_count_over_target" counter and update it whenever we
 * update vm_page_secluded_count or vm_page_secluded_target.
 */
extern
unsigned int    vm_page_secluded_count_over_target;
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE() \
    MACRO_BEGIN \
    if (vm_page_secluded_count > vm_page_secluded_target) { \
        vm_page_secluded_count_over_target = \
            (vm_page_secluded_count - vm_page_secluded_target); \
    } else { \
        vm_page_secluded_count_over_target = 0; \
    } \
    MACRO_END
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() vm_page_secluded_count_over_target
#else /* CONFIG_SECLUDED_MEMORY */
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE() \
    MACRO_BEGIN \
    MACRO_END
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() 0
#endif /* CONFIG_SECLUDED_MEMORY */
| 1346 | extern |
| 1347 | unsigned int vm_page_cleaned_count; /* How many pages are in the clean queue? */ |
| 1348 | extern |
| 1349 | unsigned int vm_page_throttled_count;/* How many inactives are throttled */ |
| 1350 | extern |
| 1351 | unsigned int vm_page_speculative_count; /* How many speculative pages are unclaimed? */ |
| 1352 | extern unsigned int vm_page_pageable_internal_count; |
| 1353 | extern unsigned int vm_page_pageable_external_count; |
| 1354 | extern |
| 1355 | unsigned int vm_page_xpmapped_external_count; /* How many pages are mapped executable? */ |
| 1356 | extern |
| 1357 | unsigned int vm_page_external_count; /* How many pages are file-backed? */ |
| 1358 | extern |
| 1359 | unsigned int vm_page_internal_count; /* How many pages are anonymous? */ |
| 1360 | extern |
| 1361 | unsigned int vm_page_wire_count; /* How many pages are wired? */ |
| 1362 | extern |
| 1363 | unsigned int vm_page_wire_count_initial; /* How many pages wired at startup */ |
| 1364 | extern |
| 1365 | unsigned int vm_page_wire_count_on_boot; /* even earlier than _initial */ |
| 1366 | extern |
| 1367 | unsigned int vm_page_free_target; /* How many do we want free? */ |
| 1368 | extern |
| 1369 | unsigned int vm_page_free_min; /* When to wakeup pageout */ |
| 1370 | extern |
| 1371 | unsigned int vm_page_throttle_limit; /* When to throttle new page creation */ |
| 1372 | extern |
| 1373 | unsigned int vm_page_inactive_target;/* How many do we want inactive? */ |
| 1374 | #if CONFIG_SECLUDED_MEMORY |
| 1375 | extern |
| 1376 | unsigned int vm_page_secluded_target;/* How many do we want secluded? */ |
| 1377 | #endif /* CONFIG_SECLUDED_MEMORY */ |
| 1378 | extern |
| 1379 | unsigned int vm_page_anonymous_min; /* When it's ok to pre-clean */ |
| 1380 | extern |
| 1381 | unsigned int vm_page_free_reserved; /* How many pages reserved to do pageout */ |
| 1382 | extern |
| 1383 | unsigned int vm_page_gobble_count; |
| 1384 | extern |
unsigned int vm_page_stolen_count; /* Count of stolen pages not accounted for in zones */
| 1386 | extern |
| 1387 | unsigned int vm_page_kern_lpage_count; /* Count of large pages used in early boot */ |
| 1388 | |
| 1389 | |
| 1390 | #if DEVELOPMENT || DEBUG |
| 1391 | extern |
| 1392 | unsigned int vm_page_speculative_used; |
| 1393 | #endif |
| 1394 | |
extern
unsigned int vm_page_purgeable_count;/* How many pages are purgeable now? */
extern
unsigned int vm_page_purgeable_wired_count;/* How many purgeable pages are wired now? */
extern
uint64_t vm_page_purged_count; /* How many pages got purged so far? */
| 1401 | |
| 1402 | extern unsigned int vm_page_free_wanted; |
| 1403 | /* how many threads are waiting for memory */ |
| 1404 | |
| 1405 | extern unsigned int vm_page_free_wanted_privileged; |
| 1406 | /* how many VM privileged threads are waiting for memory */ |
| 1407 | #if CONFIG_SECLUDED_MEMORY |
| 1408 | extern unsigned int vm_page_free_wanted_secluded; |
| 1409 | /* how many threads are waiting for secluded memory */ |
| 1410 | #endif /* CONFIG_SECLUDED_MEMORY */ |
| 1411 | |
| 1412 | extern const ppnum_t vm_page_fictitious_addr; |
| 1413 | /* (fake) phys_addr of fictitious pages */ |
| 1414 | |
| 1415 | extern const ppnum_t vm_page_guard_addr; |
| 1416 | /* (fake) phys_addr of guard pages */ |
| 1417 | |
| 1418 | |
| 1419 | extern boolean_t vm_page_deactivate_hint; |
| 1420 | |
| 1421 | extern int vm_compressor_mode; |
| 1422 | |
| 1423 | /* |
* Defaults to TRUE, so the highest physical memory is used first.
| 1425 | */ |
| 1426 | extern boolean_t vm_himemory_mode; |
| 1427 | |
| 1428 | extern boolean_t vm_lopage_needed; |
| 1429 | extern uint32_t vm_lopage_free_count; |
| 1430 | extern uint32_t vm_lopage_free_limit; |
| 1431 | extern uint32_t vm_lopage_lowater; |
| 1432 | extern boolean_t vm_lopage_refill; |
| 1433 | extern uint64_t max_valid_dma_address; |
| 1434 | extern ppnum_t max_valid_low_ppnum; |
| 1435 | |
| 1436 | /* |
| 1437 | * Prototypes for functions exported by this module. |
| 1438 | */ |
| 1439 | extern void vm_page_bootstrap( |
| 1440 | vm_offset_t *startp, |
| 1441 | vm_offset_t *endp); |
| 1442 | |
| 1443 | extern void vm_page_init_local_q(unsigned int num_cpus); |
| 1444 | |
| 1445 | extern void vm_page_create( |
| 1446 | ppnum_t start, |
| 1447 | ppnum_t end); |
| 1448 | |
| 1449 | extern void vm_page_create_retired( |
| 1450 | ppnum_t pn); |
| 1451 | |
| 1452 | extern boolean_t vm_page_created( |
| 1453 | vm_page_t page); |
| 1454 | |
| 1455 | extern vm_page_t kdp_vm_page_lookup( |
| 1456 | vm_object_t object, |
| 1457 | vm_object_offset_t offset); |
| 1458 | |
| 1459 | extern vm_page_t vm_page_lookup( |
| 1460 | vm_object_t object, |
| 1461 | vm_object_offset_t offset); |
| 1462 | |
| 1463 | extern vm_page_t vm_page_grab_fictitious(boolean_t canwait); |
| 1464 | |
| 1465 | extern vm_page_t vm_page_grab_guard(boolean_t canwait); |
| 1466 | |
| 1467 | extern void vm_page_release_fictitious( |
| 1468 | vm_page_t page); |
| 1469 | |
| 1470 | extern void vm_free_delayed_pages(void); |
| 1471 | |
| 1472 | extern bool vm_pool_low(void); |
| 1473 | |
| 1474 | extern vm_page_t vm_page_grab(void); |
| 1475 | extern vm_page_t vm_page_grab_options(int flags); |
| 1476 | |
| 1477 | #define VM_PAGE_GRAB_OPTIONS_NONE 0x00000000 |
| 1478 | #if CONFIG_SECLUDED_MEMORY |
| 1479 | #define VM_PAGE_GRAB_SECLUDED 0x00000001 |
| 1480 | #endif /* CONFIG_SECLUDED_MEMORY */ |
| 1481 | #define VM_PAGE_GRAB_Q_LOCK_HELD 0x00000002 |
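
/*
 * Example (sketch, with a hypothetical caller): request a page that
 * may come from the secluded pool when that pool is configured:
 *
 *	int grab_flags = VM_PAGE_GRAB_OPTIONS_NONE;
 *	#if CONFIG_SECLUDED_MEMORY
 *	grab_flags |= VM_PAGE_GRAB_SECLUDED;
 *	#endif
 *	mem = vm_page_grab_options(grab_flags);
 */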
| 1482 | |
| 1483 | extern vm_page_t vm_page_grablo(void); |
| 1484 | |
| 1485 | extern void vm_page_release( |
| 1486 | vm_page_t page, |
| 1487 | boolean_t page_queues_locked); |
| 1488 | |
| 1489 | extern boolean_t vm_page_wait( |
int interruptible);
| 1491 | |
| 1492 | extern vm_page_t vm_page_alloc( |
| 1493 | vm_object_t object, |
| 1494 | vm_object_offset_t offset); |
| 1495 | |
| 1496 | extern void vm_page_init( |
| 1497 | vm_page_t page, |
| 1498 | ppnum_t phys_page, |
| 1499 | boolean_t lopage); |
| 1500 | |
| 1501 | extern void vm_page_free( |
| 1502 | vm_page_t page); |
| 1503 | |
| 1504 | extern void vm_page_free_unlocked( |
| 1505 | vm_page_t page, |
| 1506 | boolean_t remove_from_hash); |
| 1507 | |
| 1508 | extern void vm_page_balance_inactive( |
| 1509 | int max_to_move); |
| 1510 | |
| 1511 | extern void vm_page_activate( |
| 1512 | vm_page_t page); |
| 1513 | |
| 1514 | extern void vm_page_deactivate( |
| 1515 | vm_page_t page); |
| 1516 | |
| 1517 | extern void vm_page_deactivate_internal( |
| 1518 | vm_page_t page, |
| 1519 | boolean_t clear_hw_reference); |
| 1520 | |
| 1521 | extern void vm_page_enqueue_cleaned(vm_page_t page); |
| 1522 | |
| 1523 | extern void vm_page_lru( |
| 1524 | vm_page_t page); |
| 1525 | |
| 1526 | extern void vm_page_speculate( |
| 1527 | vm_page_t page, |
| 1528 | boolean_t new); |
| 1529 | |
| 1530 | extern void vm_page_speculate_ageit( |
| 1531 | struct vm_speculative_age_q *aq); |
| 1532 | |
| 1533 | extern void vm_page_reactivate_all_throttled(void); |
| 1534 | |
| 1535 | extern void vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks); |
| 1536 | |
| 1537 | extern void vm_page_rename( |
| 1538 | vm_page_t page, |
| 1539 | vm_object_t new_object, |
| 1540 | vm_object_offset_t new_offset); |
| 1541 | |
| 1542 | extern void vm_page_insert( |
| 1543 | vm_page_t page, |
| 1544 | vm_object_t object, |
| 1545 | vm_object_offset_t offset); |
| 1546 | |
| 1547 | extern void vm_page_insert_wired( |
| 1548 | vm_page_t page, |
| 1549 | vm_object_t object, |
| 1550 | vm_object_offset_t offset, |
| 1551 | vm_tag_t tag); |
| 1552 | |
| 1553 | extern void vm_page_insert_internal( |
| 1554 | vm_page_t page, |
| 1555 | vm_object_t object, |
| 1556 | vm_object_offset_t offset, |
| 1557 | vm_tag_t tag, |
| 1558 | boolean_t queues_lock_held, |
| 1559 | boolean_t insert_in_hash, |
| 1560 | boolean_t batch_pmap_op, |
| 1561 | boolean_t delayed_accounting, |
| 1562 | uint64_t *delayed_ledger_update); |
| 1563 | |
| 1564 | extern void vm_page_replace( |
| 1565 | vm_page_t mem, |
| 1566 | vm_object_t object, |
| 1567 | vm_object_offset_t offset); |
| 1568 | |
| 1569 | extern void vm_page_remove( |
| 1570 | vm_page_t page, |
| 1571 | boolean_t remove_from_hash); |
| 1572 | |
| 1573 | extern void vm_page_zero_fill( |
| 1574 | vm_page_t page); |
| 1575 | |
| 1576 | extern void vm_page_part_zero_fill( |
| 1577 | vm_page_t m, |
| 1578 | vm_offset_t m_pa, |
| 1579 | vm_size_t len); |
| 1580 | |
| 1581 | extern void vm_page_copy( |
| 1582 | vm_page_t src_page, |
| 1583 | vm_page_t dest_page); |
| 1584 | |
| 1585 | extern void vm_page_part_copy( |
| 1586 | vm_page_t src_m, |
| 1587 | vm_offset_t src_pa, |
| 1588 | vm_page_t dst_m, |
| 1589 | vm_offset_t dst_pa, |
| 1590 | vm_size_t len); |
| 1591 | |
| 1592 | extern void vm_page_wire( |
| 1593 | vm_page_t page, |
| 1594 | vm_tag_t tag, |
| 1595 | boolean_t check_memorystatus); |
| 1596 | |
| 1597 | extern void vm_page_unwire( |
| 1598 | vm_page_t page, |
| 1599 | boolean_t queueit); |
| 1600 | |
| 1601 | extern void vm_set_page_size(void); |
| 1602 | |
| 1603 | extern void vm_page_gobble( |
| 1604 | vm_page_t page); |
| 1605 | |
| 1606 | extern void vm_page_validate_cs( |
| 1607 | vm_page_t page, |
| 1608 | vm_map_size_t fault_page_size, |
| 1609 | vm_map_offset_t fault_phys_offset); |
| 1610 | extern void vm_page_validate_cs_mapped( |
| 1611 | vm_page_t page, |
| 1612 | vm_map_size_t fault_page_size, |
| 1613 | vm_map_offset_t fault_phys_offset, |
| 1614 | const void *kaddr); |
| 1615 | extern void vm_page_validate_cs_mapped_slow( |
| 1616 | vm_page_t page, |
| 1617 | const void *kaddr); |
| 1618 | extern void vm_page_validate_cs_mapped_chunk( |
| 1619 | vm_page_t page, |
| 1620 | const void *kaddr, |
| 1621 | vm_offset_t chunk_offset, |
| 1622 | vm_size_t chunk_size, |
| 1623 | boolean_t *validated, |
| 1624 | unsigned *tainted); |
| 1625 | |
| 1626 | extern void vm_page_free_prepare_queues( |
| 1627 | vm_page_t page); |
| 1628 | |
| 1629 | extern void vm_page_free_prepare_object( |
| 1630 | vm_page_t page, |
| 1631 | boolean_t remove_from_hash); |
| 1632 | |
| 1633 | #if CONFIG_IOSCHED |
| 1634 | extern wait_result_t vm_page_sleep( |
| 1635 | vm_object_t object, |
| 1636 | vm_page_t m, |
| 1637 | int interruptible); |
| 1638 | #endif |
| 1639 | |
| 1640 | extern void vm_pressure_response(void); |
| 1641 | |
| 1642 | #if CONFIG_JETSAM |
| 1643 | extern void memorystatus_pages_update(unsigned int pages_avail); |
| 1644 | |
| 1645 | #define VM_CHECK_MEMORYSTATUS do { \ |
| 1646 | memorystatus_pages_update( \ |
| 1647 | vm_page_pageable_external_count + \ |
| 1648 | vm_page_free_count + \ |
| 1649 | VM_PAGE_SECLUDED_COUNT_OVER_TARGET() + \ |
| 1650 | (VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \ |
| 1651 | ); \ |
| 1652 | } while(0) |
| 1653 | |
| 1654 | #else /* CONFIG_JETSAM */ |
| 1655 | |
| 1656 | #if !XNU_TARGET_OS_OSX |
| 1657 | |
| 1658 | #define VM_CHECK_MEMORYSTATUS do {} while(0) |
| 1659 | |
| 1660 | #else /* !XNU_TARGET_OS_OSX */ |
| 1661 | |
| 1662 | #define VM_CHECK_MEMORYSTATUS vm_pressure_response() |
| 1663 | |
| 1664 | #endif /* !XNU_TARGET_OS_OSX */ |
| 1665 | |
| 1666 | #endif /* CONFIG_JETSAM */ |
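
/*
 * Illustrative use (sketch): after an operation that changes the
 * number of available pages, let memorystatus/jetsam re-evaluate:
 *
 *	vm_page_free(mem);
 *	VM_CHECK_MEMORYSTATUS;
 */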
| 1667 | |
| 1668 | /* |
| 1669 | * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are |
| 1670 | * protected by the object lock. |
| 1671 | */ |
| 1672 | |
| 1673 | #if !XNU_TARGET_OS_OSX |
| 1674 | #define SET_PAGE_DIRTY(m, set_pmap_modified) \ |
| 1675 | MACRO_BEGIN \ |
| 1676 | vm_page_t __page__ = (m); \ |
| 1677 | if (__page__->vmp_pmapped == TRUE && \ |
| 1678 | __page__->vmp_wpmapped == TRUE && \ |
| 1679 | __page__->vmp_dirty == FALSE && \ |
| 1680 | (set_pmap_modified)) { \ |
| 1681 | pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \ |
| 1682 | } \ |
| 1683 | __page__->vmp_dirty = TRUE; \ |
| 1684 | MACRO_END |
| 1685 | #else /* !XNU_TARGET_OS_OSX */ |
| 1686 | #define SET_PAGE_DIRTY(m, set_pmap_modified) \ |
| 1687 | MACRO_BEGIN \ |
| 1688 | vm_page_t __page__ = (m); \ |
| 1689 | __page__->vmp_dirty = TRUE; \ |
| 1690 | MACRO_END |
| 1691 | #endif /* !XNU_TARGET_OS_OSX */ |
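
/*
 * Example (sketch): a writer marks a page dirty while holding the
 * object lock, which protects the vmp_* bits the macro touches:
 *
 *	vm_object_lock(object);
 *	SET_PAGE_DIRTY(m, TRUE);
 *	vm_object_unlock(object);
 */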
| 1692 | |
| 1693 | #define PAGE_ASSERT_WAIT(m, interruptible) \ |
| 1694 | (((m)->vmp_wanted = TRUE), \ |
| 1695 | assert_wait((event_t) (m), (interruptible))) |
| 1696 | |
| 1697 | #if CONFIG_IOSCHED |
| 1698 | #define PAGE_SLEEP(o, m, interruptible) \ |
| 1699 | vm_page_sleep(o, m, interruptible) |
| 1700 | #else |
| 1701 | #define PAGE_SLEEP(o, m, interruptible) \ |
| 1702 | (((m)->vmp_wanted = TRUE), \ |
| 1703 | thread_sleep_vm_object((o), (m), (interruptible))) |
| 1704 | #endif |
| 1705 | |
| 1706 | #define PAGE_WAKEUP_DONE(m) \ |
| 1707 | MACRO_BEGIN \ |
| 1708 | (m)->vmp_busy = FALSE; \ |
| 1709 | if ((m)->vmp_wanted) { \ |
| 1710 | (m)->vmp_wanted = FALSE; \ |
| 1711 | thread_wakeup((event_t) (m)); \ |
| 1712 | } \ |
| 1713 | MACRO_END |
| 1714 | |
| 1715 | #define PAGE_WAKEUP(m) \ |
| 1716 | MACRO_BEGIN \ |
| 1717 | if ((m)->vmp_wanted) { \ |
| 1718 | (m)->vmp_wanted = FALSE; \ |
| 1719 | thread_wakeup((event_t) (m)); \ |
| 1720 | } \ |
| 1721 | MACRO_END |
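
/*
 * Sketch of the busy/wanted handshake these macros implement: a
 * thread that needs a busy page registers interest and blocks, and
 * the owner wakes it once the page is no longer busy:
 *
 *	waiter (object locked):
 *		PAGE_ASSERT_WAIT(m, interruptible);
 *		vm_object_unlock(object);
 *		thread_block(THREAD_CONTINUE_NULL);
 *
 *	owner (object locked):
 *		PAGE_WAKEUP_DONE(m);	... clears vmp_busy, wakes waiters
 */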
| 1722 | |
| 1723 | #define VM_PAGE_FREE(p) \ |
| 1724 | MACRO_BEGIN \ |
| 1725 | vm_page_free_unlocked(p, TRUE); \ |
| 1726 | MACRO_END |
| 1727 | |
| 1728 | #define VM_PAGE_WAIT() ((void)vm_page_wait(THREAD_UNINT)) |
| 1729 | |
| 1730 | #define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2) |
| 1731 | #define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2) |
| 1732 | |
| 1733 | static inline void |
| 1734 | vm_free_page_lock(void) |
| 1735 | { |
lck_mtx_lock(&vm_page_queue_free_lock);
| 1737 | } |
| 1738 | |
| 1739 | static inline void |
| 1740 | vm_free_page_lock_spin(void) |
| 1741 | { |
lck_mtx_lock_spin(&vm_page_queue_free_lock);
| 1743 | } |
| 1744 | |
| 1745 | static inline void |
| 1746 | vm_free_page_unlock(void) |
| 1747 | { |
lck_mtx_unlock(&vm_page_queue_free_lock);
| 1749 | } |
| 1750 | |
| 1751 | |
| 1752 | static inline void |
| 1753 | vm_page_lock_queues(void) |
| 1754 | { |
lck_mtx_lock(&vm_page_queue_lock);
| 1756 | } |
| 1757 | |
| 1758 | static inline boolean_t |
| 1759 | vm_page_trylock_queues(void) |
| 1760 | { |
| 1761 | boolean_t ret; |
ret = lck_mtx_try_lock(&vm_page_queue_lock);
| 1763 | return ret; |
| 1764 | } |
| 1765 | |
| 1766 | static inline void |
| 1767 | vm_page_unlock_queues(void) |
| 1768 | { |
lck_mtx_unlock(&vm_page_queue_lock);
| 1770 | } |
| 1771 | |
| 1772 | static inline void |
| 1773 | vm_page_lockspin_queues(void) |
| 1774 | { |
lck_mtx_lock_spin(&vm_page_queue_lock);
| 1776 | } |
| 1777 | |
| 1778 | static inline boolean_t |
| 1779 | vm_page_trylockspin_queues(void) |
| 1780 | { |
| 1781 | boolean_t ret; |
ret = lck_mtx_try_lock_spin(&vm_page_queue_lock);
| 1783 | return ret; |
| 1784 | } |
| 1785 | #define vm_page_lockconvert_queues() lck_mtx_convert_spin(&vm_page_queue_lock) |
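
/*
 * Example (sketch; "must_block" is a hypothetical condition): short
 * queue manipulations take the page queues lock in spin mode and
 * convert to a full mutex only if the critical section may block:
 *
 *	vm_page_lockspin_queues();
 *	if (must_block)
 *		vm_page_lockconvert_queues();
 *	...manipulate queues...
 *	vm_page_unlock_queues();
 */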
| 1786 | |
| 1787 | #ifdef VPL_LOCK_SPIN |
| 1788 | extern lck_grp_t vm_page_lck_grp_local; |
| 1789 | |
| 1790 | #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr) |
| 1791 | #define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local) |
| 1792 | #define VPL_UNLOCK(vpl) lck_spin_unlock(vpl) |
| 1793 | #else |
| 1794 | #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init(&vlq->vpl_lock, vpl_grp, vpl_attr) |
| 1795 | #define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl) |
| 1796 | #define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl) |
| 1797 | #endif |
| 1798 | |
| 1799 | |
| 1800 | #if DEVELOPMENT || DEBUG |
| 1801 | #define VM_PAGE_SPECULATIVE_USED_ADD() \ |
| 1802 | MACRO_BEGIN \ |
| 1803 | OSAddAtomic(1, &vm_page_speculative_used); \ |
| 1804 | MACRO_END |
| 1805 | #else |
| 1806 | #define VM_PAGE_SPECULATIVE_USED_ADD() |
| 1807 | #endif |
| 1808 | |
| 1809 | |
| 1810 | #define VM_PAGE_CONSUME_CLUSTERED(mem) \ |
| 1811 | MACRO_BEGIN \ |
| 1812 | ppnum_t __phys_page; \ |
| 1813 | __phys_page = VM_PAGE_GET_PHYS_PAGE(mem); \ |
| 1814 | pmap_lock_phys_page(__phys_page); \ |
| 1815 | if (mem->vmp_clustered) { \ |
| 1816 | vm_object_t o; \ |
| 1817 | o = VM_PAGE_OBJECT(mem); \ |
| 1818 | assert(o); \ |
| 1819 | o->pages_used++; \ |
| 1820 | mem->vmp_clustered = FALSE; \ |
| 1821 | VM_PAGE_SPECULATIVE_USED_ADD(); \ |
| 1822 | } \ |
| 1823 | pmap_unlock_phys_page(__phys_page); \ |
| 1824 | MACRO_END |
| 1825 | |
| 1826 | |
| 1827 | #define VM_PAGE_COUNT_AS_PAGEIN(mem) \ |
| 1828 | MACRO_BEGIN \ |
| 1829 | { \ |
| 1830 | vm_object_t o; \ |
| 1831 | o = VM_PAGE_OBJECT(mem); \ |
| 1832 | DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL); \ |
counter_inc(&current_task()->pageins); \
| 1834 | if (o->internal) { \ |
| 1835 | DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \ |
| 1836 | } else { \ |
| 1837 | DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \ |
| 1838 | } \ |
| 1839 | } \ |
| 1840 | MACRO_END |
| 1841 | |
| 1842 | /* adjust for stolen pages accounted elsewhere */ |
| 1843 | #define VM_PAGE_MOVE_STOLEN(page_count) \ |
| 1844 | MACRO_BEGIN \ |
| 1845 | vm_page_stolen_count -= (page_count); \ |
| 1846 | vm_page_wire_count_initial -= (page_count); \ |
| 1847 | MACRO_END |
| 1848 | |
| 1849 | extern kern_return_t pmap_enter_check( |
| 1850 | pmap_t pmap, |
| 1851 | vm_map_address_t virtual_address, |
| 1852 | vm_page_t page, |
| 1853 | vm_prot_t protection, |
| 1854 | vm_prot_t fault_type, |
| 1855 | unsigned int flags, |
| 1856 | boolean_t wired); |
| 1857 | |
| 1858 | #define DW_vm_page_unwire 0x01 |
| 1859 | #define DW_vm_page_wire 0x02 |
| 1860 | #define DW_vm_page_free 0x04 |
| 1861 | #define DW_vm_page_activate 0x08 |
| 1862 | #define DW_vm_page_deactivate_internal 0x10 |
| 1863 | #define DW_vm_page_speculate 0x20 |
| 1864 | #define DW_vm_page_lru 0x40 |
| 1865 | #define DW_vm_pageout_throttle_up 0x80 |
| 1866 | #define DW_PAGE_WAKEUP 0x100 |
| 1867 | #define DW_clear_busy 0x200 |
| 1868 | #define DW_clear_reference 0x400 |
| 1869 | #define DW_set_reference 0x800 |
| 1870 | #define DW_move_page 0x1000 |
| 1871 | #define DW_VM_PAGE_QUEUES_REMOVE 0x2000 |
| 1872 | #define DW_enqueue_cleaned 0x4000 |
| 1873 | #define DW_vm_phantom_cache_update 0x8000 |
| 1874 | |
| 1875 | struct vm_page_delayed_work { |
| 1876 | vm_page_t dw_m; |
| 1877 | int dw_mask; |
| 1878 | }; |
| 1879 | |
| 1880 | #define DEFAULT_DELAYED_WORK_LIMIT 32 |
| 1881 | |
| 1882 | struct vm_page_delayed_work_ctx { |
| 1883 | struct vm_page_delayed_work dwp[DEFAULT_DELAYED_WORK_LIMIT]; |
| 1884 | thread_t delayed_owner; |
| 1885 | }; |
| 1886 | |
| 1887 | void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count); |
| 1888 | |
| 1889 | extern unsigned int vm_max_delayed_work_limit; |
| 1890 | |
#define DELAYED_WORK_LIMIT(max) (vm_max_delayed_work_limit >= (max) ? (max) : vm_max_delayed_work_limit)
| 1892 | |
| 1893 | /* |
| 1894 | * vm_page_do_delayed_work may need to drop the object lock... |
| 1895 | * if it does, we need the pages it's looking at to |
| 1896 | * be held stable via the busy bit, so if busy isn't already |
| 1897 | * set, we need to set it and ask vm_page_do_delayed_work |
* to clear it and wake up anyone that might have blocked on
| 1899 | * it once we're done processing the page. |
| 1900 | */ |
| 1901 | |
| 1902 | #define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt) \ |
| 1903 | MACRO_BEGIN \ |
| 1904 | if (mem->vmp_busy == FALSE) { \ |
| 1905 | mem->vmp_busy = TRUE; \ |
| 1906 | if ( !(dwp->dw_mask & DW_vm_page_free)) \ |
| 1907 | dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \ |
| 1908 | } \ |
| 1909 | dwp->dw_m = mem; \ |
| 1910 | dwp++; \ |
| 1911 | dw_cnt++; \ |
| 1912 | MACRO_END |
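
/*
 * Illustrative batching loop (sketch; "object", "tag" and "mem" are
 * assumed to be in scope): accumulate delayed operations and flush
 * them when the batch fills:
 *
 *	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work *dwp = &dw_array[0];
 *	int dw_count = 0;
 *	int dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *
 *	dwp->dw_mask = DW_vm_page_deactivate_internal;
 *	VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_count);
 *	if (dw_count >= dw_limit) {
 *		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 *		dwp = &dw_array[0];
 *		dw_count = 0;
 *	}
 */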
| 1913 | |
| 1914 | extern vm_page_t vm_object_page_grab(vm_object_t); |
| 1915 | |
| 1916 | #if VM_PAGE_BUCKETS_CHECK |
| 1917 | extern void vm_page_buckets_check(void); |
| 1918 | #endif /* VM_PAGE_BUCKETS_CHECK */ |
| 1919 | |
| 1920 | extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq); |
| 1921 | extern void vm_page_remove_internal(vm_page_t page); |
| 1922 | extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first); |
| 1923 | extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first); |
| 1924 | extern void vm_page_check_pageable_safe(vm_page_t page); |
| 1925 | |
| 1926 | #if CONFIG_SECLUDED_MEMORY |
| 1927 | extern uint64_t secluded_shutoff_trigger; |
| 1928 | extern uint64_t secluded_shutoff_headroom; |
| 1929 | extern void start_secluded_suppression(task_t); |
| 1930 | extern void stop_secluded_suppression(task_t); |
| 1931 | #endif /* CONFIG_SECLUDED_MEMORY */ |
| 1932 | |
| 1933 | extern void vm_retire_boot_pages(void); |
| 1934 | |
| 1935 | |
| 1936 | #define VMP_ERROR_GET(p) ((p)->vmp_error) |
| 1937 | |
| 1938 | |
| 1939 | #endif /* _VM_VM_PAGE_H_ */ |
| 1940 | |