/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef  KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/bits.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef  MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()               ((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_fault_reactivated;

#if CONFIG_FREEZE
extern bool memorystatus_freeze_enabled;

struct freezer_context {
	/*
	 * All these counters & variables track the task
	 * being frozen.
	 * Currently we only freeze one task at a time. Should that
	 * change, we'll need to add support for multiple freezer contexts.
	 */

	task_t  freezer_ctx_task; /* Task being frozen. */

	void    *freezer_ctx_chead; /* The chead used to track c_segs allocated */
	                            /* to freeze the task.*/

	uint64_t        freezer_ctx_swapped_bytes; /* Tracks # of compressed bytes. */

	int     freezer_ctx_uncompressed_pages; /* Tracks # of uncompressed pages frozen. */

	char    *freezer_ctx_compressor_scratch_buf; /* Scratch buffer for the compressor algorithm. */
};
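
/*
 * Illustrative sketch (not part of this interface): a freezer caller
 * would typically zero a freezer_context, bind it to the task being
 * frozen, and accumulate the counters as pages are compressed.  The
 * field uses below are assumptions based on the comments above.
 *
 *	struct freezer_context ctx = { };
 *
 *	ctx.freezer_ctx_task = task;
 *	ctx.freezer_ctx_uncompressed_pages++;
 *	ctx.freezer_ctx_swapped_bytes += compressed_bytes;
 */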

#endif /* CONFIG_FREEZE */

#define VM_DYNAMIC_PAGING_ENABLED() (VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */


/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS         0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS        0x01
#define VM_REAL_FAULT_ADDR_INTERNAL             0x02
#define VM_REAL_FAULT_ADDR_PURGABLE             0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL             0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE          0x05
#define VM_REAL_FAULT_FAST                      0x06
#define VM_REAL_FAULT_SLOW                      0x07
#define VM_MAP_LOOKUP_OBJECT                    0x08



extern int      vm_debug_events;

#define VMF_CHECK_ZFDELAY               0x100
#define VMF_COWDELAY                    0x101
#define VMF_ZFDELAY                     0x102
#define VMF_COMPRESSORDELAY             0x103

#define VM_PAGEOUT_SCAN                 0x104
#define VM_PAGEOUT_BALANCE              0x105
#define VM_PAGEOUT_FREELIST             0x106
#define VM_PAGEOUT_PURGEONE             0x107
#define VM_PAGEOUT_CACHE_EVICT          0x108
#define VM_PAGEOUT_THREAD_BLOCK         0x109
#define VM_PAGEOUT_JETSAM               0x10A
#define VM_INFO1                        0x10B
#define VM_INFO2                        0x10C
#define VM_INFO3                        0x10D
#define VM_INFO4                        0x10E
#define VM_INFO5                        0x10F
#define VM_INFO6                        0x110
#define VM_INFO7                        0x111
#define VM_INFO8                        0x112
#define VM_INFO9                        0x113
#define VM_INFO10                       0x114

#define VM_UPL_PAGE_WAIT                0x120
#define VM_IOPL_PAGE_WAIT               0x121
#define VM_PAGE_WAIT_BLOCK              0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP                   0x123
#define VM_PAGE_EXPEDITE                0x124
#define VM_PAGE_EXPEDITE_NO_MEMORY      0x125
#endif

#define VM_PAGE_GRAB                    0x126
#define VM_PAGE_RELEASE                 0x127
#define VM_COMPRESSOR_COMPACT_AND_SWAP  0x128
#define VM_COMPRESSOR_DO_DELAYED_COMPACTIONS 0x129


#define VM_PRESSURE_EVENT               0x130
#define VM_EXECVE                       0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER     0x132
#define VM_UPL_REQUEST                  0x133
#define VM_IOPL_REQUEST                 0x134
#define VM_KERN_REQUEST                 0x135

#define VM_DATA_WRITE                   0x140

#define VM_PRESSURE_LEVEL_CHANGE        0x141

#define VM_PHYS_WRITE_ACCT              0x142

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)    \
	MACRO_BEGIN                                             \
	if (__improbable(vm_debug_events)) {                    \
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}                                                       \
	MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)   \
	MACRO_BEGIN                                             \
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END
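
/*
 * Example (hypothetical call site): bracket the pageout scan with a
 * kdebug tracepoint, gated on vm_debug_events.  DBG_FUNC_START and
 * DBG_FUNC_END are the usual kdebug control values.
 *
 *	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
 *	    vm_page_free_count, 0, 0, 0);
 *	...
 *	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
 *	    vm_page_free_count, 0, 0, 0);
 */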

extern void memoryshot(unsigned int event, unsigned int control);

extern void update_vm_info(void);

#if CONFIG_IOSCHED
extern int upl_get_cached_tier(
	upl_t                   upl);
#endif

extern void upl_set_iodone(upl_t, void *);
extern void upl_set_iodone_error(upl_t, int);
extern void upl_callout_iodone(upl_t);

extern ppnum_t upl_get_highest_page(
	upl_t                   upl);

extern upl_size_t upl_get_size(
	upl_t                   upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page  *vm_page_t;
#endif
#ifdef  XNU_KERNEL_PRIVATE
#include <vm/vm_kern.h>

extern upl_size_t upl_adjusted_size(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_adjusted_offset(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_get_data_offset(
	upl_t upl);

extern kern_return_t vm_map_create_upl(
	vm_map_t                map,
	vm_map_address_t        offset,
	upl_size_t              *upl_size,
	upl_t                   *upl,
	upl_page_info_array_t   page_list,
	unsigned int            *count,
	upl_control_flags_t     *flags,
	vm_tag_t                tag);

extern void iopl_valid_data(
	upl_t                   upl_ptr,
	vm_tag_t                tag);

extern void               vm_page_free_list(
	vm_page_t   mem,
	boolean_t   prepare_object);

extern kern_return_t vm_page_alloc_list(
	vm_size_t   page_count,
	kma_flags_t flags,
	vm_page_t  *list);
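
/*
 * Illustrative pairing of the two routines above (assumptions: KMA_ZERO
 * is an acceptable flag here and the caller owns the returned chain,
 * which is linked via vm_page_get_next()).
 *
 *	vm_page_t list;
 *
 *	if (vm_page_alloc_list(npages, KMA_ZERO, &list) == KERN_SUCCESS) {
 *	        for (vm_page_t m = list; m != VM_PAGE_NULL; m = vm_page_get_next(m)) {
 *	                ... initialize the page ...
 *	        }
 *	        vm_page_free_list(list, FALSE);
 *	}
 */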

#endif  /* XNU_KERNEL_PRIVATE */

extern struct vnode * upl_lookup_vnode(upl_t upl);

extern void               vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t            vm_page_get_phys_page(vm_page_t page);
extern vm_page_t          vm_page_get_next(vm_page_t page);

extern kern_return_t    mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);
#if KERNEL_PRIVATE
extern kern_return_t    mach_vm_wire_level_monitor(int64_t requested_pages);
#endif /* KERNEL_PRIVATE */

#if XNU_TARGET_OS_OSX
extern kern_return_t    vm_pageout_wait(uint64_t deadline);
#endif /* XNU_TARGET_OS_OSX */

#ifdef  MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int     vm_pageout_scan_event_counter;
extern unsigned int     vm_page_anonymous_count;
extern thread_t         vm_pageout_scan_thread;
extern thread_t         vm_pageout_gc_thread;

#define VM_PAGEOUT_GC_INIT      ((void *)0)
#define VM_PAGEOUT_GC_COLLECT   ((void *)1)
#define VM_PAGEOUT_GC_EVENT     ((event_t)&vm_pageout_garbage_collect)
extern void vm_pageout_garbage_collect(void *, wait_result_t);


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t pgo_pending;  /* laundry pages to be processed by pager's iothread */
	unsigned int    pgo_laundry;       /* current count of laundry pages on queue or in flight */
	unsigned int    pgo_maxlaundry;

	uint32_t
	    pgo_busy:1,        /* iothread is currently processing request from pgo_pending */
	    pgo_throttled:1,   /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
	    pgo_lowpriority:1, /* iothread is set to use low priority I/O */
	    pgo_draining:1,    /* queue is being drained; waiter wants a wakeup when pgo_laundry reaches 0 */
	    pgo_inited:1,      /* queue has been initialized */
	    pgo_unused_bits:27;
};

#define VM_PAGE_Q_THROTTLED(q)          \
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct   vm_pageout_queue        vm_pageout_queue_internal;
extern struct   vm_pageout_queue        vm_pageout_queue_external;
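
/*
 * Example (illustrative, vm_pageout_scan-style): check whether the
 * internal queue is saturated before queueing more laundry.  The page
 * queues lock must be held, per the comment on the structure above.
 *
 *	struct vm_pageout_queue *iq = &vm_pageout_queue_internal;
 *
 *	if (VM_PAGE_Q_THROTTLED(iq)) {
 *	        iq->pgo_throttled = TRUE;
 *	        ... block until the iothread drops pgo_laundry and wakes us ...
 *	}
 */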


/*
 *	Routines exported to Mach.
 */
extern void             vm_pageout(void);

__startup_func extern void             vm_config_init(void);

extern kern_return_t    vm_pageout_internal_start(void);

extern void             vm_pageout_object_terminate(
	vm_object_t     object);

extern void             vm_pageout_cluster(
	vm_page_t       m);

extern void             vm_pageout_initialize_page(
	vm_page_t       m);

/* UPL exported routines and structures */

#define upl_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)        lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)      lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)    lck_mtx_try_lock(&(object)->Lock)
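
/*
 * Example (illustrative): the canonical pattern for these wrappers
 * when mutating a upl's state.
 *
 *	upl_lock(upl);
 *	upl->flags |= UPL_CLEAR_DIRTY;
 *	upl_unlock(upl);
 */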

struct _vector_upl_iostates {
	upl_offset_t offset;
	upl_size_t   size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t              size;
	uint32_t                num_upls;
	uint32_t                invalid_upls;
	uint32_t                max_upls;
	vm_map_t                submap;
	vm_offset_t             submap_dst_addr;
	vm_object_offset_t      offset;
	upl_page_info_array_t   pagelist;
	struct {
		upl_t                   elem;
		vector_upl_iostates_t   iostate;
	} upls[];
};

typedef struct _vector_upl* vector_upl_t;
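
/*
 * Sizing sketch (an assumption about the allocation, implied by the
 * flexible array member above): a vector upl able to hold 'n' subupls
 * needs the header plus 'n' elem/iostate pairs.
 *
 *	bytes = sizeof(struct _vector_upl) + n * sizeof(vector_upl->upls[0]);
 */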

uint32_t vector_upl_max_upls(const upl_t upl);

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t    c_beg;
	upl_offset_t    c_end;
	int             c_aborted;
	uint32_t        c_btref; /* btref_t */
};
#endif

struct upl_io_completion {
	void     *io_context;
	void     (*io_done)(void *, int);

	int      io_error;
};


struct upl {
	decl_lck_mtx_data(, Lock);      /* Synchronization */
	int             ref_count;
	int             ext_ref_count;
	int             flags;
	/*
	 * XXX CAUTION: to accommodate devices with "mixed page sizes",
	 * u_offset and u_size are now byte-aligned and no longer
	 * page-aligned, on all devices.
	 */
	vm_object_offset_t u_offset;
	upl_size_t      u_size;       /* size in bytes of the address space */
	upl_size_t      u_mapped_size;       /* size in bytes of the UPL that is mapped */
	vm_offset_t     kaddr;      /* secondary mapping in kernel */
	vm_object_t     map_object;
	vector_upl_t    vector_upl;
	upl_t           associated_upl;
	struct upl_io_completion *upl_iodone;
	ppnum_t         highest_page;
#if CONFIG_IOSCHED
	int             upl_priority;
	uint64_t        *upl_reprio_info;
	void            *decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t        upl_creator;
	queue_chain_t   uplq;       /* List of outstanding upls on an obj */
#endif
#if     UPL_DEBUG
	uintptr_t       ubc_alias1;
	uintptr_t       ubc_alias2;

	uint32_t        upl_state;
	uint32_t        upl_commit_index;
	uint32_t        upl_create_btref; /* btref_t */

	struct  ucd     upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif  /* UPL_DEBUG */

	bitmap_t       *lite_list;
	struct upl_page_info page_list[];
};
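
/*
 * Layout note (illustrative; the UPL creation paths do the actual
 * allocation): page_list is a variable-length tail, and lite_list,
 * when UPL_LITE is set, typically points at a bitmap carved out of the
 * same allocation.  So a upl covering 'pages' pages occupies roughly
 *
 *	sizeof(struct upl)
 *	    + pages * sizeof(struct upl_page_info)   (if UPL_INTERNAL)
 *	    + space for a 'pages'-bit bitmap         (if UPL_LITE)
 *
 * where UPL_INTERNAL and UPL_LITE are the upl struct flags defined below.
 */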

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED    0x1
#define UPL_KERNEL_MAPPED       0x2
#define UPL_CLEAR_DIRTY         0x4
#define UPL_COMPOSITE_LIST      0x8
#define UPL_INTERNAL            0x10
#define UPL_PAGE_SYNC_DONE      0x20
#define UPL_DEVICE_MEMORY       0x40
#define UPL_PAGEOUT             0x80
#define UPL_LITE                0x100
#define UPL_IO_WIRE             0x200
#define UPL_ACCESS_BLOCKED      0x400
#define UPL_SHADOWED            0x1000
#define UPL_KERNEL_OBJECT       0x2000
#define UPL_VECTOR              0x4000
#define UPL_SET_DIRTY           0x8000
#define UPL_HAS_BUSY            0x10000
#define UPL_TRACKED_BY_OBJECT   0x20000
#define UPL_EXPEDITE_SUPPORTED  0x40000
#define UPL_DECMP_REQ           0x80000
#define UPL_DECMP_REAL_IO       0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL     0
#define UPL_CREATE_INTERNAL     0x1
#define UPL_CREATE_LITE         0x2
#define UPL_CREATE_IO_TRACKING  0x4
#define UPL_CREATE_EXPEDITE_SUP 0x8

extern upl_t vector_upl_create(vm_offset_t, uint32_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);

extern void vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_size_t              super_cluster,
	upl_t                   *upl,
	upl_page_info_t         *user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t                map,
	upl_t                   upl,
	vm_map_offset_t         *dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t                map,
	upl_t                   upl);

extern kern_return_t vm_map_enter_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size,
	vm_prot_t               prot,
	vm_map_offset_t         *dst_addr);

extern kern_return_t vm_map_remove_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size);

extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

extern kern_return_t vm_paging_map_object(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection,
	boolean_t               can_unlock_object,
	vm_map_size_t           *size,          /* IN/OUT */
	vm_map_offset_t         *address,       /* OUT */
	boolean_t               *need_unmap);   /* OUT */
extern void vm_paging_unmap_object(
	vm_object_t             object,
	vm_map_offset_t         start,
	vm_map_offset_t         end);
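
/*
 * Usage sketch (hypothetical caller): map one page, honoring the
 * need_unmap contract, i.e. only tear the mapping down if one was
 * actually created.
 *
 *	vm_map_size_t   size = PAGE_SIZE;
 *	vm_map_offset_t addr;
 *	boolean_t       need_unmap;
 *	kern_return_t   kr;
 *
 *	kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *	    FALSE, &size, &addr, &need_unmap);
 *	if (kr == KERN_SUCCESS) {
 *	        ... access the page through 'addr' ...
 *	        if (need_unmap) {
 *	                vm_paging_unmap_object(object, addr, addr + size);
 *	        }
 *	}
 */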
decl_simple_lock_data(extern, vm_paging_lock);

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int    vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);

#endif  /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t  upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int  upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t       suspend);

extern kern_return_t upl_transpose(
	upl_t   upl1,
	upl_t   upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t       wait_for_pressure,
	unsigned int    nsecs_monitored,
	unsigned int    *pages_reclaimed_p,
	unsigned int    *pages_wanted_p);
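
/*
 * Usage sketch (illustrative): block until memory pressure is seen,
 * then read back how many pages were reclaimed during the monitored
 * window and how many are still wanted.
 *
 *	unsigned int reclaimed, wanted;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_pressure_monitor(TRUE, 30, &reclaimed, &wanted);
 */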

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t       (*func)(int));

struct vm_page_stats_reusable {
	SInt32          reusable_count;
	uint64_t        reusable;
	uint64_t        reused;
	uint64_t        reused_wire;
	uint64_t        reused_remove;
	uint64_t        all_reusable_calls;
	uint64_t        partial_reusable_calls;
	uint64_t        all_reuse_calls;
	uint64_t        partial_reuse_calls;
	uint64_t        reusable_pages_success;
	uint64_t        reusable_pages_failure;
	uint64_t        reusable_pages_shared;
	uint64_t        reuse_pages_success;
	uint64_t        reuse_pages_failure;
	uint64_t        can_reuse_success;
	uint64_t        can_reuse_failure;
	uint64_t        reusable_reclaimed;
	uint64_t        reusable_nonwritable;
	uint64_t        reusable_shared;
	uint64_t        free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(unsigned int num_cpus);

extern int vm_compressor_mode;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);
extern int vm_toggle_task_selfdonate_pages(task_t);
extern void vm_task_set_selfdonate_pages(task_t, bool);

struct  vm_config {
	boolean_t       compressor_is_present;          /* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       compressor_is_active;           /* pager can actively compress pages...  'compressor_is_present' must be set */
	boolean_t       swap_is_present;                /* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       swap_is_active;                 /* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t       freezer_swap_is_active;         /* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern  struct vm_config        vm_config;


#define VM_PAGER_NOT_CONFIGURED                 0x0     /* no compressor or swap configured */
#define VM_PAGER_DEFAULT                        0x1     /* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP             0x2     /* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP           0x4     /* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT                0x8     /* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP     0x10    /* Freezer backed by in-core compressor only i.e. frozen data remain in-core compressed.*/
#define VM_PAGER_COMPRESSOR_WITH_SWAP_AND_FREEZER_COMPRESSOR_WITH_SWAP 0x20 /* Active in-core compressor + Freezer backed by in-core compressor with swap support too.*/

#define VM_PAGER_MAX_MODES                      6       /* Total number of vm compressor modes supported */


#define VM_CONFIG_COMPRESSOR_IS_PRESENT         (vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE          (vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT               (vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE                (vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE        (vm_config.freezer_swap_is_active == TRUE)
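
/*
 * Example (illustrative): the intended gating, per the comments in
 * struct vm_config above; each "active" flag presumes its matching
 * "present" flag.
 *
 *	if (VM_CONFIG_SWAP_IS_ACTIVE) {
 *	        assert(VM_CONFIG_SWAP_IS_PRESENT);
 *	        ... swap out compressed segments ...
 *	}
 */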

#endif  /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

struct vm_pageout_state {
	boolean_t vm_pressure_thread_running;
	boolean_t vm_pressure_changed;
	boolean_t vm_restricted_to_single_processor;
	int vm_compressor_thread_count;

	unsigned int vm_page_speculative_q_age_ms;
	unsigned int vm_page_speculative_percentage;
	unsigned int vm_page_speculative_target;

	unsigned int vm_pageout_swap_wait;
	unsigned int vm_pageout_idle_wait;      /* milliseconds */
	unsigned int vm_pageout_empty_wait;     /* milliseconds */
	unsigned int vm_pageout_burst_wait;     /* milliseconds */
	unsigned int vm_pageout_deadlock_wait;  /* milliseconds */
	unsigned int vm_pageout_deadlock_relief;
	unsigned int vm_pageout_burst_inactive_throttle;

	unsigned int vm_pageout_inactive;
	unsigned int vm_pageout_inactive_used;  /* debugging */
	unsigned int vm_pageout_inactive_clean; /* debugging */

	uint32_t vm_page_filecache_min;
	uint32_t vm_page_filecache_min_divisor;
	uint32_t vm_page_xpmapped_min;
	uint32_t vm_page_xpmapped_min_divisor;
	uint64_t vm_pageout_considered_page_last;

	int vm_page_free_count_init;

	unsigned int vm_memory_pressure;

	int memorystatus_purge_on_critical;
	int memorystatus_purge_on_warning;
	int memorystatus_purge_on_urgent;

	thread_t vm_pageout_early_swapout_iothread;
};

extern struct vm_pageout_state vm_pageout_state;

/*
 * This structure is used to track the VM_INFO instrumentation
 */
struct vm_pageout_vminfo {
	unsigned long vm_pageout_considered_page;
	unsigned long vm_pageout_considered_bq_internal;
	unsigned long vm_pageout_considered_bq_external;
	unsigned long vm_pageout_skipped_external;
	unsigned long vm_pageout_skipped_internal;

	unsigned long vm_pageout_pages_evicted;
	unsigned long vm_pageout_pages_purged;
	unsigned long vm_pageout_freed_cleaned;
	unsigned long vm_pageout_freed_speculative;
	unsigned long vm_pageout_freed_external;
	unsigned long vm_pageout_freed_internal;
	unsigned long vm_pageout_inactive_dirty_internal;
	unsigned long vm_pageout_inactive_dirty_external;
	unsigned long vm_pageout_inactive_referenced;
	unsigned long vm_pageout_reactivation_limit_exceeded;
	unsigned long vm_pageout_inactive_force_reclaim;
	unsigned long vm_pageout_inactive_nolock;
	unsigned long vm_pageout_filecache_min_reactivated;
	unsigned long vm_pageout_scan_inactive_throttled_internal;
	unsigned long vm_pageout_scan_inactive_throttled_external;

	uint64_t      vm_pageout_compressions;
	uint64_t      vm_compressor_pages_grabbed;
	unsigned long vm_compressor_failed;

	unsigned long vm_page_pages_freed;

	unsigned long vm_phantom_cache_found_ghost;
	unsigned long vm_phantom_cache_added_ghost;

	unsigned long vm_pageout_protected_sharedcache;
	unsigned long vm_pageout_forcereclaimed_sharedcache;
	unsigned long vm_pageout_protected_realtime;
	unsigned long vm_pageout_forcereclaimed_realtime;
};

extern struct vm_pageout_vminfo vm_pageout_vminfo;

extern void vm_swapout_thread(void);

#if DEVELOPMENT || DEBUG

/*
 *	This structure records the pageout daemon's actions:
 *	how many pages it looks at and what happens to those pages.
 *	No locking needed because only one thread modifies the fields.
 */
struct vm_pageout_debug {
	uint32_t vm_pageout_balanced;
	uint32_t vm_pageout_scan_event_counter;
	uint32_t vm_pageout_speculative_dirty;

	uint32_t vm_pageout_inactive_busy;
	uint32_t vm_pageout_inactive_absent;
	uint32_t vm_pageout_inactive_notalive;
	uint32_t vm_pageout_inactive_error;
	uint32_t vm_pageout_inactive_deactivated;

	uint32_t vm_pageout_enqueued_cleaned;

	uint32_t vm_pageout_cleaned_busy;
	uint32_t vm_pageout_cleaned_nolock;
	uint32_t vm_pageout_cleaned_reference_reactivated;
	uint32_t vm_pageout_cleaned_volatile_reactivated;
	uint32_t vm_pageout_cleaned_reactivated;  /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
	uint32_t vm_pageout_cleaned_fault_reactivated;

	uint32_t vm_pageout_dirty_no_pager;
	uint32_t vm_pageout_purged_objects;

	uint32_t vm_pageout_scan_throttle;
	uint32_t vm_pageout_scan_reclaimed_throttled;
	uint32_t vm_pageout_scan_burst_throttle;
	uint32_t vm_pageout_scan_empty_throttle;
	uint32_t vm_pageout_scan_swap_throttle;
	uint32_t vm_pageout_scan_deadlock_detected;
	uint32_t vm_pageout_scan_inactive_throttle_success;
	uint32_t vm_pageout_scan_throttle_deferred;

	uint32_t vm_pageout_inactive_external_forced_jetsam_count;

	uint32_t vm_grab_anon_overrides;
	uint32_t vm_grab_anon_nops;

	uint32_t vm_pageout_no_victim;
	uint32_t vm_pageout_yield_for_free_pages;
	unsigned long vm_pageout_throttle_up_count;
	uint32_t vm_page_steal_pageout_page;

	uint32_t vm_cs_validated_resets;
	uint32_t vm_object_iopl_request_sleep_for_cleaning;
	uint32_t vm_page_slide_counter;
	uint32_t vm_page_slide_errors;
	uint32_t vm_page_throttle_count;
	/*
	 * Statistics about UPL enforcement of copy-on-write obligations.
	 */
	unsigned long upl_cow;
	unsigned long upl_cow_again;
	unsigned long upl_cow_pages;
	unsigned long upl_cow_again_pages;
	unsigned long iopl_cow;
	unsigned long iopl_cow_pages;
};

extern struct vm_pageout_debug vm_pageout_debug;

#define VM_PAGEOUT_DEBUG(member, value)                 \
	MACRO_BEGIN                                     \
	        vm_pageout_debug.member += value;       \
	MACRO_END
#else /* DEVELOPMENT || DEBUG */
#define VM_PAGEOUT_DEBUG(member, value)
#endif /* DEVELOPMENT || DEBUG */
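
/*
 * Example (hypothetical call site): bump a pageout-scan counter; the
 * macro compiles away entirely on non-DEVELOPMENT/DEBUG kernels.
 *
 *	VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
 */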

#define MAX_COMPRESSOR_THREAD_COUNT      8

/*
 * Forward declarations for internal routines.
 */

/*
 * Contains relevant state for pageout iothreads. Some of this state is
 * unused by the external (file-backed) iothread.
 */
struct pgo_iothread_state {
	struct vm_pageout_queue *q;
	// cheads unused by external thread
	void                    *current_early_swapout_chead;
	void                    *current_regular_swapout_chead;
	void                    *current_late_swapout_chead;
	char                    *scratch_buf;
	int                     id;
	thread_t                pgo_iothread; // holds a +1 ref
	sched_cond_atomic_t     pgo_wakeup;
#if DEVELOPMENT || DEBUG
	// for perf_compressor benchmark
	struct vm_pageout_queue *benchmark_q;
#endif /* DEVELOPMENT || DEBUG */
};

extern struct pgo_iothread_state pgo_iothread_internal_state[MAX_COMPRESSOR_THREAD_COUNT];

extern struct pgo_iothread_state pgo_iothread_external_state;

struct vm_compressor_swapper_stats {
	uint64_t unripe_under_30s;
	uint64_t unripe_under_60s;
	uint64_t unripe_under_300s;
	uint64_t reclaim_swapins;
	uint64_t defrag_swapins;
	uint64_t compressor_swap_threshold_exceeded;
	uint64_t external_q_throttled;
	uint64_t free_count_below_reserve;
	uint64_t thrashing_detected;
	uint64_t fragmentation_detected;
};
extern struct vm_compressor_swapper_stats vmcs_stats;

#if DEVELOPMENT || DEBUG
typedef struct vmct_stats_s {
	uint64_t vmct_runtimes[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_pages[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_iterations[MAX_COMPRESSOR_THREAD_COUNT];
	// total mach absolute time that compressor threads have been running
	uint64_t vmct_cthreads_total;
	int32_t vmct_minpages[MAX_COMPRESSOR_THREAD_COUNT];
	int32_t vmct_maxpages[MAX_COMPRESSOR_THREAD_COUNT];
} vmct_stats_t;
#endif /* DEVELOPMENT || DEBUG */
#endif /* XNU_KERNEL_PRIVATE */
#endif  /* _VM_VM_PAGEOUT_H_ */