/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _VM_VM_COMPRESSOR_H_
#define _VM_VM_COMPRESSOR_H_

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>
#include <vm/WKdm_new.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <machine/pmap.h>
#include <kern/locks.h>

#include <sys/kdebug.h>

#if defined(__arm64__)
#include <arm64/proc_reg.h>
#endif
#define C_SEG_OFFSET_BITS       16

#define C_SEG_MAX_POPULATE_SIZE (4 * PAGE_SIZE)

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)

#if defined(XNU_PLATFORM_WatchOS)
#define VALIDATE_C_SEGMENTS (1)
#endif
#endif /* defined(__arm64__) && (DEVELOPMENT || DEBUG) */


#if DEBUG || COMPRESSOR_INTEGRITY_CHECKS
#define ENABLE_SWAP_CHECKS 1
#define ENABLE_COMPRESSOR_CHECKS 1
#define POPCOUNT_THE_COMPRESSED_DATA (1)
#else
#define ENABLE_SWAP_CHECKS 0
#define ENABLE_COMPRESSOR_CHECKS 0
#endif

#define CHECKSUM_THE_SWAP               ENABLE_SWAP_CHECKS       /* Debug swap data */
#define CHECKSUM_THE_DATA               ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor data */
#define CHECKSUM_THE_COMPRESSED_DATA    ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor compressed data */

#ifndef VALIDATE_C_SEGMENTS
#define VALIDATE_C_SEGMENTS             ENABLE_COMPRESSOR_CHECKS /* Debug compaction */
#endif

#define RECORD_THE_COMPRESSED_DATA 0
/*
 * The c_slot structure embeds a packed pointer to a c_slot_mapping
 * (32 bits), which we ideally want to span as much VA space as possible
 * so as not to limit zalloc in how it sets itself up.
 */
#if !defined(__LP64__) /* no packing */
#define C_SLOT_PACKED_PTR_BITS          32
#define C_SLOT_PACKED_PTR_SHIFT         0
#define C_SLOT_PACKED_PTR_BASE          0

#define C_SLOT_C_SIZE_BITS              12
#define C_SLOT_C_CODEC_BITS             1
#define C_SLOT_C_POPCOUNT_BITS          0
#define C_SLOT_C_PADDING_BITS           3

#elif defined(__arm64__) /* 32G from the heap start */
#define C_SLOT_PACKED_PTR_BITS          33
#define C_SLOT_PACKED_PTR_SHIFT         2
#define C_SLOT_PACKED_PTR_BASE          ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)

#define C_SLOT_C_SIZE_BITS              14
#define C_SLOT_C_CODEC_BITS             1
#define C_SLOT_C_POPCOUNT_BITS          0
#define C_SLOT_C_PADDING_BITS           0

#elif defined(__x86_64__) /* 256G from the heap start */
#define C_SLOT_PACKED_PTR_BITS          36
#define C_SLOT_PACKED_PTR_SHIFT         2
#define C_SLOT_PACKED_PTR_BASE          ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)

#define C_SLOT_C_SIZE_BITS              12
#define C_SLOT_C_CODEC_BITS             0 /* not used */
#define C_SLOT_C_POPCOUNT_BITS          0
#define C_SLOT_C_PADDING_BITS           0

#else
#error vm_compressor parameters undefined for this architecture
#endif
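
/*
 * Illustrative sketch, not part of the interface: with the base-plus-shift
 * encoding assumed by the parameters above, a c_slot_mapping pointer is
 * rebased against C_SLOT_PACKED_PTR_BASE and stripped of its alignment bits
 * so that it fits in C_SLOT_PACKED_PTR_BITS. The helper names below are
 * hypothetical; the kernel uses its own pointer-packing machinery.
 */
static inline uint64_t
example_c_slot_pack_ptr(uintptr_t ptr)
{
	/* rebase against the heap start, then drop the alignment bits */
	return (uint64_t)(ptr - C_SLOT_PACKED_PTR_BASE) >> C_SLOT_PACKED_PTR_SHIFT;
}

static inline uintptr_t
example_c_slot_unpack_ptr(uint64_t packed)
{
	/* restore the alignment bits, then rebase */
	return (uintptr_t)(packed << C_SLOT_PACKED_PTR_SHIFT) + C_SLOT_PACKED_PTR_BASE;
}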

/*
 * The popcount needs to represent both 0 and "all bits set" in the largest
 * possible slot, which requires (8 << C_SLOT_C_SIZE_BITS) + 1 values and
 * hence (C_SLOT_C_SIZE_BITS + 4) bits.
 *
 * We use the value (2 * (8 << C_SLOT_C_SIZE_BITS)) - 1, i.e. the all-ones
 * bit pattern, to mean "unknown". For example, with C_SLOT_C_SIZE_BITS == 12
 * the popcount spans 0..32768 in a 16-bit field and "unknown" is 65535.
 */
#define C_SLOT_NO_POPCOUNT      ((16u << C_SLOT_C_SIZE_BITS) - 1)

static_assert((C_SEG_OFFSET_BITS + C_SLOT_C_SIZE_BITS +
    C_SLOT_C_CODEC_BITS + C_SLOT_C_POPCOUNT_BITS +
    C_SLOT_C_PADDING_BITS + C_SLOT_PACKED_PTR_BITS) % 32 == 0);
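/*
 * Worked check of the assertion above: arm64 is 16 + 14 + 1 + 0 + 0 + 33 == 64,
 * x86_64 is 16 + 12 + 0 + 0 + 0 + 36 == 64, and !__LP64__ is
 * 16 + 12 + 1 + 0 + 3 + 32 == 64, i.e. two 32-bit words in every case.
 */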

struct c_slot {
	uint64_t        c_offset:C_SEG_OFFSET_BITS __kernel_ptr_semantics;
	uint64_t        c_size:C_SLOT_C_SIZE_BITS;
#if C_SLOT_C_CODEC_BITS
	uint64_t        c_codec:C_SLOT_C_CODEC_BITS;
#endif
#if C_SLOT_C_POPCOUNT_BITS
	/*
	 * This value may not agree with c_pop_cdata, as it may be the
	 * population count of the uncompressed data.
	 *
	 * This value must be C_SLOT_NO_POPCOUNT when the compression algorithm
	 * cannot provide it.
	 */
	uint32_t        c_inline_popcount:C_SLOT_C_POPCOUNT_BITS;
#endif
#if C_SLOT_C_PADDING_BITS
	uint64_t        c_padding:C_SLOT_C_PADDING_BITS;
#endif
	uint64_t        c_packed_ptr:C_SLOT_PACKED_PTR_BITS __kernel_ptr_semantics;

	/* debugging fields, typically not present on release kernels */
#if CHECKSUM_THE_DATA
	unsigned int    c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	unsigned int    c_hash_compressed_data;
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	unsigned int    c_pop_cdata;
#endif
} __attribute__((packed, aligned(4)));

#define C_IS_EMPTY              0
#define C_IS_FREE               1
#define C_IS_FILLING            2
#define C_ON_AGE_Q              3
#define C_ON_SWAPOUT_Q          4
#define C_ON_SWAPPEDOUT_Q       5
#define C_ON_SWAPPEDOUTSPARSE_Q 6
#define C_ON_SWAPPEDIN_Q        7
#define C_ON_MAJORCOMPACT_Q     8
#define C_ON_BAD_Q              9
#define C_ON_SWAPIO_Q           10


struct c_segment {
	lck_mtx_t       c_lock;
	queue_chain_t   c_age_list;
	queue_chain_t   c_list;

#if CONFIG_FREEZE
	queue_chain_t   c_task_list_next_cseg;
	task_t          c_task_owner;
#endif /* CONFIG_FREEZE */

#define C_SEG_MAX_LIMIT         (UINT_MAX) /* this needs to track the size of c_mysegno */
	uint32_t        c_mysegno;

	uint32_t        c_creation_ts;
	uint64_t        c_generation_id;

	int32_t         c_bytes_used;
	int32_t         c_bytes_unused;
	uint32_t        c_slots_used;

	uint16_t        c_firstemptyslot;
	uint16_t        c_nextslot;
	uint32_t        c_nextoffset;
	uint32_t        c_populated_offset;

	union {
		int32_t         *c_buffer;      /* in-core: base of the compressed-data buffer */
		uint64_t        c_swap_handle;  /* on-disk: handle locating the swapped-out segment */
	} c_store;

#if VALIDATE_C_SEGMENTS
	uint32_t        c_was_minor_compacted;
	uint32_t        c_was_major_compacted;
	uint32_t        c_was_major_donor;
#endif
#if CHECKSUM_THE_SWAP
	unsigned int    cseg_hash;
	unsigned int    cseg_swap_size;
#endif /* CHECKSUM_THE_SWAP */

	thread_t        c_busy_for_thread;
	uint32_t        c_agedin_ts;
	uint32_t        c_swappedin_ts;
	bool            c_swappedin;
	/*
	 * Do not fold c_swappedin (above) into the bitfield below.
	 * It is updated without always holding the segment lock; we rely on
	 * the segment being busy instead. The bitfield, by contrast, requires
	 * the segment lock, so if this state lived there, lockless updates
	 * would race with updates to the other fields and corrupt them.
	 */
	uint32_t        c_busy:1,
	    c_busy_swapping:1,
	    c_wanted:1,
	    c_on_minorcompact_q:1,      /* can also be on the age_q, the majorcompact_q or the swappedin_q */

	    c_state:4,                  /* the segment's state, which dictates which queue it is found on */
	    c_overage_swap:1,
	    c_has_donated_pages:1,
#if CONFIG_FREEZE
	    c_has_freezer_pages:1,
	    c_reserved:21;
#else /* CONFIG_FREEZE */
	    c_reserved:22;
#endif /* CONFIG_FREEZE */

	int             c_slot_var_array_len;
	struct c_slot   *c_slot_var_array;
	struct c_slot   c_slot_fixed_array[0];
};


struct c_slot_mapping {
#if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
	uint32_t        s_cseg:22,      /* segment number + 1 */
	    s_cindx:10;                 /* index in the segment */
#else /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
	uint32_t        s_cseg:21,      /* segment number + 1 */
	    s_cindx:10,                 /* index in the segment */
	    s_uncompressed:1;           /* The page resides uncompressed in a swapfile.
	                                 * This can happen in two ways:
	                                 * 1) The page used to be in the compressor, was decompressed, was not
	                                 *    modified, and so was pushed uncompressed to a different swapfile on disk.
	                                 * 2) The page was in its uncompressed form in a swapfile on disk. It was swapped
	                                 *    in but not modified. As we are about to reclaim it, we notice that this bit
	                                 *    is set in its current slot, so we can safely toss this clean anonymous page
	                                 *    because its copy exists on disk.
	                                 */
#endif /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
};
#define C_SLOT_MAX_INDEX        (1 << 10)

typedef struct c_slot_mapping *c_slot_mapping_t;
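
/*
 * Illustrative sketch (the helper name is hypothetical, not part of the
 * interface): a c_slot_mapping names a compressed page by segment and slot.
 * Per the comments above, s_cseg holds the segment number biased by one,
 * which keeps an all-zero mapping distinguishable from any valid slot.
 */
static inline void
example_slot_mapping_decode(c_slot_mapping_t slot_ptr, uint32_t *segno, uint32_t *slotidx)
{
	*segno = slot_ptr->s_cseg - 1;          /* undo the +1 bias */
	*slotidx = slot_ptr->s_cindx;           /* slot index, < C_SLOT_MAX_INDEX */
}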


extern int c_seg_fixed_array_len;
extern vm_offset_t c_buffers;
extern int64_t c_segment_compressed_bytes;

#define C_SEG_BUFFER_ADDRESS(c_segno)   (c_buffers + ((uint64_t)(c_segno) * (uint64_t)c_seg_allocsize))

#define C_SEG_SLOT_FROM_INDEX(cseg, index)      ((index) < c_seg_fixed_array_len ? &(cseg)->c_slot_fixed_array[(index)] : &(cseg)->c_slot_var_array[(index) - c_seg_fixed_array_len])
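
/*
 * For readability, the macro above is equivalent to this hypothetical inline
 * helper: the first c_seg_fixed_array_len slots live in the fixed array that
 * tails struct c_segment; higher indices resolve into the separately
 * allocated variable array.
 */
static inline struct c_slot *
example_c_seg_slot_from_index(struct c_segment *cseg, int index)
{
	if (index < c_seg_fixed_array_len) {
		return &cseg->c_slot_fixed_array[index];
	}
	return &cseg->c_slot_var_array[index - c_seg_fixed_array_len];
}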

#define C_SEG_OFFSET_TO_BYTES(off)      ((off) * (int) sizeof(int32_t))
#define C_SEG_BYTES_TO_OFFSET(bytes)    ((bytes) / (int) sizeof(int32_t))

#define C_SEG_UNUSED_BYTES(cseg)        (cseg->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES(cseg->c_populated_offset - cseg->c_nextoffset)))

#ifndef __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_MASK     0x3ULL
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY 0x4
#else
#define C_SEG_OFFSET_ALIGNMENT_MASK     __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__
#endif

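/*
 * A segment's reclaimable waste is the holes left by freed slots
 * (c_bytes_unused) plus the populated-but-not-yet-written tail
 * (c_populated_offset - c_nextoffset); a segment becomes a minor-compaction
 * candidate once that waste reaches a quarter of its buffer.
 */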
#define C_SEG_SHOULD_MINORCOMPACT_NOW(cseg)     ((C_SEG_UNUSED_BYTES(cseg) >= (c_seg_bufsize / 4)) ? 1 : 0)

/*
 * The decision to force a c_seg to be major compacted is based on two criteria:
 * 1) is the c_seg buffer almost empty (i.e. do we have a chance to merge it with another c_seg)?
 * 2) are there at least a minimum number of unoccupied slots, so that we have a chance
 *    of combining this c_seg with another one?
 */
#define C_SEG_SHOULD_MAJORCOMPACT_NOW(cseg)                                                                               \
	((((cseg->c_bytes_unused + (c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(cseg->c_nextoffset))) >= (c_seg_bufsize / 8)) && \
	((C_SLOT_MAX_INDEX - cseg->c_slots_used) > (c_seg_bufsize / PAGE_SIZE)))                                           \
	? 1 : 0)
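
/*
 * Worked example with illustrative values: with a 256KB c_seg_bufsize and
 * 4KB pages, criterion 1 asks for at least 32KB of reclaimable space and
 * criterion 2 for more than 64 of the segment's C_SLOT_MAX_INDEX (1024)
 * slots to be unoccupied.
 */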

#define C_SEG_ONDISK_IS_SPARSE(cseg)    ((cseg->c_bytes_used < cseg->c_bytes_unused) ? 1 : 0)
#define C_SEG_IS_ONDISK(cseg)           ((cseg->c_state == C_ON_SWAPPEDOUT_Q || cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q))
#define C_SEG_IS_ON_DISK_OR_SOQ(cseg)   ((cseg->c_state == C_ON_SWAPPEDOUT_Q ||       \
	                                  cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q || \
	                                  cseg->c_state == C_ON_SWAPOUT_Q ||          \
	                                  cseg->c_state == C_ON_SWAPIO_Q))


#define C_SEG_WAKEUP_DONE(cseg)                                 \
	MACRO_BEGIN                                             \
	assert((cseg)->c_busy);                                 \
	(cseg)->c_busy = 0;                                     \
	assert((cseg)->c_busy_for_thread != NULL);              \
	(cseg)->c_busy_for_thread = NULL;                       \
	if ((cseg)->c_wanted) {                                 \
	        (cseg)->c_wanted = 0;                           \
	        thread_wakeup((event_t) (cseg));                \
	}                                                       \
	MACRO_END

#define C_SEG_BUSY(cseg)                                        \
	MACRO_BEGIN                                             \
	assert((cseg)->c_busy == 0);                            \
	(cseg)->c_busy = 1;                                     \
	assert((cseg)->c_busy_for_thread == NULL);              \
	(cseg)->c_busy_for_thread = current_thread();           \
	MACRO_END
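
/*
 * Illustrative sketch of the busy protocol (hypothetical caller): both
 * macros assume the segment lock is held, since the busy bits live in the
 * locked bitfield. A typical pattern marks the segment busy, drops the lock
 * while operating on it, then retakes the lock and wakes any waiters:
 *
 *	lck_mtx_lock_spin_always(&c_seg->c_lock);
 *	C_SEG_BUSY(c_seg);
 *	lck_mtx_unlock_always(&c_seg->c_lock);
 *	... operate on the segment without its lock ...
 *	lck_mtx_lock_spin_always(&c_seg->c_lock);
 *	C_SEG_WAKEUP_DONE(c_seg);
 *	lck_mtx_unlock_always(&c_seg->c_lock);
 *
 * A thread that finds a segment busy blocks in c_seg_wait_on_busy(), to be
 * woken via the c_wanted bit.
 */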


extern vm_map_t compressor_map;

#if DEVELOPMENT || DEBUG
extern boolean_t write_protect_c_segs;
extern int vm_compressor_test_seg_wp;

#define C_SEG_MAKE_WRITEABLE(cseg)                              \
	MACRO_BEGIN                                             \
	if (write_protect_c_segs) {                             \
	        vm_map_protect(compressor_map,                  \
	            (vm_map_offset_t)cseg->c_store.c_buffer,    \
	            (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(c_seg_allocsize)], \
	            VM_PROT_READ | VM_PROT_WRITE,               \
	            0);                                         \
	}                                                       \
	MACRO_END

#define C_SEG_WRITE_PROTECT(cseg)                               \
	MACRO_BEGIN                                             \
	if (write_protect_c_segs) {                             \
	        vm_map_protect(compressor_map,                  \
	            (vm_map_offset_t)cseg->c_store.c_buffer,    \
	            (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(c_seg_allocsize)], \
	            VM_PROT_READ,                               \
	            0);                                         \
	}                                                       \
	if (vm_compressor_test_seg_wp) {                        \
	        volatile uint32_t vmtstmp = *(volatile uint32_t *)cseg->c_store.c_buffer; \
	        *(volatile uint32_t *)cseg->c_store.c_buffer = 0xDEADABCD; \
	        (void) vmtstmp;                                 \
	}                                                       \
	MACRO_END
#endif /* DEVELOPMENT || DEBUG */

typedef struct c_segment *c_segment_t;
typedef struct c_slot *c_slot_t;

uint64_t vm_compressor_total_compressions(void);
void vm_wake_compactor_swapper(void);
void vm_run_compactor(void);
void vm_thrashing_jetsam_done(void);
void vm_consider_waking_compactor_swapper(void);
void vm_consider_swapping(void);
void vm_compressor_flush(void);
void c_seg_free(c_segment_t);
bool vm_compressor_is_thrashing(void);
bool vm_compressor_needs_to_swap(bool wake_memorystatus_thread);
void c_seg_free_locked(c_segment_t);
void c_seg_insert_into_age_q(c_segment_t);
void c_seg_need_delayed_compaction(c_segment_t, boolean_t);
void c_seg_update_task_owner(c_segment_t, task_t);

void vm_decompressor_lock(void);
void vm_decompressor_unlock(void);

void vm_compressor_delay_trim(void);
void vm_compressor_do_warmup(void);
void vm_compressor_record_warmup_start(void);
void vm_compressor_record_warmup_end(void);

int vm_wants_task_throttled(task_t);

extern void vm_compaction_swapper_do_init(void);
extern void vm_compressor_swap_init(void);
extern lck_rw_t c_master_lock;

#if ENCRYPTED_SWAP
extern void vm_swap_decrypt(c_segment_t);
#endif /* ENCRYPTED_SWAP */

extern int vm_swap_low_on_space(void);
extern int vm_swap_out_of_space(void);
extern kern_return_t vm_swap_get(c_segment_t, uint64_t, uint64_t);
extern void vm_swap_free(uint64_t);
extern void vm_swap_consider_defragmenting(int);

extern void c_seg_swapin_requeue(c_segment_t, boolean_t, boolean_t, boolean_t);
extern int c_seg_swapin(c_segment_t, boolean_t, boolean_t);
extern void c_seg_wait_on_busy(c_segment_t);
extern void c_seg_trim_tail(c_segment_t);
extern void c_seg_switch_state(c_segment_t, int, boolean_t);

extern boolean_t fastwake_recording_in_progress;
extern int compaction_swapper_inited;
extern int compaction_swapper_running;
extern uint64_t vm_swap_put_failures;

extern int c_overage_swapped_count;
extern int c_overage_swapped_limit;

extern queue_head_t c_minor_list_head;
extern queue_head_t c_age_list_head;
extern queue_head_t c_major_list_head;
extern queue_head_t c_early_swapout_list_head;
extern queue_head_t c_regular_swapout_list_head;
extern queue_head_t c_late_swapout_list_head;
extern queue_head_t c_swappedout_list_head;
extern queue_head_t c_swappedout_sparse_list_head;

extern uint32_t c_age_count;
extern uint32_t c_early_swapout_count, c_regular_swapout_count, c_late_swapout_count;
extern uint32_t c_swappedout_count;
extern uint32_t c_swappedout_sparse_count;

extern int64_t compressor_bytes_used;
extern uint64_t first_c_segment_to_warm_generation_id;
extern uint64_t last_c_segment_to_warm_generation_id;
extern boolean_t hibernate_flushing;
extern boolean_t hibernate_no_swapspace;
extern boolean_t hibernate_in_progress_with_pinned_swap;
extern boolean_t hibernate_flush_timed_out;
extern uint32_t swapout_target_age;

extern void c_seg_insert_into_q(queue_head_t *, c_segment_t);

extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;

extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;

extern uint64_t vm_compressor_compute_elapsed_msecs(clock_sec_t, clock_nsec_t, clock_sec_t, clock_nsec_t);

extern void kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo);

#define PAGE_REPLACEMENT_DISALLOWED(enable)     ((enable) == TRUE ? lck_rw_lock_shared(&c_master_lock) : lck_rw_done(&c_master_lock))
#define PAGE_REPLACEMENT_ALLOWED(enable)        ((enable) == TRUE ? lck_rw_lock_exclusive(&c_master_lock) : lck_rw_done(&c_master_lock))
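
/*
 * Illustrative usage (hypothetical caller): the boolean selects take versus
 * drop of c_master_lock, so the calls always come in bracketing pairs.
 * DISALLOWED takes the lock shared (many holders may fend off page
 * replacement at once); ALLOWED takes it exclusive.
 *
 *	PAGE_REPLACEMENT_DISALLOWED(TRUE);
 *	... examine compressor state that page replacement must not mutate ...
 *	PAGE_REPLACEMENT_DISALLOWED(FALSE);
 */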


#define AVAILABLE_NON_COMPRESSED_MEMORY         (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count)
#define AVAILABLE_MEMORY                        (AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT)

/*
 * TODO: there may be a minor optimisation opportunity here to replace these
 * divisions with multiplies and shifts.
 *
 * By multiplying by 10, the divisors can have more precision without resorting
 * to floating point: a divisor specified as 25 is in reality a divide by 2.5.
 * By multiplying by 9, you get a number ~11% smaller, which allows us to have
 * another limit point derived from the same base.
 * By multiplying by 11, you get a number ~10% bigger, which allows us to
 * generate a reset limit derived from the same base, which is useful for
 * hysteresis.
 */

#define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD           (((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 10))
#define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD              (((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 10))

#define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD   (((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 10))
#define VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD   (((AVAILABLE_MEMORY) * 11) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 11))

#define VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD (((AVAILABLE_MEMORY) * 11) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 11))
#define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD      (((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 10))
#define VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD     (((AVAILABLE_MEMORY) * 9) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 9))
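
/*
 * Worked example: with vm_compressor_unthrottle_threshold_divisor == 25, the
 * unthrottle threshold is AVAILABLE_MEMORY * 10 / 25 (40% of available
 * memory) and the rethrottle threshold is AVAILABLE_MEMORY * 11 / 25 (44%),
 * giving the ~10% hysteresis gap described above.
 */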

#if !XNU_TARGET_OS_OSX
#define AVAILABLE_NON_COMPRESSED_MIN            20000
#define COMPRESSOR_NEEDS_TO_SWAP()              (((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) || \
	                                          (AVAILABLE_NON_COMPRESSED_MEMORY < AVAILABLE_NON_COMPRESSED_MIN)) ? 1 : 0)
#else /* !XNU_TARGET_OS_OSX */
#define COMPRESSOR_NEEDS_TO_SWAP()              ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0)
#endif /* !XNU_TARGET_OS_OSX */

#define HARD_THROTTLE_LIMIT_REACHED()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD) ? 1 : 0)
#define SWAPPER_NEEDS_TO_UNTHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0)
#define SWAPPER_NEEDS_TO_RETHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD) ? 1 : 0)
#define SWAPPER_NEEDS_TO_CATCHUP()              ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0)
#define SWAPPER_HAS_CAUGHTUP()                  ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD) ? 1 : 0)
#define COMPRESSOR_NEEDS_TO_MINOR_COMPACT()     ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0)


#if !XNU_TARGET_OS_OSX
#define COMPRESSOR_FREE_RESERVED_LIMIT          28
#else /* !XNU_TARGET_OS_OSX */
#define COMPRESSOR_FREE_RESERVED_LIMIT          128
#endif /* !XNU_TARGET_OS_OSX */

uint32_t vm_compressor_get_encode_scratch_size(void) __pure2;
uint32_t vm_compressor_get_decode_scratch_size(void) __pure2;

#define COMPRESSOR_SCRATCH_BUF_SIZE             vm_compressor_get_encode_scratch_size()

#if RECORD_THE_COMPRESSED_DATA
extern void c_compressed_record_init(void);
extern void c_compressed_record_write(char *, int);
#endif /* RECORD_THE_COMPRESSED_DATA */

extern lck_mtx_t c_list_lock_storage;
#define c_list_lock (&c_list_lock_storage)

#if DEVELOPMENT || DEBUG
extern uint32_t vm_ktrace_enabled;

#define VMKDBG(x, ...)                          \
	MACRO_BEGIN                             \
	if (vm_ktrace_enabled) {                \
	        KDBG(x, ## __VA_ARGS__);        \
	}                                       \
	MACRO_END

extern bool compressor_running_perf_test;
extern uint64_t compressor_perf_test_pages_processed;
#endif /* DEVELOPMENT || DEBUG */

#endif /* _VM_VM_COMPRESSOR_H_ */