1 | /* |
2 | * Copyright (c) 2000-2020 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | /* |
57 | */ |
58 | /* |
59 | * File: mach/vm_statistics.h |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub |
61 | * |
62 | * Virtual memory statistics structure. |
63 | * |
64 | */ |
65 | |
66 | #ifndef _MACH_VM_STATISTICS_H_ |
67 | #define _MACH_VM_STATISTICS_H_ |
68 | |
69 | #include <sys/cdefs.h> |
70 | |
71 | #include <mach/machine/vm_types.h> |
72 | #include <mach/machine/kern_return.h> |
73 | |
74 | __BEGIN_DECLS |
75 | |
76 | /* |
77 | * vm_statistics |
78 | * |
79 | * History: |
80 | * rev0 - original structure. |
* rev1 - added purgable info (purgeable_count and purges).
82 | * rev2 - added speculative_count. |
83 | * |
84 | * Note: you cannot add any new fields to this structure. Add them below in |
85 | * vm_statistics64. |
86 | */ |
87 | |
88 | struct vm_statistics { |
89 | natural_t free_count; /* # of pages free */ |
90 | natural_t active_count; /* # of pages active */ |
91 | natural_t inactive_count; /* # of pages inactive */ |
92 | natural_t wire_count; /* # of pages wired down */ |
93 | natural_t zero_fill_count; /* # of zero fill pages */ |
94 | natural_t reactivations; /* # of pages reactivated */ |
95 | natural_t pageins; /* # of pageins */ |
96 | natural_t pageouts; /* # of pageouts */ |
97 | natural_t faults; /* # of faults */ |
98 | natural_t cow_faults; /* # of copy-on-writes */ |
99 | natural_t lookups; /* object cache lookups */ |
100 | natural_t hits; /* object cache hits */ |
101 | |
102 | /* added for rev1 */ |
103 | natural_t purgeable_count; /* # of pages purgeable */ |
104 | natural_t purges; /* # of pages purged */ |
105 | |
106 | /* added for rev2 */ |
107 | /* |
108 | * NB: speculative pages are already accounted for in "free_count", |
109 | * so "speculative_count" is the number of "free" pages that are |
110 | * used to hold data that was read speculatively from disk but |
111 | * haven't actually been used by anyone so far. |
112 | */ |
113 | natural_t speculative_count; /* # of pages speculative */ |
114 | }; |
115 | |
116 | /* Used by all architectures */ |
117 | typedef struct vm_statistics *vm_statistics_t; |
118 | typedef struct vm_statistics vm_statistics_data_t; |
119 | |
120 | /* |
121 | * vm_statistics64 |
122 | * |
123 | * History: |
124 | * rev0 - original structure. |
* rev1 - added purgable info (purgeable_count and purges).
126 | * rev2 - added speculative_count. |
127 | * ---- |
128 | * rev3 - changed name to vm_statistics64. |
129 | * changed some fields in structure to 64-bit on |
130 | * arm, i386 and x86_64 architectures. |
131 | * rev4 - require 64-bit alignment for efficient access |
132 | * in the kernel. No change to reported data. |
133 | * |
134 | */ |
135 | |
136 | struct vm_statistics64 { |
137 | natural_t free_count; /* # of pages free */ |
138 | natural_t active_count; /* # of pages active */ |
139 | natural_t inactive_count; /* # of pages inactive */ |
140 | natural_t wire_count; /* # of pages wired down */ |
141 | uint64_t zero_fill_count; /* # of zero fill pages */ |
142 | uint64_t reactivations; /* # of pages reactivated */ |
143 | uint64_t pageins; /* # of pageins */ |
144 | uint64_t pageouts; /* # of pageouts */ |
145 | uint64_t faults; /* # of faults */ |
146 | uint64_t cow_faults; /* # of copy-on-writes */ |
147 | uint64_t lookups; /* object cache lookups */ |
148 | uint64_t hits; /* object cache hits */ |
149 | uint64_t purges; /* # of pages purged */ |
150 | natural_t purgeable_count; /* # of pages purgeable */ |
151 | /* |
152 | * NB: speculative pages are already accounted for in "free_count", |
153 | * so "speculative_count" is the number of "free" pages that are |
154 | * used to hold data that was read speculatively from disk but |
155 | * haven't actually been used by anyone so far. |
156 | */ |
157 | natural_t speculative_count; /* # of pages speculative */ |
158 | |
159 | /* added for rev1 */ |
160 | uint64_t decompressions; /* # of pages decompressed */ |
161 | uint64_t compressions; /* # of pages compressed */ |
162 | uint64_t swapins; /* # of pages swapped in (via compression segments) */ |
163 | uint64_t swapouts; /* # of pages swapped out (via compression segments) */ |
164 | natural_t compressor_page_count; /* # of pages used by the compressed pager to hold all the compressed data */ |
165 | natural_t throttled_count; /* # of pages throttled */ |
166 | natural_t external_page_count; /* # of pages that are file-backed (non-swap) */ |
167 | natural_t internal_page_count; /* # of pages that are anonymous */ |
168 | uint64_t total_uncompressed_pages_in_compressor; /* # of pages (uncompressed) held within the compressor. */ |
169 | } __attribute__((aligned(8))); |
170 | |
171 | typedef struct vm_statistics64 *vm_statistics64_t; |
172 | typedef struct vm_statistics64 vm_statistics64_data_t; |
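
/*
 * Example (sketch, not part of this interface): user space typically obtains
 * a vm_statistics64 snapshot through host_statistics64(), using the
 * HOST_VM_INFO64 flavor and HOST_VM_INFO64_COUNT declared in
 * <mach/mach_host.h> / <mach/host_info.h>:
 *
 *	vm_statistics64_data_t vmstat;
 *	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *
 *	if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *	    (host_info64_t)&vmstat, &count) == KERN_SUCCESS) {
 *		// vmstat.free_count, vmstat.compressions, ... are valid here
 *	}
 */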
173 | |
174 | kern_return_t vm_stats(void *info, unsigned int *count); |
175 | |
176 | /* |
177 | * VM_STATISTICS_TRUNCATE_TO_32_BIT |
178 | * |
179 | * This is used by host_statistics() to truncate and peg the 64-bit in-kernel values from |
180 | * vm_statistics64 to the 32-bit values of the older structure above (vm_statistics). |
181 | */ |
182 | #define VM_STATISTICS_TRUNCATE_TO_32_BIT(value) ((uint32_t)(((value) > UINT32_MAX ) ? UINT32_MAX : (value))) |
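
/*
 * For example (values chosen for illustration only):
 *	VM_STATISTICS_TRUNCATE_TO_32_BIT(42ULL)          == 42
 *	VM_STATISTICS_TRUNCATE_TO_32_BIT(0x180000000ULL) == UINT32_MAX
 */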
183 | |
184 | /* |
185 | * vm_extmod_statistics |
186 | * |
187 | * Structure to record modifications to a task by an |
188 | * external agent. |
189 | * |
190 | * History: |
191 | * rev0 - original structure. |
192 | */ |
193 | |
194 | struct vm_extmod_statistics { |
195 | int64_t task_for_pid_count; /* # of times task port was looked up */ |
196 | int64_t task_for_pid_caller_count; /* # of times this task called task_for_pid */ |
197 | int64_t thread_creation_count; /* # of threads created in task */ |
198 | int64_t thread_creation_caller_count; /* # of threads created by task */ |
199 | int64_t thread_set_state_count; /* # of register state sets in task */ |
200 | int64_t thread_set_state_caller_count; /* # of register state sets by task */ |
201 | } __attribute__((aligned(8))); |
202 | |
203 | typedef struct vm_extmod_statistics *vm_extmod_statistics_t; |
204 | typedef struct vm_extmod_statistics vm_extmod_statistics_data_t; |
205 | |
206 | typedef struct vm_purgeable_stat { |
207 | uint64_t count; |
208 | uint64_t size; |
} vm_purgeable_stat_t;
210 | |
211 | struct vm_purgeable_info { |
212 | vm_purgeable_stat_t fifo_data[8]; |
213 | vm_purgeable_stat_t obsolete_data; |
214 | vm_purgeable_stat_t lifo_data[8]; |
215 | }; |
216 | |
217 | typedef struct vm_purgeable_info *vm_purgeable_info_t; |
218 | |
219 | /* included for the vm_map_page_query call */ |
220 | |
221 | #define VM_PAGE_QUERY_PAGE_PRESENT 0x1 |
222 | #define VM_PAGE_QUERY_PAGE_FICTITIOUS 0x2 |
223 | #define VM_PAGE_QUERY_PAGE_REF 0x4 |
224 | #define VM_PAGE_QUERY_PAGE_DIRTY 0x8 |
225 | #define VM_PAGE_QUERY_PAGE_PAGED_OUT 0x10 |
226 | #define VM_PAGE_QUERY_PAGE_COPIED 0x20 |
227 | #define VM_PAGE_QUERY_PAGE_SPECULATIVE 0x40 |
228 | #define VM_PAGE_QUERY_PAGE_EXTERNAL 0x80 |
229 | #define VM_PAGE_QUERY_PAGE_CS_VALIDATED 0x100 |
230 | #define VM_PAGE_QUERY_PAGE_CS_TAINTED 0x200 |
231 | #define VM_PAGE_QUERY_PAGE_CS_NX 0x400 |
232 | #define VM_PAGE_QUERY_PAGE_REUSABLE 0x800 |
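
/*
 * Example (sketch): these bits come back in the "disposition" word of the
 * vm_map_page_query / mach_vm_page_query MIG routines (declared in
 * <mach/mach_vm.h>); the exact call below is an assumption for illustration,
 * not part of this header:
 *
 *	integer_t disposition = 0, ref_count = 0;
 *
 *	if (mach_vm_page_query(mach_task_self(), (mach_vm_offset_t)addr,
 *	    &disposition, &ref_count) == KERN_SUCCESS &&
 *	    (disposition & VM_PAGE_QUERY_PAGE_PRESENT)) {
 *		// the page backing 'addr' is resident
 *	}
 */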
233 | |
234 | /* |
235 | * VM allocation flags: |
236 | * |
237 | * VM_FLAGS_FIXED |
238 | * (really the absence of VM_FLAGS_ANYWHERE) |
239 | * Allocate new VM region at the specified virtual address, if possible. |
240 | * |
241 | * VM_FLAGS_ANYWHERE |
242 | * Allocate new VM region anywhere it would fit in the address space. |
243 | * |
244 | * VM_FLAGS_PURGABLE |
245 | * Create a purgable VM object for that new VM region. |
246 | * |
247 | * VM_FLAGS_4GB_CHUNK |
248 | * The new VM region will be chunked up into 4GB sized pieces. |
249 | * |
250 | * VM_FLAGS_NO_PMAP_CHECK |
251 | * (for DEBUG kernel config only, ignored for other configs) |
252 | * Do not check that there is no stale pmap mapping for the new VM region. |
253 | * This is useful for kernel memory allocations at bootstrap when building |
254 | * the initial kernel address space while some memory is already in use. |
255 | * |
256 | * VM_FLAGS_OVERWRITE |
257 | * The new VM region can replace existing VM regions if necessary |
258 | * (to be used in combination with VM_FLAGS_FIXED). |
259 | * |
260 | * VM_FLAGS_NO_CACHE |
261 | * Pages brought in to this VM region are placed on the speculative |
262 | * queue instead of the active queue. In other words, they are not |
263 | * cached so that they will be stolen first if memory runs low. |
264 | */ |
265 | |
266 | #define VM_FLAGS_FIXED 0x00000000 |
267 | #define VM_FLAGS_ANYWHERE 0x00000001 |
268 | #define VM_FLAGS_PURGABLE 0x00000002 |
269 | #define VM_FLAGS_4GB_CHUNK 0x00000004 |
270 | #define VM_FLAGS_RANDOM_ADDR 0x00000008 |
271 | #define VM_FLAGS_NO_CACHE 0x00000010 |
272 | #define VM_FLAGS_RESILIENT_CODESIGN 0x00000020 |
273 | #define VM_FLAGS_RESILIENT_MEDIA 0x00000040 |
274 | #define VM_FLAGS_PERMANENT 0x00000080 |
275 | #define VM_FLAGS_TPRO 0x00001000 |
276 | #define VM_FLAGS_OVERWRITE 0x00004000 /* delete any existing mappings first */ |
277 | /* |
278 | * VM_FLAGS_SUPERPAGE_MASK |
279 | * 3 bits that specify whether large pages should be used instead of |
280 | * base pages (!=0), as well as the requested page size. |
281 | */ |
282 | #define VM_FLAGS_SUPERPAGE_MASK 0x00070000 /* bits 0x10000, 0x20000, 0x40000 */ |
283 | #define VM_FLAGS_RETURN_DATA_ADDR 0x00100000 /* Return address of target data, rather than base of page */ |
284 | #define VM_FLAGS_RETURN_4K_DATA_ADDR 0x00800000 /* Return 4K aligned address of target data */ |
285 | #define VM_FLAGS_ALIAS_MASK 0xFF000000 |
286 | #define VM_GET_FLAGS_ALIAS(flags, alias) \ |
287 | (alias) = (((flags) >> 24) & 0xff) |
288 | #if !XNU_KERNEL_PRIVATE |
289 | #define VM_SET_FLAGS_ALIAS(flags, alias) \ |
290 | (flags) = (((flags) & ~VM_FLAGS_ALIAS_MASK) | \ |
291 | (((alias) & ~VM_FLAGS_ALIAS_MASK) << 24)) |
292 | #endif /* !XNU_KERNEL_PRIVATE */ |
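
/*
 * Example (sketch): a user-space allocation can be tagged with one of the
 * VM_MEMORY_* values below (via VM_MAKE_TAG, defined later in this header)
 * so that tools such as vmmap can attribute the region:
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr = mach_vm_allocate(mach_task_self(), &addr, size,
 *	    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_APPLICATION_SPECIFIC_1));
 *
 * Given a flags word, the tag can be recovered with
 * VM_GET_FLAGS_ALIAS(flags, alias).
 */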
293 | |
294 | #if XNU_KERNEL_PRIVATE |
295 | /* |
* When adding a new VM_FLAGS_* flag:
297 | * - add it to this mask |
298 | * - add a vmf_* field to vm_map_kernel_flags_t in the right spot |
299 | * - add a check in vm_map_kernel_flags_check_vmflags() |
300 | */ |
301 | #define VM_FLAGS_ANY_MASK (VM_FLAGS_FIXED | \ |
302 | VM_FLAGS_ANYWHERE | \ |
303 | VM_FLAGS_PURGABLE | \ |
304 | VM_FLAGS_4GB_CHUNK | \ |
305 | VM_FLAGS_RANDOM_ADDR | \ |
306 | VM_FLAGS_NO_CACHE | \ |
307 | VM_FLAGS_RESILIENT_CODESIGN | \ |
308 | VM_FLAGS_RESILIENT_MEDIA | \ |
309 | VM_FLAGS_PERMANENT | \ |
310 | VM_FLAGS_TPRO | \ |
311 | VM_FLAGS_OVERWRITE | \ |
312 | VM_FLAGS_SUPERPAGE_MASK | \ |
313 | VM_FLAGS_RETURN_DATA_ADDR | \ |
314 | VM_FLAGS_RETURN_4K_DATA_ADDR | \ |
315 | VM_FLAGS_ALIAS_MASK) |
316 | #endif /* XNU_KERNEL_PRIVATE */ |
317 | #define VM_FLAGS_HW (VM_FLAGS_TPRO) |
318 | |
319 | /* These are the flags that we accept from user-space */ |
320 | #define VM_FLAGS_USER_ALLOCATE (VM_FLAGS_FIXED | \ |
321 | VM_FLAGS_ANYWHERE | \ |
322 | VM_FLAGS_PURGABLE | \ |
323 | VM_FLAGS_4GB_CHUNK | \ |
324 | VM_FLAGS_RANDOM_ADDR | \ |
325 | VM_FLAGS_NO_CACHE | \ |
326 | VM_FLAGS_PERMANENT | \ |
327 | VM_FLAGS_OVERWRITE | \ |
328 | VM_FLAGS_SUPERPAGE_MASK | \ |
329 | VM_FLAGS_HW | \ |
330 | VM_FLAGS_ALIAS_MASK) |
331 | |
332 | #define VM_FLAGS_USER_MAP (VM_FLAGS_USER_ALLOCATE | \ |
333 | VM_FLAGS_RETURN_4K_DATA_ADDR | \ |
334 | VM_FLAGS_RETURN_DATA_ADDR) |
335 | |
336 | #define VM_FLAGS_USER_REMAP (VM_FLAGS_FIXED | \ |
337 | VM_FLAGS_ANYWHERE | \ |
338 | VM_FLAGS_RANDOM_ADDR | \ |
VM_FLAGS_OVERWRITE | \
340 | VM_FLAGS_RETURN_DATA_ADDR | \ |
341 | VM_FLAGS_RESILIENT_CODESIGN | \ |
342 | VM_FLAGS_RESILIENT_MEDIA) |
343 | |
344 | #define VM_FLAGS_SUPERPAGE_SHIFT 16 |
345 | #define SUPERPAGE_NONE 0 /* no superpages, if all bits are 0 */ |
346 | #define SUPERPAGE_SIZE_ANY 1 |
347 | #define VM_FLAGS_SUPERPAGE_NONE (SUPERPAGE_NONE << VM_FLAGS_SUPERPAGE_SHIFT) |
348 | #define VM_FLAGS_SUPERPAGE_SIZE_ANY (SUPERPAGE_SIZE_ANY << VM_FLAGS_SUPERPAGE_SHIFT) |
349 | #if defined(__x86_64__) || !defined(KERNEL) |
350 | #define SUPERPAGE_SIZE_2MB 2 |
351 | #define VM_FLAGS_SUPERPAGE_SIZE_2MB (SUPERPAGE_SIZE_2MB<<VM_FLAGS_SUPERPAGE_SHIFT) |
352 | #endif |
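
/*
 * Example (sketch): requesting a 2MB superpage on x86_64. The size must be a
 * multiple of the superpage size, and the call can fail if no superpages are
 * available:
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr = mach_vm_allocate(mach_task_self(), &addr,
 *	    2 * 1024 * 1024,
 *	    VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
 */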
353 | |
354 | /* |
355 | * EXC_GUARD definitions for virtual memory. |
356 | */ |
357 | #define GUARD_TYPE_VIRT_MEMORY 0x5 |
358 | |
359 | /* Reasons for exception for virtual memory */ |
360 | enum virtual_memory_guard_exception_codes { |
361 | kGUARD_EXC_DEALLOC_GAP = 1u << 0, |
362 | kGUARD_EXC_RECLAIM_COPYIO_FAILURE = 1u << 1, |
363 | kGUARD_EXC_RECLAIM_INDEX_FAILURE = 1u << 2, |
364 | kGUARD_EXC_RECLAIM_DEALLOCATE_FAILURE = 1u << 3, |
365 | }; |
366 | |
367 | #ifdef XNU_KERNEL_PRIVATE |
368 | |
369 | /*! |
370 | * @enum vm_map_range_id_t |
371 | * |
372 | * @brief |
373 | * Enumerate a particular vm_map range. |
374 | * |
375 | * @discussion |
376 | * The kernel_map VA has been split into the following ranges. Userspace |
377 | * VA for any given process can also optionally be split by the following user |
378 | * ranges. |
379 | * |
380 | * @const KMEM_RANGE_ID_NONE |
381 | * This range is only used for early initialization. |
382 | * |
383 | * @const KMEM_RANGE_ID_PTR_* |
* Range containing general purpose allocations from kalloc, etc. that
385 | * contain pointers. |
386 | * |
387 | * @const KMEM_RANGE_ID_SPRAYQTN |
388 | * The spray quarantine range contains allocations that have the following |
389 | * properties: |
390 | * - An attacker could control the size, lifetime and number of allocations |
391 | * of this type (or from this callsite). |
392 | * - The pointer to the allocation is zeroed to ensure that it isn't left |
* dangling, limiting the use of UaFs.
* - OOBs on the allocation are carefully considered and sufficiently
395 | * addressed. |
396 | * |
397 | * @const KMEM_RANGE_ID_DATA |
398 | * Range containing allocations that are bags of bytes and contain no |
399 | * pointers. |
400 | */ |
401 | __enum_decl(vm_map_range_id_t, uint8_t, { |
402 | KMEM_RANGE_ID_NONE, |
403 | KMEM_RANGE_ID_PTR_0, |
404 | KMEM_RANGE_ID_PTR_1, |
405 | KMEM_RANGE_ID_PTR_2, |
406 | KMEM_RANGE_ID_SPRAYQTN, |
407 | KMEM_RANGE_ID_DATA, |
408 | |
409 | KMEM_RANGE_ID_FIRST = KMEM_RANGE_ID_PTR_0, |
410 | KMEM_RANGE_ID_NUM_PTR = KMEM_RANGE_ID_PTR_2, |
411 | KMEM_RANGE_ID_MAX = KMEM_RANGE_ID_DATA, |
412 | |
413 | /* these UMEM_* correspond to the MACH_VM_RANGE_* tags and are ABI */ |
414 | UMEM_RANGE_ID_DEFAULT = 0, /* same as MACH_VM_RANGE_DEFAULT */ |
415 | UMEM_RANGE_ID_HEAP, /* same as MACH_VM_RANGE_DATA */ |
416 | UMEM_RANGE_ID_FIXED, /* same as MACH_VM_RANGE_FIXED */ |
417 | |
418 | /* these UMEM_* are XNU internal only range IDs, and aren't ABI */ |
419 | UMEM_RANGE_ID_MAX = UMEM_RANGE_ID_FIXED, |
420 | |
421 | #define KMEM_RANGE_COUNT (KMEM_RANGE_ID_MAX + 1) |
422 | }); |
423 | |
424 | typedef vm_map_range_id_t kmem_range_id_t; |
425 | |
426 | #define kmem_log2down(mask) (31 - __builtin_clz(mask)) |
427 | #define KMEM_RANGE_MAX (UMEM_RANGE_ID_MAX < KMEM_RANGE_ID_MAX \ |
428 | ? KMEM_RANGE_ID_MAX : UMEM_RANGE_ID_MAX) |
429 | #define KMEM_RANGE_BITS kmem_log2down(2 * KMEM_RANGE_MAX - 1) |
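
/*
 * With the enumerators above, KMEM_RANGE_ID_MAX == KMEM_RANGE_ID_DATA == 5
 * and UMEM_RANGE_ID_MAX == UMEM_RANGE_ID_FIXED == 2, so KMEM_RANGE_MAX == 5
 * and KMEM_RANGE_BITS == kmem_log2down(9) == 3: the vmkf_range_id bit-field
 * below is 3 bits wide, enough to hold any range ID.
 */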
430 | |
431 | typedef union { |
432 | struct { |
433 | unsigned long long |
434 | /* |
435 | * VM_FLAG_* flags |
436 | */ |
437 | vmf_fixed:1, |
438 | vmf_purgeable:1, |
439 | vmf_4gb_chunk:1, |
440 | vmf_random_addr:1, |
441 | vmf_no_cache:1, |
442 | vmf_resilient_codesign:1, |
443 | vmf_resilient_media:1, |
444 | vmf_permanent:1, |
445 | |
446 | __unused_bit_8:1, |
447 | __unused_bit_9:1, |
448 | __unused_bit_10:1, |
449 | __unused_bit_11:1, |
450 | vmf_tpro:1, |
451 | __unused_bit_13:1, |
452 | vmf_overwrite:1, |
453 | __unused_bit_15:1, |
454 | |
455 | vmf_superpage_size:3, |
456 | __unused_bit_19:1, |
457 | vmf_return_data_addr:1, |
458 | __unused_bit_21:1, |
459 | __unused_bit_22:1, |
460 | vmf_return_4k_data_addr:1, |
461 | |
462 | /* |
463 | * VM tag (user or kernel) |
464 | * |
465 | * User tags are limited to 8 bits, |
466 | * kernel tags can use up to 12 bits |
467 | * with -zt or similar features. |
468 | */ |
469 | vm_tag : 12, /* same as VME_ALIAS_BITS */ |
470 | |
471 | /* |
472 | * General kernel flags |
473 | */ |
474 | vmkf_already:1, /* OK if same mapping already exists */ |
475 | vmkf_beyond_max:1, /* map beyond the map's max offset */ |
476 | vmkf_no_pmap_check:1, /* do not check that pmap is empty */ |
477 | vmkf_map_jit:1, /* mark entry as JIT region */ |
478 | vmkf_iokit_acct:1, /* IOKit accounting */ |
479 | vmkf_keep_map_locked:1, /* keep map locked when returning from vm_map_enter() */ |
480 | vmkf_fourk:1, /* use fourk pager */ |
481 | vmkf_overwrite_immutable:1, /* can overwrite immutable mappings */ |
482 | vmkf_remap_prot_copy:1, /* vm_remap for VM_PROT_COPY */ |
483 | vmkf_cs_enforcement_override:1, /* override CS_ENFORCEMENT */ |
484 | vmkf_cs_enforcement:1, /* new value for CS_ENFORCEMENT */ |
485 | vmkf_nested_pmap:1, /* use a nested pmap */ |
486 | vmkf_no_copy_on_read:1, /* do not use copy_on_read */ |
487 | vmkf_copy_single_object:1, /* vm_map_copy only 1 VM object */ |
488 | vmkf_copy_pageable:1, /* vm_map_copy with pageable entries */ |
489 | vmkf_copy_same_map:1, /* vm_map_copy to remap in original map */ |
490 | vmkf_translated_allow_execute:1, /* allow execute in translated processes */ |
491 | vmkf_tpro_enforcement_override:1, /* override TPRO propagation */ |
492 | |
493 | /* |
494 | * Submap creation, altering vm_map_enter() only |
495 | */ |
496 | vmkf_submap:1, /* mapping a VM submap */ |
497 | vmkf_submap_atomic:1, /* keep entry atomic (no splitting/coalescing) */ |
498 | vmkf_submap_adjust:1, /* the submap needs to be adjusted */ |
499 | |
500 | /* |
501 | * Flags altering the behavior of vm_map_locate_space() |
502 | */ |
503 | vmkf_32bit_map_va:1, /* allocate in low 32-bits range */ |
504 | vmkf_guard_before:1, /* guard page before the mapping */ |
505 | vmkf_last_free:1, /* find space from the end */ |
506 | vmkf_range_id:KMEM_RANGE_BITS, /* kmem range to allocate in */ |
507 | |
508 | __vmkf_unused:1; |
509 | }; |
510 | |
511 | /* |
512 | * do not access these directly, |
513 | * use vm_map_kernel_flags_check_vmflags*() |
514 | */ |
515 | uint32_t __vm_flags : 24; |
516 | } vm_map_kernel_flags_t; |
517 | |
518 | /* |
519 | * using this means that vmf_* flags can't be used |
* until vm_map_kernel_flags_set_vmflags() has been called,
* or some careful manual init is done.
522 | * |
523 | * Prefer VM_MAP_KERNEL_FLAGS_(FIXED,ANYWHERE) instead. |
524 | */ |
525 | #define VM_MAP_KERNEL_FLAGS_NONE \ |
526 | (vm_map_kernel_flags_t){ } |
527 | |
528 | #define VM_MAP_KERNEL_FLAGS_FIXED(...) \ |
529 | (vm_map_kernel_flags_t){ .vmf_fixed = true, __VA_ARGS__ } |
530 | |
531 | #define VM_MAP_KERNEL_FLAGS_ANYWHERE(...) \ |
532 | (vm_map_kernel_flags_t){ .vmf_fixed = false, __VA_ARGS__ } |
533 | |
534 | #define VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(...) \ |
535 | VM_MAP_KERNEL_FLAGS_FIXED(.vmf_permanent = true, __VA_ARGS__) |
536 | |
537 | #define VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(...) \ |
538 | VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmkf_range_id = KMEM_RANGE_ID_DATA, __VA_ARGS__) |
539 | |
540 | typedef struct { |
541 | unsigned int |
542 | vmnekf_ledger_tag:3, |
543 | :1, |
544 | __vmnekf_unused:28; |
545 | } vm_named_entry_kernel_flags_t; |
546 | #define VM_NAMED_ENTRY_KERNEL_FLAGS_NONE (vm_named_entry_kernel_flags_t) { \ |
547 | .vmnekf_ledger_tag = 0, \ |
548 | .vmnekf_ledger_no_footprint = 0, \ |
549 | .__vmnekf_unused = 0 \ |
550 | } |
551 | |
552 | #endif /* XNU_KERNEL_PRIVATE */ |
553 | |
554 | /* current accounting postmark */ |
555 | #define __VM_LEDGER_ACCOUNTING_POSTMARK 2019032600 |
556 | |
557 | /* discrete values: */ |
558 | #define VM_LEDGER_TAG_NONE 0x00000000 |
559 | #define VM_LEDGER_TAG_DEFAULT 0x00000001 |
560 | #define VM_LEDGER_TAG_NETWORK 0x00000002 |
561 | #define VM_LEDGER_TAG_MEDIA 0x00000003 |
562 | #define VM_LEDGER_TAG_GRAPHICS 0x00000004 |
563 | #define VM_LEDGER_TAG_NEURAL 0x00000005 |
564 | #define VM_LEDGER_TAG_MAX 0x00000005 |
565 | #define VM_LEDGER_TAG_UNCHANGED ((int)-1) |
566 | |
567 | /* individual bits: */ |
#define VM_LEDGER_FLAG_NO_FOOTPRINT           (1 << 0)
#define VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG (1 << 1)
570 | #define VM_LEDGER_FLAGS (VM_LEDGER_FLAG_NO_FOOTPRINT | VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG) |
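
/*
 * Example (sketch, assuming the mach_memory_entry_ownership() MIG routine):
 * a memory entry can be attributed to a ledger tag and, optionally, excluded
 * from the footprint:
 *
 *	kr = mach_memory_entry_ownership(mem_entry_port, owner_task,
 *	    VM_LEDGER_TAG_NETWORK, VM_LEDGER_FLAG_NO_FOOTPRINT);
 */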
571 | |
572 | |
573 | #define VM_MEMORY_MALLOC 1 |
574 | #define VM_MEMORY_MALLOC_SMALL 2 |
575 | #define VM_MEMORY_MALLOC_LARGE 3 |
576 | #define VM_MEMORY_MALLOC_HUGE 4 |
#define VM_MEMORY_SBRK 5 // uninteresting -- no one should call
578 | #define VM_MEMORY_REALLOC 6 |
579 | #define VM_MEMORY_MALLOC_TINY 7 |
580 | #define VM_MEMORY_MALLOC_LARGE_REUSABLE 8 |
581 | #define VM_MEMORY_MALLOC_LARGE_REUSED 9 |
582 | |
583 | #define VM_MEMORY_ANALYSIS_TOOL 10 |
584 | |
585 | #define VM_MEMORY_MALLOC_NANO 11 |
586 | #define VM_MEMORY_MALLOC_MEDIUM 12 |
587 | #define VM_MEMORY_MALLOC_PROB_GUARD 13 |
588 | |
589 | #define VM_MEMORY_MACH_MSG 20 |
590 | #define VM_MEMORY_IOKIT 21 |
591 | #define VM_MEMORY_STACK 30 |
592 | #define VM_MEMORY_GUARD 31 |
593 | #define VM_MEMORY_SHARED_PMAP 32 |
594 | /* memory containing a dylib */ |
595 | #define VM_MEMORY_DYLIB 33 |
596 | #define VM_MEMORY_OBJC_DISPATCHERS 34 |
597 | |
598 | /* Was a nested pmap (VM_MEMORY_SHARED_PMAP) which has now been unnested */ |
599 | #define VM_MEMORY_UNSHARED_PMAP 35 |
600 | |
601 | |
602 | // Placeholders for now -- as we analyze the libraries and find how they |
603 | // use memory, we can make these labels more specific. |
604 | #define VM_MEMORY_APPKIT 40 |
605 | #define VM_MEMORY_FOUNDATION 41 |
606 | #define VM_MEMORY_COREGRAPHICS 42 |
607 | #define VM_MEMORY_CORESERVICES 43 |
608 | #define VM_MEMORY_CARBON VM_MEMORY_CORESERVICES |
609 | #define VM_MEMORY_JAVA 44 |
610 | #define VM_MEMORY_COREDATA 45 |
611 | #define VM_MEMORY_COREDATA_OBJECTIDS 46 |
612 | #define VM_MEMORY_ATS 50 |
613 | #define VM_MEMORY_LAYERKIT 51 |
614 | #define VM_MEMORY_CGIMAGE 52 |
615 | #define VM_MEMORY_TCMALLOC 53 |
616 | |
617 | /* private raster data (i.e. layers, some images, QGL allocator) */ |
618 | #define VM_MEMORY_COREGRAPHICS_DATA 54 |
619 | |
620 | /* shared image and font caches */ |
621 | #define VM_MEMORY_COREGRAPHICS_SHARED 55 |
622 | |
623 | /* Memory used for virtual framebuffers, shadowing buffers, etc... */ |
624 | #define VM_MEMORY_COREGRAPHICS_FRAMEBUFFERS 56 |
625 | |
626 | /* Window backing stores, custom shadow data, and compressed backing stores */ |
627 | #define VM_MEMORY_COREGRAPHICS_BACKINGSTORES 57 |
628 | |
629 | /* x-alloc'd memory */ |
630 | #define VM_MEMORY_COREGRAPHICS_XALLOC 58 |
631 | |
632 | /* catch-all for other uses, such as the read-only shared data page */ |
633 | #define VM_MEMORY_COREGRAPHICS_MISC VM_MEMORY_COREGRAPHICS |
634 | |
635 | /* memory allocated by the dynamic loader for itself */ |
636 | #define VM_MEMORY_DYLD 60 |
637 | /* malloc'd memory created by dyld */ |
638 | #define VM_MEMORY_DYLD_MALLOC 61 |
639 | |
640 | /* Used for sqlite page cache */ |
641 | #define VM_MEMORY_SQLITE 62 |
642 | |
643 | /* JavaScriptCore heaps */ |
644 | #define VM_MEMORY_JAVASCRIPT_CORE 63 |
645 | #define VM_MEMORY_WEBASSEMBLY VM_MEMORY_JAVASCRIPT_CORE |
646 | /* memory allocated for the JIT */ |
647 | #define VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR 64 |
648 | #define VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE 65 |
649 | |
650 | /* memory allocated for GLSL */ |
651 | #define VM_MEMORY_GLSL 66 |
652 | |
653 | /* memory allocated for OpenCL.framework */ |
654 | #define VM_MEMORY_OPENCL 67 |
655 | |
656 | /* memory allocated for QuartzCore.framework */ |
657 | #define VM_MEMORY_COREIMAGE 68 |
658 | |
659 | /* memory allocated for WebCore Purgeable Buffers */ |
660 | #define VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS 69 |
661 | |
662 | /* ImageIO memory */ |
663 | #define VM_MEMORY_IMAGEIO 70 |
664 | |
665 | /* CoreProfile memory */ |
666 | #define VM_MEMORY_COREPROFILE 71 |
667 | |
668 | /* assetsd / MobileSlideShow memory */ |
669 | #define VM_MEMORY_ASSETSD 72 |
670 | |
671 | /* libsystem_kernel os_once_alloc */ |
672 | #define VM_MEMORY_OS_ALLOC_ONCE 73 |
673 | |
674 | /* libdispatch internal allocator */ |
675 | #define VM_MEMORY_LIBDISPATCH 74 |
676 | |
677 | /* Accelerate.framework image backing stores */ |
678 | #define VM_MEMORY_ACCELERATE 75 |
679 | |
680 | /* CoreUI image block data */ |
681 | #define VM_MEMORY_COREUI 76 |
682 | |
683 | /* CoreUI image file */ |
684 | #define VM_MEMORY_COREUIFILE 77 |
685 | |
686 | /* Genealogy buffers */ |
687 | #define VM_MEMORY_GENEALOGY 78 |
688 | |
689 | /* RawCamera VM allocated memory */ |
690 | #define VM_MEMORY_RAWCAMERA 79 |
691 | |
692 | /* corpse info for dead process */ |
693 | #define VM_MEMORY_CORPSEINFO 80 |
694 | |
695 | /* Apple System Logger (ASL) messages */ |
696 | #define VM_MEMORY_ASL 81 |
697 | |
698 | /* Swift runtime */ |
699 | #define VM_MEMORY_SWIFT_RUNTIME 82 |
700 | |
701 | /* Swift metadata */ |
702 | #define VM_MEMORY_SWIFT_METADATA 83 |
703 | |
704 | /* DHMM data */ |
705 | #define VM_MEMORY_DHMM 84 |
706 | |
707 | |
708 | /* memory allocated by SceneKit.framework */ |
709 | #define VM_MEMORY_SCENEKIT 86 |
710 | |
711 | /* memory allocated by skywalk networking */ |
712 | #define VM_MEMORY_SKYWALK 87 |
713 | |
714 | #define VM_MEMORY_IOSURFACE 88 |
715 | |
716 | #define VM_MEMORY_LIBNETWORK 89 |
717 | |
718 | #define VM_MEMORY_AUDIO 90 |
719 | |
720 | #define VM_MEMORY_VIDEOBITSTREAM 91 |
721 | |
722 | /* memory allocated by CoreMedia */ |
723 | #define VM_MEMORY_CM_XPC 92 |
724 | |
725 | #define VM_MEMORY_CM_RPC 93 |
726 | |
727 | #define VM_MEMORY_CM_MEMORYPOOL 94 |
728 | |
729 | #define VM_MEMORY_CM_READCACHE 95 |
730 | |
731 | #define VM_MEMORY_CM_CRABS 96 |
732 | |
733 | /* memory allocated for QuickLookThumbnailing */ |
734 | #define VM_MEMORY_QUICKLOOK_THUMBNAILS 97 |
735 | |
736 | /* memory allocated by Accounts framework */ |
737 | #define VM_MEMORY_ACCOUNTS 98 |
738 | |
739 | /* memory allocated by Sanitizer runtime libraries */ |
740 | #define VM_MEMORY_SANITIZER 99 |
741 | |
742 | /* Differentiate memory needed by GPU drivers and frameworks from generic IOKit allocations */ |
743 | #define VM_MEMORY_IOACCELERATOR 100 |
744 | |
745 | /* memory allocated by CoreMedia for global image registration of frames */ |
746 | #define VM_MEMORY_CM_REGWARP 101 |
747 | |
748 | /* memory allocated by EmbeddedAcousticRecognition for speech decoder */ |
749 | #define VM_MEMORY_EAR_DECODER 102 |
750 | |
751 | /* CoreUI cached image data */ |
752 | #define VM_MEMORY_COREUI_CACHED_IMAGE_DATA 103 |
753 | |
754 | /* ColorSync is using mmap for read-only copies of ICC profile data */ |
755 | #define VM_MEMORY_COLORSYNC 104 |
756 | |
757 | /* backtrace info for simulated crashes */ |
758 | #define VM_MEMORY_BTINFO 105 |
759 | |
760 | /* memory allocated by CoreMedia */ |
761 | #define VM_MEMORY_CM_HLS 106 |
762 | |
763 | /* Reserve 230-239 for Rosetta */ |
764 | #define VM_MEMORY_ROSETTA 230 |
765 | #define VM_MEMORY_ROSETTA_THREAD_CONTEXT 231 |
766 | #define VM_MEMORY_ROSETTA_INDIRECT_BRANCH_MAP 232 |
767 | #define VM_MEMORY_ROSETTA_RETURN_STACK 233 |
768 | #define VM_MEMORY_ROSETTA_EXECUTABLE_HEAP 234 |
769 | #define VM_MEMORY_ROSETTA_USER_LDT 235 |
770 | #define VM_MEMORY_ROSETTA_ARENA 236 |
771 | #define VM_MEMORY_ROSETTA_10 239 |
772 | |
773 | /* Reserve 240-255 for application */ |
774 | #define VM_MEMORY_APPLICATION_SPECIFIC_1 240 |
775 | #define VM_MEMORY_APPLICATION_SPECIFIC_16 255 |
776 | |
777 | #define VM_MEMORY_COUNT 256 |
778 | |
779 | #if !XNU_KERNEL_PRIVATE |
780 | #define VM_MAKE_TAG(tag) ((tag) << 24) |
#endif /* !XNU_KERNEL_PRIVATE */
782 | |
783 | |
784 | #if KERNEL_PRIVATE |
785 | |
786 | /* kernel map tags */ |
787 | /* please add new definition strings to zprint */ |
788 | |
789 | #define VM_KERN_MEMORY_NONE 0 |
790 | |
791 | #define VM_KERN_MEMORY_OSFMK 1 |
792 | #define VM_KERN_MEMORY_BSD 2 |
793 | #define VM_KERN_MEMORY_IOKIT 3 |
794 | #define VM_KERN_MEMORY_LIBKERN 4 |
795 | #define VM_KERN_MEMORY_OSKEXT 5 |
796 | #define VM_KERN_MEMORY_KEXT 6 |
797 | #define VM_KERN_MEMORY_IPC 7 |
798 | #define VM_KERN_MEMORY_STACK 8 |
799 | #define VM_KERN_MEMORY_CPU 9 |
800 | #define VM_KERN_MEMORY_PMAP 10 |
801 | #define VM_KERN_MEMORY_PTE 11 |
802 | #define VM_KERN_MEMORY_ZONE 12 |
803 | #define VM_KERN_MEMORY_KALLOC 13 |
804 | #define VM_KERN_MEMORY_COMPRESSOR 14 |
805 | #define VM_KERN_MEMORY_COMPRESSED_DATA 15 |
806 | #define VM_KERN_MEMORY_PHANTOM_CACHE 16 |
807 | #define VM_KERN_MEMORY_WAITQ 17 |
808 | #define VM_KERN_MEMORY_DIAG 18 |
809 | #define VM_KERN_MEMORY_LOG 19 |
810 | #define VM_KERN_MEMORY_FILE 20 |
811 | #define VM_KERN_MEMORY_MBUF 21 |
812 | #define VM_KERN_MEMORY_UBC 22 |
813 | #define VM_KERN_MEMORY_SECURITY 23 |
814 | #define VM_KERN_MEMORY_MLOCK 24 |
815 | #define VM_KERN_MEMORY_REASON 25 |
816 | #define VM_KERN_MEMORY_SKYWALK 26 |
817 | #define VM_KERN_MEMORY_LTABLE 27 |
818 | #define VM_KERN_MEMORY_HV 28 |
819 | #define VM_KERN_MEMORY_KALLOC_DATA 29 |
820 | #define VM_KERN_MEMORY_RETIRED 30 |
821 | #define VM_KERN_MEMORY_KALLOC_TYPE 31 |
822 | #define VM_KERN_MEMORY_TRIAGE 32 |
823 | #define VM_KERN_MEMORY_RECOUNT 33 |
824 | #define VM_KERN_MEMORY_EXCLAVES 35 |
825 | /* add new tags here and adjust first-dynamic value */ |
826 | #define VM_KERN_MEMORY_FIRST_DYNAMIC 36 |
827 | |
828 | /* out of tags: */ |
829 | #define VM_KERN_MEMORY_ANY 255 |
830 | #define VM_KERN_MEMORY_COUNT 256 |
831 | |
832 | /* end kernel map tags */ |
833 | |
834 | // mach_memory_info.flags |
835 | #define VM_KERN_SITE_TYPE 0x000000FF |
836 | #define VM_KERN_SITE_TAG 0x00000000 |
837 | #define VM_KERN_SITE_KMOD 0x00000001 |
838 | #define VM_KERN_SITE_KERNEL 0x00000002 |
839 | #define VM_KERN_SITE_COUNTER 0x00000003 |
840 | #define VM_KERN_SITE_WIRED 0x00000100 /* add to wired count */ |
841 | #define VM_KERN_SITE_HIDE 0x00000200 /* no zprint */ |
842 | #define VM_KERN_SITE_NAMED 0x00000400 |
843 | #define VM_KERN_SITE_ZONE 0x00000800 |
844 | #define VM_KERN_SITE_ZONE_VIEW 0x00001000 |
845 | #define VM_KERN_SITE_KALLOC 0x00002000 /* zone field is size class */ |
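
// Example (sketch): the low byte of mach_memory_info.flags selects the kind
// of record, the remaining bits are attributes, e.g.:
//
//	if ((info->flags & VM_KERN_SITE_TYPE) == VM_KERN_SITE_COUNTER) {
//		// record is one of the VM_KERN_COUNT_* counters below
//	} else if (info->flags & VM_KERN_SITE_WIRED) {
//		// record contributes to the wired total
//	}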
846 | |
847 | #define VM_KERN_COUNT_MANAGED 0 |
848 | #define VM_KERN_COUNT_RESERVED 1 |
849 | #define VM_KERN_COUNT_WIRED 2 |
850 | #define VM_KERN_COUNT_WIRED_MANAGED 3 |
851 | #define VM_KERN_COUNT_STOLEN 4 |
852 | #define VM_KERN_COUNT_LOPAGE 5 |
853 | #define VM_KERN_COUNT_MAP_KERNEL 6 |
854 | #define VM_KERN_COUNT_MAP_ZONE 7 |
855 | #define VM_KERN_COUNT_MAP_KALLOC 8 |
856 | |
857 | #define VM_KERN_COUNT_WIRED_BOOT 9 |
858 | |
859 | #define VM_KERN_COUNT_BOOT_STOLEN 10 |
860 | |
861 | /* The number of bytes from the kernel cache that are wired in memory */ |
862 | #define VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE 11 |
863 | |
864 | #define VM_KERN_COUNT_MAP_KALLOC_LARGE VM_KERN_COUNT_MAP_KALLOC |
865 | #define VM_KERN_COUNT_MAP_KALLOC_LARGE_DATA 12 |
866 | #define VM_KERN_COUNT_MAP_KERNEL_DATA 13 |
867 | |
868 | #define VM_KERN_COUNTER_COUNT 14 |
869 | |
870 | #endif /* KERNEL_PRIVATE */ |
871 | |
872 | __END_DECLS |
873 | |
874 | #endif /* _MACH_VM_STATISTICS_H_ */ |
875 | |