1 | /* |
2 | * Copyright (c) 2000-2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | /* |
57 | */ |
58 | |
59 | /* |
60 | * File: vm/vm_map.h |
61 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
62 | * Date: 1985 |
63 | * |
64 | * Virtual memory map module definitions. |
65 | * |
66 | * Contributors: |
67 | * avie, dlb, mwyoung |
68 | */ |
69 | |
70 | #ifndef _VM_VM_MAP_H_ |
71 | #define _VM_VM_MAP_H_ |
72 | |
73 | #include <sys/cdefs.h> |
74 | |
75 | #include <mach/mach_types.h> |
76 | #include <mach/kern_return.h> |
77 | #include <mach/boolean.h> |
78 | #include <mach/vm_types.h> |
79 | #include <mach/vm_prot.h> |
80 | #include <mach/vm_inherit.h> |
81 | #include <mach/vm_behavior.h> |
82 | #include <mach/vm_param.h> |
83 | #include <mach/sdt.h> |
84 | #include <vm/pmap.h> |
85 | #include <os/overflow.h> |
86 | #ifdef XNU_KERNEL_PRIVATE |
87 | #include <vm/vm_protos.h> |
88 | #endif /* XNU_KERNEL_PRIVATE */ |
89 | #ifdef MACH_KERNEL_PRIVATE |
90 | #include <mach_assert.h> |
91 | #include <vm/vm_map_store.h> |
92 | #include <vm/vm_object.h> |
93 | #include <vm/vm_page.h> |
94 | #include <kern/locks.h> |
95 | #include <kern/zalloc.h> |
96 | #include <kern/macro_help.h> |
97 | |
98 | #include <kern/thread.h> |
99 | #include <os/refcnt.h> |
100 | #endif /* MACH_KERNEL_PRIVATE */ |
101 | |
102 | __BEGIN_DECLS |
103 | |
104 | #ifdef KERNEL_PRIVATE |
105 | |
106 | extern void vm_map_reference(vm_map_t map); |
107 | extern vm_map_t current_map(void); |
108 | |
/* Set up reserved areas in a new VM map */
110 | extern kern_return_t vm_map_exec( |
111 | vm_map_t new_map, |
112 | task_t task, |
113 | boolean_t is64bit, |
114 | void *fsroot, |
115 | cpu_type_t cpu, |
116 | cpu_subtype_t cpu_subtype, |
117 | boolean_t reslide, |
118 | boolean_t is_driverkit, |
119 | uint32_t rsr_version); |
120 | |
121 | #ifdef MACH_KERNEL_PRIVATE |
122 | |
123 | #define current_map_fast() (current_thread()->map) |
124 | #define current_map() (current_map_fast()) |
125 | |
126 | /* |
127 | * Types defined: |
128 | * |
129 | * vm_map_t the high-level address map data structure. |
130 | * vm_map_entry_t an entry in an address map. |
 * vm_map_version_t a timestamp of a map, for use with vm_map_lookup_and_lock_object
132 | * vm_map_copy_t represents memory copied from an address map, |
133 | * used for inter-map copy operations |
134 | */ |
135 | typedef struct vm_map_entry *vm_map_entry_t; |
136 | #define VM_MAP_ENTRY_NULL ((vm_map_entry_t) NULL) |
137 | |
138 | |
139 | #define named_entry_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr) |
140 | #define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp) |
141 | #define named_entry_lock(object) lck_mtx_lock(&(object)->Lock) |
142 | #define named_entry_unlock(object) lck_mtx_unlock(&(object)->Lock) |
143 | |
144 | /* |
145 | * Type: vm_named_entry_t [internal use only] |
146 | * |
147 | * Description: |
148 | * Description of a mapping to a memory cache object. |
149 | * |
150 | * Implementation: |
151 | * While the handle to this object is used as a means to map |
152 | * and pass around the right to map regions backed by pagers |
153 | * of all sorts, the named_entry itself is only manipulated |
154 | * by the kernel. Named entries hold information on the |
155 | * right to map a region of a cached object. Namely, |
156 | * the target cache object, the beginning and ending of the |
 * region to be mapped, and the permissions (read, write)
158 | * with which it can be mapped. |
159 | * |
160 | */ |
161 | |
162 | struct vm_named_entry { |
163 | decl_lck_mtx_data(, Lock); /* Synchronization */ |
164 | union { |
165 | vm_map_t map; /* map backing submap */ |
166 | vm_map_copy_t copy; /* a VM map copy */ |
167 | } backing; |
168 | vm_object_offset_t offset; /* offset into object */ |
169 | vm_object_size_t size; /* size of region */ |
170 | vm_object_offset_t data_offset; /* offset to first byte of data */ |
171 | unsigned int /* Is backing.xxx : */ |
172 | /* unsigned */ access:8, /* MAP_MEM_* */ |
173 | /* vm_prot_t */ protection:4, /* access permissions */ |
174 | /* boolean_t */ is_object:1, /* ... a VM object (wrapped in a VM map copy) */ |
175 | /* boolean_t */ internal:1, /* ... an internal object */ |
176 | /* boolean_t */ is_sub_map:1, /* ... a submap? */ |
177 | /* boolean_t */ is_copy:1, /* ... a VM map copy */ |
178 | /* boolean_t */ is_fully_owned:1; /* ... all objects are owned */ |
179 | #if VM_NAMED_ENTRY_DEBUG |
180 | uint32_t named_entry_bt; /* btref_t */ |
181 | #endif /* VM_NAMED_ENTRY_DEBUG */ |
182 | }; |
183 | |
184 | /* |
185 | * Bit 3 of the protection and max_protection bitfields in a vm_map_entry |
186 | * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means |
187 | * to convert between the "packed" representation in the vm_map_entry's fields |
188 | * and the equivalent bits defined in vm_prot_t. |
189 | */ |
190 | #if defined(__x86_64__) |
191 | #define VM_VALID_VMPROTECT_FLAGS (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC) |
192 | #else |
193 | #define VM_VALID_VMPROTECT_FLAGS (VM_PROT_ALL | VM_PROT_COPY) |
194 | #endif |
195 | |
196 | /* |
197 | * FOOTPRINT ACCOUNTING: |
198 | * The "memory footprint" is better described in the pmap layer. |
199 | * |
200 | * At the VM level, these 2 vm_map_entry_t fields are relevant: |
201 | * iokit_mapped: |
202 | * For an "iokit_mapped" entry, we add the size of the entry to the |
203 | * footprint when the entry is entered into the map and we subtract that |
204 | * size when the entry is removed. No other accounting should take place. |
205 | * "use_pmap" should be FALSE but is not taken into account. |
206 | * use_pmap: (only when is_sub_map is FALSE) |
207 | * This indicates if we should ask the pmap layer to account for pages |
208 | * in this mapping. If FALSE, we expect that another form of accounting |
209 | * is being used (e.g. "iokit_mapped" or the explicit accounting of |
210 | * non-volatile purgable memory). |
211 | * |
212 | * So the logic is mostly: |
213 | * if entry->is_sub_map == TRUE |
214 | * anything in a submap does not count for the footprint |
215 | * else if entry->iokit_mapped == TRUE |
216 | * footprint includes the entire virtual size of this entry |
217 | * else if entry->use_pmap == FALSE |
218 | * tell pmap NOT to account for pages being pmap_enter()'d from this |
219 | * mapping (i.e. use "alternate accounting") |
220 | * else |
221 | * pmap will account for pages being pmap_enter()'d from this mapping |
222 | * as it sees fit (only if anonymous, etc...) |
223 | */ |
224 | |
#define VME_ALIAS_BITS 12
#define VME_ALIAS_MASK ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT VME_ALIAS_BITS
#define VME_OFFSET_BITS (64 - VME_ALIAS_BITS)
#define VME_SUBMAP_SHIFT 2
#define VME_SUBMAP_BITS (sizeof(vm_offset_t) * 8 - VME_SUBMAP_SHIFT)

/*
 * VM_BTLOG_TAGS is tested with #if inside VME_OBJECT_SET() and the
 * vme_btref_* helpers below, so it must be defined before they are.
 */
#if (DEBUG || DEVELOPMENT) && !KASAN
#define VM_BTLOG_TAGS 1
#else
#define VM_BTLOG_TAGS 0
#endif
231 | |
232 | struct vm_map_entry { |
233 | struct vm_map_links links; /* links to other entries */ |
234 | #define vme_prev links.prev |
235 | #define vme_next links.next |
236 | #define vme_start links.start |
237 | #define vme_end links.end |
238 | |
239 | struct vm_map_store store; |
240 | |
241 | union { |
242 | vm_offset_t vme_object_value; |
243 | struct { |
244 | vm_offset_t vme_atomic:1; /* entry cannot be split/coalesced */ |
245 | vm_offset_t is_sub_map:1; /* Is "object" a submap? */ |
246 | vm_offset_t vme_submap:VME_SUBMAP_BITS; |
247 | }; |
248 | struct { |
249 | uint32_t vme_ctx_atomic : 1; |
250 | uint32_t vme_ctx_is_sub_map : 1; |
251 | uint32_t vme_context : 30; |
252 | |
253 | /** |
254 | * If vme_kernel_object==1 && KASAN, |
255 | * vme_object_or_delta holds the delta. |
256 | * |
257 | * If vme_kernel_object==1 && !KASAN, |
258 | * vme_tag_btref holds a btref when vme_alias is equal to the "vmtaglog" |
259 | * boot-arg. |
260 | * |
261 | * If vme_kernel_object==0, |
262 | * vme_object_or_delta holds the packed vm object. |
263 | */ |
264 | union { |
265 | vm_page_object_t vme_object_or_delta; |
266 | btref_t vme_tag_btref; |
267 | }; |
268 | }; |
269 | }; |
270 | |
271 | unsigned long long |
272 | /* vm_tag_t */ vme_alias:VME_ALIAS_BITS, /* entry VM tag */ |
273 | /* vm_object_offset_t*/ vme_offset:VME_OFFSET_BITS, /* offset into object */ |
274 | |
275 | /* boolean_t */ is_shared:1, /* region is shared */ |
276 | /* boolean_t */ __unused1:1, |
277 | /* boolean_t */ in_transition:1, /* Entry being changed */ |
278 | /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */ |
279 | /* behavior is not defined for submap type */ |
280 | /* vm_behavior_t */ behavior:2, /* user paging behavior hint */ |
281 | /* boolean_t */ needs_copy:1, /* object need to be copied? */ |
282 | |
283 | /* Only in task maps: */ |
284 | #if defined(__arm64e__) |
285 | /* |
286 | * On ARM, the fourth protection bit is unused (UEXEC is x86_64 only). |
287 | * We reuse it here to keep track of mappings that have hardware support |
288 | * for read-only/read-write trusted paths. |
289 | */ |
290 | /* vm_prot_t-like */ protection:3, /* protection code */ |
291 | /* boolean_t */ used_for_tpro:1, |
292 | #else /* __arm64e__ */ |
293 | /* vm_prot_t-like */protection:4, /* protection code, bit3=UEXEC */ |
294 | #endif /* __arm64e__ */ |
295 | |
296 | /* vm_prot_t-like */ max_protection:4, /* maximum protection, bit3=UEXEC */ |
297 | /* vm_inherit_t */ inheritance:2, /* inheritance */ |
298 | |
299 | /* |
300 | * use_pmap is overloaded: |
301 | * if "is_sub_map": |
302 | * use a nested pmap? |
303 | * else (i.e. if object): |
304 | * use pmap accounting |
305 | * for footprint? |
306 | */ |
307 | /* boolean_t */ use_pmap:1, |
308 | /* boolean_t */ no_cache:1, /* should new pages be cached? */ |
/* boolean_t */ vme_permanent:1, /* mapping cannot be removed */
310 | /* boolean_t */ superpage_size:1, /* use superpages of a certain size */ |
311 | /* boolean_t */ map_aligned:1, /* align to map's page size */ |
312 | /* |
313 | * zero out the wired pages of this entry |
 * if it is being deleted without unwiring them
315 | */ |
316 | /* boolean_t */ zero_wired_pages:1, |
317 | /* boolean_t */ used_for_jit:1, |
318 | /* boolean_t */ csm_associated:1, /* code signing monitor will validate */ |
319 | |
320 | /* iokit accounting: use the virtual size rather than resident size: */ |
321 | /* boolean_t */ iokit_acct:1, |
322 | /* boolean_t */ vme_resilient_codesign:1, |
323 | /* boolean_t */ vme_resilient_media:1, |
324 | /* boolean_t */ vme_xnu_user_debug:1, |
325 | /* boolean_t */ vme_no_copy_on_read:1, |
326 | /* boolean_t */ translated_allow_execute:1, /* execute in translated processes */ |
327 | /* boolean_t */ vme_kernel_object:1; /* vme_object is kernel_object */ |
328 | |
329 | unsigned short wired_count; /* can be paged if = 0 */ |
330 | unsigned short user_wired_count; /* for vm_wire */ |
331 | |
332 | #if DEBUG |
333 | #define MAP_ENTRY_CREATION_DEBUG (1) |
334 | #define MAP_ENTRY_INSERTION_DEBUG (1) |
335 | #endif /* DEBUG */ |
336 | #if MAP_ENTRY_CREATION_DEBUG |
337 | struct vm_map_header *vme_creation_maphdr; |
338 | uint32_t vme_creation_bt; /* btref_t */ |
339 | #endif /* MAP_ENTRY_CREATION_DEBUG */ |
340 | #if MAP_ENTRY_INSERTION_DEBUG |
341 | uint32_t vme_insertion_bt; /* btref_t */ |
342 | vm_map_offset_t vme_start_original; |
343 | vm_map_offset_t vme_end_original; |
344 | #endif /* MAP_ENTRY_INSERTION_DEBUG */ |
345 | }; |
346 | |
347 | #define VME_ALIAS(entry) \ |
348 | ((entry)->vme_alias) |
349 | |
350 | static inline vm_map_t |
351 | _VME_SUBMAP( |
352 | vm_map_entry_t entry) |
353 | { |
354 | __builtin_assume(entry->vme_submap); |
355 | return (vm_map_t)(entry->vme_submap << VME_SUBMAP_SHIFT); |
356 | } |
357 | #define VME_SUBMAP(entry) ({ assert((entry)->is_sub_map); _VME_SUBMAP(entry); }) |
358 | |
359 | static inline void |
360 | VME_SUBMAP_SET( |
361 | vm_map_entry_t entry, |
362 | vm_map_t submap) |
363 | { |
364 | __builtin_assume(((vm_offset_t)submap & 3) == 0); |
365 | |
366 | entry->is_sub_map = true; |
367 | entry->vme_submap = (vm_offset_t)submap >> VME_SUBMAP_SHIFT; |
368 | } |
369 | |
370 | static inline vm_object_t |
371 | _VME_OBJECT( |
372 | vm_map_entry_t entry) |
373 | { |
374 | vm_object_t object; |
375 | |
376 | if (!entry->vme_kernel_object) { |
377 | object = VM_OBJECT_UNPACK(entry->vme_object_or_delta); |
378 | __builtin_assume(!is_kernel_object(object)); |
379 | } else { |
380 | object = kernel_object_default; |
381 | } |
382 | return object; |
383 | } |
384 | #define VME_OBJECT(entry) ({ assert(!(entry)->is_sub_map); _VME_OBJECT(entry); }) |
385 | |
386 | static inline void |
387 | VME_OBJECT_SET( |
388 | vm_map_entry_t entry, |
389 | vm_object_t object, |
390 | bool atomic, |
391 | uint32_t context) |
392 | { |
393 | __builtin_assume(((vm_offset_t)object & 3) == 0); |
394 | |
395 | entry->vme_atomic = atomic; |
396 | entry->is_sub_map = false; |
397 | if (atomic) { |
398 | entry->vme_context = context; |
399 | } else { |
400 | entry->vme_context = 0; |
401 | } |
402 | |
403 | if (!object) { |
404 | entry->vme_object_or_delta = 0; |
405 | } else if (is_kernel_object(object)) { |
406 | #if VM_BTLOG_TAGS |
407 | if (!(entry->vme_kernel_object && entry->vme_tag_btref)) |
408 | #endif /* VM_BTLOG_TAGS */ |
409 | { |
410 | entry->vme_object_or_delta = 0; |
411 | } |
412 | } else { |
413 | #if VM_BTLOG_TAGS |
414 | if (entry->vme_kernel_object && entry->vme_tag_btref) { |
415 | btref_put(entry->vme_tag_btref); |
416 | } |
417 | #endif /* VM_BTLOG_TAGS */ |
418 | entry->vme_object_or_delta = VM_OBJECT_PACK(object); |
419 | } |
420 | |
421 | entry->vme_kernel_object = is_kernel_object(object); |
422 | entry->vme_resilient_codesign = false; |
423 | entry->used_for_jit = false; |
424 | } |
425 | |
426 | static inline vm_object_offset_t |
427 | VME_OFFSET( |
428 | vm_map_entry_t entry) |
429 | { |
430 | return entry->vme_offset << VME_OFFSET_SHIFT; |
431 | } |
432 | |
433 | static inline void |
434 | VME_OFFSET_SET( |
435 | vm_map_entry_t entry, |
436 | vm_object_offset_t offset) |
437 | { |
438 | entry->vme_offset = offset >> VME_OFFSET_SHIFT; |
439 | assert3u(VME_OFFSET(entry), ==, offset); |
440 | } |
441 | |
442 | /* |
443 | * IMPORTANT: |
444 | * The "alias" field can be updated while holding the VM map lock |
445 | * "shared". It's OK as along as it's the only field that can be |
446 | * updated without the VM map "exclusive" lock. |
447 | */ |
448 | static inline void |
449 | VME_ALIAS_SET( |
450 | vm_map_entry_t entry, |
451 | unsigned int alias) |
452 | { |
453 | assert3u(alias & VME_ALIAS_MASK, ==, alias); |
454 | entry->vme_alias = alias; |
455 | } |
456 | |
457 | static inline void |
458 | VME_OBJECT_SHADOW( |
459 | vm_map_entry_t entry, |
460 | vm_object_size_t length, |
461 | bool always) |
462 | { |
463 | vm_object_t object; |
464 | vm_object_offset_t offset; |
465 | |
466 | object = VME_OBJECT(entry); |
467 | offset = VME_OFFSET(entry); |
468 | vm_object_shadow(&object, &offset, length, always); |
469 | if (object != VME_OBJECT(entry)) { |
470 | entry->vme_object_or_delta = VM_OBJECT_PACK(object); |
471 | entry->use_pmap = true; |
472 | } |
473 | if (offset != VME_OFFSET(entry)) { |
474 | VME_OFFSET_SET(entry, offset); |
475 | } |
476 | } |
477 | |
484 | extern vm_tag_t vmtaglog_tag; /* Collected from a tunable in vm_resident.c */ |
485 | static inline void |
486 | vme_btref_consider_and_set(__unused vm_map_entry_t entry, __unused void *fp) |
487 | { |
488 | #if VM_BTLOG_TAGS |
489 | if (vmtaglog_tag && (VME_ALIAS(entry) == vmtaglog_tag) && entry->vme_kernel_object && entry->wired_count) { |
490 | assert(!entry->vme_tag_btref); /* We should have already zeroed and freed the btref if we're here. */ |
491 | entry->vme_tag_btref = btref_get(fp, BTREF_GET_NOWAIT); |
492 | } |
493 | #endif /* VM_BTLOG_TAGS */ |
494 | } |
495 | |
496 | static inline void |
497 | vme_btref_consider_and_put(__unused vm_map_entry_t entry) |
498 | { |
499 | #if VM_BTLOG_TAGS |
500 | if (entry->vme_tag_btref && entry->vme_kernel_object && (entry->wired_count == 0) && (entry->user_wired_count == 0)) { |
501 | btref_put(entry->vme_tag_btref); |
502 | entry->vme_tag_btref = 0; |
503 | } |
504 | #endif /* VM_BTLOG_TAGS */ |
505 | } |
506 | |
507 | |
508 | /* |
509 | * Convenience macros for dealing with superpages |
510 | * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h |
511 | */ |
512 | #define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES) |
513 | #define SUPERPAGE_MASK (-SUPERPAGE_SIZE) |
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
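
/*
 * Worked example (assuming PAGE_SIZE == 4096 and
 * SUPERPAGE_NBASEPAGES == 512, i.e. 2MB superpages):
 *
 *	SUPERPAGE_ROUND_DOWN(0x234567) == 0x200000
 *	SUPERPAGE_ROUND_UP(0x234567)   == 0x400000
 */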
516 | |
517 | /* |
518 | * wired_counts are unsigned short. This value is used to safeguard |
519 | * against any mishaps due to runaway user programs. |
520 | */ |
521 | #define MAX_WIRE_COUNT 65535 |
522 | |
523 | typedef struct vm_map_user_range { |
524 | vm_map_address_t vmur_min_address __kernel_data_semantics; |
525 | |
526 | vm_map_address_t vmur_max_address : 56 __kernel_data_semantics; |
527 | vm_map_range_id_t vmur_range_id : 8; |
528 | } *vm_map_user_range_t; |
529 | |
530 | /* |
531 | * Type: vm_map_t [exported; contents invisible] |
532 | * |
533 | * Description: |
534 | * An address map -- a directory relating valid |
535 | * regions of a task's address space to the corresponding |
536 | * virtual memory objects. |
537 | * |
538 | * Implementation: |
539 | * Maps are doubly-linked lists of map entries, sorted |
540 | * by address. One hint is used to start |
541 | * searches again from the last successful search, |
542 | * insertion, or removal. Another hint is used to |
543 | * quickly find free space. |
544 | * |
545 | * Note: |
546 | * vm_map_relocate_early_elem() knows about this layout, |
547 | * and needs to be kept in sync. |
548 | */ |
549 | struct _vm_map { |
550 | lck_rw_t lock; /* map lock */ |
551 | struct vm_map_header hdr; /* Map entry header */ |
552 | #define min_offset hdr.links.start /* start of range */ |
553 | #define max_offset hdr.links.end /* end of range */ |
554 | pmap_t XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap" ) pmap; /* Physical map */ |
555 | vm_map_size_t size; /* virtual size */ |
556 | uint64_t size_limit; /* rlimit on address space size */ |
557 | uint64_t data_limit; /* rlimit on data size */ |
558 | vm_map_size_t user_wire_limit;/* rlimit on user locked memory */ |
559 | vm_map_size_t user_wire_size; /* current size of user locked memory in this map */ |
560 | #if __x86_64__ |
561 | vm_map_offset_t vmmap_high_start; |
562 | #endif /* __x86_64__ */ |
563 | |
564 | os_ref_atomic_t map_refcnt; /* Reference count */ |
565 | |
566 | #if CONFIG_MAP_RANGES |
567 | #define VM_MAP_EXTRA_RANGES_MAX 1024 |
568 | struct mach_vm_range default_range; |
569 | struct mach_vm_range data_range; |
570 | |
571 | uint16_t extra_ranges_count; |
572 | vm_map_user_range_t extra_ranges; |
573 | #endif /* CONFIG_MAP_RANGES */ |
574 | |
575 | union { |
576 | /* |
577 | * If map->disable_vmentry_reuse == TRUE: |
578 | * the end address of the highest allocated vm_map_entry_t. |
579 | */ |
580 | vm_map_offset_t vmu1_highest_entry_end; |
581 | /* |
582 | * For a nested VM map: |
583 | * the lowest address in this nested VM map that we would |
584 | * expect to be unnested under normal operation (i.e. for |
585 | * regular copy-on-write on DATA section). |
586 | */ |
587 | vm_map_offset_t vmu1_lowest_unnestable_start; |
588 | } vmu1; |
589 | #define highest_entry_end vmu1.vmu1_highest_entry_end |
590 | #define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start |
591 | vm_map_entry_t hint; /* hint for quick lookups */ |
592 | union { |
593 | struct vm_map_links* vmmap_hole_hint; /* hint for quick hole lookups */ |
594 | struct vm_map_corpse_footprint_header *vmmap_corpse_footprint; |
595 | } vmmap_u_1; |
596 | #define hole_hint vmmap_u_1.vmmap_hole_hint |
597 | #define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint |
598 | union { |
599 | vm_map_entry_t _first_free; /* First free space hint */ |
600 | struct vm_map_links* _holes; /* links all holes between entries */ |
601 | } f_s; /* Union for free space data structures being used */ |
602 | |
603 | #define first_free f_s._first_free |
604 | #define holes_list f_s._holes |
605 | |
606 | unsigned int |
607 | /* boolean_t */ wait_for_space:1, /* Should callers wait for space? */ |
608 | /* boolean_t */ wiring_required:1, /* All memory wired? */ |
609 | /* boolean_t */ no_zero_fill:1, /* No zero fill absent pages */ |
610 | /* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */ |
611 | /* boolean_t */ switch_protect:1, /* Protect map from write faults while switched */ |
612 | /* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */ |
613 | /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */ |
614 | /* boolean_t */ holelistenabled:1, |
615 | /* boolean_t */ is_nested_map:1, |
616 | /* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */ |
617 | /* boolean_t */ jit_entry_exists:1, |
618 | /* boolean_t */ has_corpse_footprint:1, |
619 | /* boolean_t */ terminated:1, |
620 | /* boolean_t */ is_alien:1, /* for platform simulation, i.e. PLATFORM_IOS on OSX */ |
621 | /* boolean_t */ cs_enforcement:1, /* code-signing enforcement */ |
622 | /* boolean_t */ cs_debugged:1, /* code-signed but debugged */ |
623 | /* boolean_t */ reserved_regions:1, /* has reserved regions. The map size that userspace sees should ignore these. */ |
624 | /* boolean_t */ single_jit:1, /* only allow one JIT mapping */ |
625 | /* boolean_t */ never_faults:1, /* this map should never cause faults */ |
626 | /* boolean_t */ uses_user_ranges:1, /* has the map been configured to use user VM ranges */ |
627 | /* boolean_t */ tpro_enforcement:1, /* enforce TPRO propagation */ |
628 | /* boolean_t */ corpse_source:1, /* map is being used to create a corpse for diagnostics.*/ |
629 | /* reserved */ res0:1, |
/* reserved */ pad:9;
631 | unsigned int timestamp; /* Version number */ |
632 | }; |
633 | |
634 | #define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x)) |
635 | #define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links) |
636 | #define vm_map_first_entry(map) ((map)->hdr.links.next) |
637 | #define vm_map_last_entry(map) ((map)->hdr.links.prev) |
638 | |
639 | /* |
640 | * Type: vm_map_version_t [exported; contents invisible] |
641 | * |
642 | * Description: |
643 | * Map versions may be used to quickly validate a previous |
644 | * lookup operation. |
645 | * |
646 | * Usage note: |
647 | * Because they are bulky objects, map versions are usually |
648 | * passed by reference. |
649 | * |
650 | * Implementation: |
651 | * Just a timestamp for the main map. |
652 | */ |
653 | typedef struct vm_map_version { |
654 | unsigned int main_timestamp; |
655 | } vm_map_version_t; |
656 | |
657 | /* |
658 | * Type: vm_map_copy_t [exported; contents invisible] |
659 | * |
660 | * Description: |
661 | * A map copy object represents a region of virtual memory |
662 | * that has been copied from an address map but is still |
663 | * in transit. |
664 | * |
665 | * A map copy object may only be used by a single thread |
666 | * at a time. |
667 | * |
668 | * Implementation: |
669 | * There are two formats for map copy objects. |
670 | * The first is very similar to the main |
671 | * address map in structure, and as a result, some |
672 | * of the internal maintenance functions/macros can |
673 | * be used with either address maps or map copy objects. |
674 | * |
675 | * The map copy object contains a header links |
676 | * entry onto which the other entries that represent |
677 | * the region are chained. |
678 | * |
 * The second format is a kernel buffer copy object - for data
 * small enough that physical copies were the most efficient
 * method. The data lives in a kernel-allocated buffer hung off the
 * 'kdata' pointer, which is unioned with the entry-list header in
 * the 'c_u' member.
686 | */ |
687 | |
688 | struct vm_map_copy { |
689 | #define VM_MAP_COPY_ENTRY_LIST 1 |
690 | #define VM_MAP_COPY_KERNEL_BUFFER 2 |
691 | uint16_t type; |
692 | bool is_kernel_range; |
693 | bool is_user_range; |
694 | vm_map_range_id_t orig_range; |
695 | vm_object_offset_t offset; |
696 | vm_map_size_t size; |
697 | union { |
698 | struct vm_map_header hdr; /* ENTRY_LIST */ |
699 | void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata" ) kdata; /* KERNEL_BUFFER */ |
700 | } c_u; |
701 | }; |
702 | |
703 | |
704 | ZONE_DECLARE_ID(ZONE_ID_VM_MAP_ENTRY, struct vm_map_entry); |
705 | #define vm_map_entry_zone (&zone_array[ZONE_ID_VM_MAP_ENTRY]) |
706 | |
707 | ZONE_DECLARE_ID(ZONE_ID_VM_MAP_HOLES, struct vm_map_links); |
708 | #define vm_map_holes_zone (&zone_array[ZONE_ID_VM_MAP_HOLES]) |
709 | |
710 | ZONE_DECLARE_ID(ZONE_ID_VM_MAP, struct _vm_map); |
711 | #define vm_map_zone (&zone_array[ZONE_ID_VM_MAP]) |
712 | |
713 | |
714 | #define cpy_hdr c_u.hdr |
715 | #define cpy_kdata c_u.kdata |
716 | |
717 | #define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift) |
718 | #define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy))) |
719 | #define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1) |
720 | |
721 | /* |
722 | * Useful macros for entry list copy objects |
723 | */ |
724 | |
725 | #define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links) |
726 | #define vm_map_copy_first_entry(copy) \ |
727 | ((copy)->cpy_hdr.links.next) |
728 | #define vm_map_copy_last_entry(copy) \ |
729 | ((copy)->cpy_hdr.links.prev) |
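
/*
 * Consumer sketch (illustrative, not a real XNU routine): a copy
 * object is interpreted according to its "type" discriminant:
 *
 *	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
 *		for (entry = vm_map_copy_first_entry(copy);
 *		    entry != vm_map_copy_to_entry(copy);
 *		    entry = entry->vme_next) {
 *			...
 *		}
 *	} else {
 *		assert(copy->type == VM_MAP_COPY_KERNEL_BUFFER);
 *		// data lives in the kernel buffer at copy->cpy_kdata
 *	}
 */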
730 | |
731 | extern kern_return_t |
732 | vm_map_copy_adjust_to_target( |
733 | vm_map_copy_t copy_map, |
734 | vm_map_offset_t offset, |
735 | vm_map_size_t size, |
736 | vm_map_t target_map, |
737 | boolean_t copy, |
738 | vm_map_copy_t *target_copy_map_p, |
739 | vm_map_offset_t *overmap_start_p, |
740 | vm_map_offset_t *overmap_end_p, |
741 | vm_map_offset_t *trimmed_start_p); |
742 | |
743 | /* |
744 | * Macros: vm_map_lock, etc. [internal use only] |
745 | * Description: |
746 | * Perform locking on the data portion of a map. |
747 | * When multiple maps are to be locked, order by map address. |
748 | * (See vm_map.c::vm_remap()) |
749 | */ |
750 | |
751 | #define vm_map_lock_init(map) \ |
752 | ((map)->timestamp = 0 , \ |
753 | lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr)) |
754 | |
755 | #define vm_map_lock(map) \ |
756 | MACRO_BEGIN \ |
757 | DTRACE_VM(vm_map_lock_w); \ |
758 | lck_rw_lock_exclusive(&(map)->lock); \ |
759 | MACRO_END |
760 | |
761 | #define vm_map_unlock(map) \ |
762 | MACRO_BEGIN \ |
763 | DTRACE_VM(vm_map_unlock_w); \ |
764 | (map)->timestamp++; \ |
765 | lck_rw_done(&(map)->lock); \ |
766 | MACRO_END |
767 | |
768 | #define vm_map_lock_read(map) \ |
769 | MACRO_BEGIN \ |
770 | DTRACE_VM(vm_map_lock_r); \ |
771 | lck_rw_lock_shared(&(map)->lock); \ |
772 | MACRO_END |
773 | |
774 | #define vm_map_unlock_read(map) \ |
775 | MACRO_BEGIN \ |
776 | DTRACE_VM(vm_map_unlock_r); \ |
777 | lck_rw_done(&(map)->lock); \ |
778 | MACRO_END |
779 | |
780 | #define vm_map_lock_write_to_read(map) \ |
781 | MACRO_BEGIN \ |
782 | DTRACE_VM(vm_map_lock_downgrade); \ |
783 | (map)->timestamp++; \ |
784 | lck_rw_lock_exclusive_to_shared(&(map)->lock); \ |
785 | MACRO_END |
786 | |
787 | __attribute__((always_inline)) |
788 | int vm_map_lock_read_to_write(vm_map_t map); |
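
/*
 * Typical upgrade pattern (sketch): vm_map_lock_read_to_write()
 * returns 0 on success; on failure the shared hold has already been
 * dropped, so the caller must re-take the lock and re-validate its
 * state (e.g. against map->timestamp):
 *
 *	vm_map_lock_read(map);
 *	...
 *	if (vm_map_lock_read_to_write(map)) {
 *		vm_map_lock(map);
 *		// re-validate: the map may have changed while unlocked
 *	}
 */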
789 | |
790 | __attribute__((always_inline)) |
791 | boolean_t vm_map_try_lock(vm_map_t map); |
792 | |
793 | __attribute__((always_inline)) |
794 | boolean_t vm_map_try_lock_read(vm_map_t map); |
795 | |
796 | int vm_self_region_page_shift(vm_map_t target_map); |
797 | int vm_self_region_page_shift_safely(vm_map_t target_map); |
798 | |
799 | #define vm_map_lock_assert_held(map) \ |
800 | LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_HELD) |
801 | #define vm_map_lock_assert_shared(map) \ |
802 | LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_SHARED) |
803 | #define vm_map_lock_assert_exclusive(map) \ |
804 | LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE) |
805 | #define vm_map_lock_assert_notheld(map) \ |
806 | LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_NOTHELD) |
807 | |
808 | /* |
809 | * Exported procedures that operate on vm_map_t. |
810 | */ |
811 | |
/* Lookup the map entry containing the specified address in the given map */
813 | extern boolean_t vm_map_lookup_entry( |
814 | vm_map_t map, |
815 | vm_map_address_t address, |
816 | vm_map_entry_t *entry); /* OUT */ |
817 | |
/* Lookup the map entry containing the specified address, or the next entry if none, in the given map */
819 | extern boolean_t vm_map_lookup_entry_or_next( |
820 | vm_map_t map, |
821 | vm_map_address_t address, |
822 | vm_map_entry_t *entry); /* OUT */ |
823 | |
824 | /* like vm_map_lookup_entry without the PGZ bear trap */ |
825 | #if CONFIG_PROB_GZALLOC |
826 | extern boolean_t vm_map_lookup_entry_allow_pgz( |
827 | vm_map_t map, |
828 | vm_map_address_t address, |
829 | vm_map_entry_t *entry); /* OUT */ |
830 | #else /* !CONFIG_PROB_GZALLOC */ |
831 | #define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry |
832 | #endif /* !CONFIG_PROB_GZALLOC */ |
833 | |
834 | extern void vm_map_copy_remap( |
835 | vm_map_t map, |
836 | vm_map_entry_t where, |
837 | vm_map_copy_t copy, |
838 | vm_map_offset_t adjustment, |
839 | vm_prot_t cur_prot, |
840 | vm_prot_t max_prot, |
841 | vm_inherit_t inheritance); |
842 | |
843 | /* Find the VM object, offset, and protection for a given virtual address |
844 | * in the specified map, assuming a page fault of the type specified. */ |
845 | extern kern_return_t vm_map_lookup_and_lock_object( |
846 | vm_map_t *var_map, /* IN/OUT */ |
847 | vm_map_address_t vaddr, |
848 | vm_prot_t fault_type, |
849 | int object_lock_type, |
850 | vm_map_version_t *out_version, /* OUT */ |
851 | vm_object_t *object, /* OUT */ |
852 | vm_object_offset_t *offset, /* OUT */ |
853 | vm_prot_t *out_prot, /* OUT */ |
854 | boolean_t *wired, /* OUT */ |
855 | vm_object_fault_info_t fault_info, /* OUT */ |
856 | vm_map_t *real_map, /* OUT */ |
857 | bool *contended); /* OUT */ |
858 | |
859 | /* Verifies that the map has not changed since the given version. */ |
860 | extern boolean_t vm_map_verify( |
861 | vm_map_t map, |
862 | vm_map_version_t *version); /* REF */ |
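
/*
 * Revalidation sketch (after the pattern used by the fault path): a
 * lookup records a version; once the map lock has been dropped and
 * re-taken, vm_map_verify() can stand in for a full re-lookup:
 *
 *	vm_map_version_t version;
 *
 *	kr = vm_map_lookup_and_lock_object(&map, vaddr, fault_type,
 *	    object_lock_type, &version, &object, &offset, &prot,
 *	    &wired, &fault_info, &real_map, NULL);
 *	// ... drop the map lock, do the slow work ...
 *	vm_map_lock_read(map);
 *	if (!vm_map_verify(map, &version)) {
 *		// the map changed underneath us: redo the lookup
 *	}
 */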
863 | |
864 | |
865 | /* |
866 | * Functions implemented as macros |
867 | */ |
868 | #define vm_map_min(map) ((map)->min_offset) |
869 | /* Lowest valid address in |
870 | * a map */ |
871 | |
872 | #define vm_map_max(map) ((map)->max_offset) |
873 | /* Highest valid address */ |
874 | |
875 | #define vm_map_pmap(map) ((map)->pmap) |
876 | /* Physical map associated |
877 | * with this address map */ |
878 | |
879 | /* Gain a reference to an existing map */ |
880 | extern void vm_map_reference( |
881 | vm_map_t map); |
882 | |
883 | /* |
884 | * Wait and wakeup macros for in_transition map entries. |
885 | */ |
886 | #define vm_map_entry_wait(map, interruptible) \ |
887 | ((map)->timestamp++ , \ |
888 | lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \ |
889 | (event_t)&(map)->hdr, interruptible)) |
890 | |
891 | |
892 | #define vm_map_entry_wakeup(map) \ |
893 | thread_wakeup((event_t)(&(map)->hdr)) |
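
/*
 * Canonical in_transition wait (sketch): the map lock is held
 * exclusive on entry; vm_map_entry_wait() drops it while sleeping and
 * re-acquires it on wakeup, so entries must be looked up again:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		// re-lookup "entry": the map may have changed
 *	}
 */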
894 | |
895 | |
896 | /* simplify map entries */ |
897 | extern void vm_map_simplify_entry( |
898 | vm_map_t map, |
899 | vm_map_entry_t this_entry); |
900 | extern void vm_map_simplify( |
901 | vm_map_t map, |
902 | vm_map_offset_t start); |
903 | |
904 | #if XNU_PLATFORM_MacOSX |
905 | |
906 | /* Move the information in a map copy object to a new map copy object */ |
907 | extern vm_map_copy_t vm_map_copy_copy( |
908 | vm_map_copy_t copy); |
909 | |
910 | #endif /* XNU_PLATFORM_MacOSX */ |
911 | |
912 | /* Enter a mapping */ |
913 | extern kern_return_t vm_map_enter( |
914 | vm_map_t map, |
915 | vm_map_offset_t *address, |
916 | vm_map_size_t size, |
917 | vm_map_offset_t mask, |
918 | vm_map_kernel_flags_t vmk_flags, |
919 | vm_object_t object, |
920 | vm_object_offset_t offset, |
921 | boolean_t needs_copy, |
922 | vm_prot_t cur_protection, |
923 | vm_prot_t max_protection, |
924 | vm_inherit_t inheritance); |
925 | |
926 | #if __arm64__ |
927 | extern kern_return_t vm_map_enter_fourk( |
928 | vm_map_t map, |
929 | vm_map_offset_t *address, |
930 | vm_map_size_t size, |
931 | vm_map_offset_t mask, |
932 | vm_map_kernel_flags_t vmk_flags, |
933 | vm_object_t object, |
934 | vm_object_offset_t offset, |
935 | boolean_t needs_copy, |
936 | vm_prot_t cur_protection, |
937 | vm_prot_t max_protection, |
938 | vm_inherit_t inheritance); |
939 | #endif /* __arm64__ */ |
940 | |
941 | /* XXX should go away - replaced with regular enter of contig object */ |
942 | extern kern_return_t vm_map_enter_cpm( |
943 | vm_map_t map, |
944 | vm_map_address_t *addr, |
945 | vm_map_size_t size, |
946 | vm_map_kernel_flags_t vmk_flags); |
947 | |
948 | extern kern_return_t vm_map_remap( |
949 | vm_map_t target_map, |
950 | vm_map_offset_t *address, |
951 | vm_map_size_t size, |
952 | vm_map_offset_t mask, |
953 | vm_map_kernel_flags_t vmk_flags, |
954 | vm_map_t src_map, |
955 | vm_map_offset_t memory_address, |
956 | boolean_t copy, |
957 | vm_prot_t *cur_protection, |
958 | vm_prot_t *max_protection, |
959 | vm_inherit_t inheritance); |
960 | |
961 | |
962 | /* |
963 | * Read and write from a kernel buffer to a specified map. |
964 | */ |
965 | extern kern_return_t vm_map_write_user( |
966 | vm_map_t map, |
967 | void *src_p, |
968 | vm_map_offset_t dst_addr, |
969 | vm_size_t size); |
970 | |
971 | extern kern_return_t vm_map_read_user( |
972 | vm_map_t map, |
973 | vm_map_offset_t src_addr, |
974 | void *dst_p, |
975 | vm_size_t size); |
976 | |
977 | extern void vm_map_inherit_limits( |
978 | vm_map_t new_map, |
979 | const struct _vm_map *old_map); |
980 | |
981 | /* Create a new task map using an existing task map as a template. */ |
982 | extern vm_map_t vm_map_fork( |
983 | ledger_t ledger, |
984 | vm_map_t old_map, |
985 | int options); |
986 | #define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001 |
987 | #define VM_MAP_FORK_PRESERVE_PURGEABLE 0x00000002 |
988 | #define VM_MAP_FORK_CORPSE_FOOTPRINT 0x00000004 |
989 | |
990 | /* Change inheritance */ |
991 | extern kern_return_t vm_map_inherit( |
992 | vm_map_t map, |
993 | vm_map_offset_t start, |
994 | vm_map_offset_t end, |
995 | vm_inherit_t new_inheritance); |
996 | |
997 | /* Add or remove machine-dependent attributes from map regions */ |
998 | extern kern_return_t vm_map_machine_attribute( |
999 | vm_map_t map, |
1000 | vm_map_offset_t start, |
1001 | vm_map_offset_t end, |
1002 | vm_machine_attribute_t attribute, |
1003 | vm_machine_attribute_val_t* value); /* IN/OUT */ |
1004 | |
1005 | extern kern_return_t vm_map_msync( |
1006 | vm_map_t map, |
1007 | vm_map_address_t address, |
1008 | vm_map_size_t size, |
1009 | vm_sync_t sync_flags); |
1010 | |
1011 | /* Set paging behavior */ |
1012 | extern kern_return_t vm_map_behavior_set( |
1013 | vm_map_t map, |
1014 | vm_map_offset_t start, |
1015 | vm_map_offset_t end, |
1016 | vm_behavior_t new_behavior); |
1017 | |
1018 | extern kern_return_t vm_map_region( |
1019 | vm_map_t map, |
1020 | vm_map_offset_t *address, |
1021 | vm_map_size_t *size, |
1022 | vm_region_flavor_t flavor, |
1023 | vm_region_info_t info, |
1024 | mach_msg_type_number_t *count, |
1025 | mach_port_t *object_name); |
1026 | |
1027 | extern kern_return_t vm_map_region_recurse_64( |
1028 | vm_map_t map, |
1029 | vm_map_offset_t *address, |
1030 | vm_map_size_t *size, |
1031 | natural_t *nesting_depth, |
1032 | vm_region_submap_info_64_t info, |
1033 | mach_msg_type_number_t *count); |
1034 | |
1035 | extern kern_return_t vm_map_page_query_internal( |
1036 | vm_map_t map, |
1037 | vm_map_offset_t offset, |
1038 | int *disposition, |
1039 | int *ref_count); |
1040 | |
1041 | extern kern_return_t vm_map_query_volatile( |
1042 | vm_map_t map, |
1043 | mach_vm_size_t *volatile_virtual_size_p, |
1044 | mach_vm_size_t *volatile_resident_size_p, |
1045 | mach_vm_size_t *volatile_compressed_size_p, |
1046 | mach_vm_size_t *volatile_pmap_size_p, |
1047 | mach_vm_size_t *volatile_compressed_pmap_size_p); |
1048 | |
1049 | /* Convert from a map entry port to a map */ |
1050 | extern vm_map_t convert_port_entry_to_map( |
1051 | ipc_port_t port); |
1052 | |
1053 | |
1054 | extern kern_return_t vm_map_set_cache_attr( |
1055 | vm_map_t map, |
1056 | vm_map_offset_t va); |
1057 | |
1058 | |
1059 | /* definitions related to overriding the NX behavior */ |
1060 | |
1061 | #define VM_ABI_32 0x1 |
1062 | #define VM_ABI_64 0x2 |
1063 | |
1064 | extern int override_nx(vm_map_t map, uint32_t user_tag); |
1065 | |
1066 | extern void vm_map_region_top_walk( |
1067 | vm_map_entry_t entry, |
1068 | vm_region_top_info_t top); |
1069 | extern void vm_map_region_walk( |
1070 | vm_map_t map, |
1071 | vm_map_offset_t va, |
1072 | vm_map_entry_t entry, |
1073 | vm_object_offset_t offset, |
1074 | vm_object_size_t range, |
1075 | vm_region_extended_info_t extended, |
1076 | boolean_t look_for_pages, |
1077 | mach_msg_type_number_t count); |
1078 | |
1079 | |
1080 | |
1081 | extern void vm_map_copy_footprint_ledgers( |
1082 | task_t old_task, |
1083 | task_t new_task); |
1084 | extern void vm_map_copy_ledger( |
1085 | task_t old_task, |
1086 | task_t new_task, |
1087 | int ledger_entry); |
1088 | |
1089 | /** |
1090 | * Represents a single region of virtual address space that should be reserved |
1091 | * (pre-mapped) in a user address space. |
1092 | */ |
1093 | struct vm_reserved_region { |
1094 | const char *vmrr_name; |
1095 | vm_map_offset_t vmrr_addr; |
1096 | vm_map_size_t vmrr_size; |
1097 | }; |
1098 | |
1099 | /** |
 * Returns a machine-dependent array of address space regions that should be
1101 | * reserved by the VM. This function is defined in the machine-dependent |
1102 | * machine_routines.c files. |
1103 | */ |
1104 | extern size_t ml_get_vm_reserved_regions( |
1105 | bool vm_is64bit, |
1106 | const struct vm_reserved_region **regions); |
1107 | |
1108 | /** |
1109 | * Explicitly preallocates a floating point save area. This function is defined |
1110 | * in the machine-dependent machine_routines.c files. |
1111 | */ |
1112 | extern void ml_fp_save_area_prealloc(void); |
1113 | |
1114 | #endif /* MACH_KERNEL_PRIVATE */ |
1115 | |
1116 | /* Create an empty map */ |
1117 | extern vm_map_t vm_map_create( |
1118 | pmap_t pmap, |
1119 | vm_map_offset_t min_off, |
1120 | vm_map_offset_t max_off, |
1121 | boolean_t pageable); |
1122 | |
1123 | extern vm_map_size_t vm_map_adjusted_size(vm_map_t map); |
1124 | |
1125 | extern void vm_map_disable_hole_optimization(vm_map_t map); |
1126 | |
1127 | /* Get rid of a map */ |
1128 | extern void vm_map_destroy( |
1129 | vm_map_t map); |
1130 | |
1131 | /* Lose a reference */ |
1132 | extern void vm_map_deallocate( |
1133 | vm_map_t map); |
1134 | |
1135 | /* Lose a reference */ |
1136 | extern void vm_map_inspect_deallocate( |
1137 | vm_map_inspect_t map); |
1138 | |
1139 | /* Lose a reference */ |
1140 | extern void vm_map_read_deallocate( |
1141 | vm_map_read_t map); |
1142 | |
1143 | extern vm_map_t vm_map_switch( |
1144 | vm_map_t map); |
1145 | |
1146 | /* Change protection */ |
1147 | extern kern_return_t vm_map_protect( |
1148 | vm_map_t map, |
1149 | vm_map_offset_t start, |
1150 | vm_map_offset_t end, |
1151 | vm_prot_t new_prot, |
1152 | boolean_t set_max); |
1153 | |
1154 | /* Check protection */ |
1155 | extern boolean_t vm_map_check_protection( |
1156 | vm_map_t map, |
1157 | vm_map_offset_t start, |
1158 | vm_map_offset_t end, |
1159 | vm_prot_t protection); |
1160 | |
1161 | extern boolean_t vm_map_cs_enforcement( |
1162 | vm_map_t map); |
1163 | extern void vm_map_cs_enforcement_set( |
1164 | vm_map_t map, |
1165 | boolean_t val); |
1166 | |
1167 | extern void vm_map_cs_debugged_set( |
1168 | vm_map_t map, |
1169 | boolean_t val); |
1170 | |
1171 | extern kern_return_t vm_map_cs_wx_enable(vm_map_t map); |
1172 | extern kern_return_t vm_map_csm_allow_jit(vm_map_t map); |
1173 | |
1174 | /* wire down a region */ |
1175 | |
1176 | #ifdef XNU_KERNEL_PRIVATE |
1177 | |
1178 | extern void vm_map_will_allocate_early_map( |
1179 | vm_map_t *map_owner); |
1180 | |
1181 | extern void vm_map_relocate_early_maps( |
1182 | vm_offset_t delta); |
1183 | |
1184 | extern void vm_map_relocate_early_elem( |
1185 | uint32_t zone_id, |
1186 | vm_offset_t new_addr, |
1187 | vm_offset_t delta); |
1188 | |
1189 | /* never fails */ |
1190 | extern vm_map_t vm_map_create_options( |
1191 | pmap_t pmap, |
1192 | vm_map_offset_t min_off, |
1193 | vm_map_offset_t max_off, |
1194 | vm_map_create_options_t options); |
1195 | |
1196 | extern kern_return_t vm_map_wire_kernel( |
1197 | vm_map_t map, |
1198 | vm_map_offset_t start, |
1199 | vm_map_offset_t end, |
1200 | vm_prot_t access_type, |
1201 | vm_tag_t tag, |
1202 | boolean_t user_wire); |
1203 | |
1204 | extern kern_return_t vm_map_wire_and_extract_kernel( |
1205 | vm_map_t map, |
1206 | vm_map_offset_t start, |
1207 | vm_prot_t access_type, |
1208 | vm_tag_t tag, |
1209 | boolean_t user_wire, |
1210 | ppnum_t *physpage_p); |
1211 | |
1212 | /* kext exported versions */ |
1213 | |
1214 | extern kern_return_t vm_map_wire_external( |
1215 | vm_map_t map, |
1216 | vm_map_offset_t start, |
1217 | vm_map_offset_t end, |
1218 | vm_prot_t access_type, |
1219 | boolean_t user_wire); |
1220 | |
1221 | extern kern_return_t vm_map_wire_and_extract_external( |
1222 | vm_map_t map, |
1223 | vm_map_offset_t start, |
1224 | vm_prot_t access_type, |
1225 | boolean_t user_wire, |
1226 | ppnum_t *physpage_p); |
1227 | |
1228 | #else /* XNU_KERNEL_PRIVATE */ |
1229 | |
1230 | extern kern_return_t vm_map_wire( |
1231 | vm_map_t map, |
1232 | vm_map_offset_t start, |
1233 | vm_map_offset_t end, |
1234 | vm_prot_t access_type, |
1235 | boolean_t user_wire); |
1236 | |
1237 | extern kern_return_t vm_map_wire_and_extract( |
1238 | vm_map_t map, |
1239 | vm_map_offset_t start, |
1240 | vm_prot_t access_type, |
1241 | boolean_t user_wire, |
1242 | ppnum_t *physpage_p); |
1243 | |
1244 | #endif /* !XNU_KERNEL_PRIVATE */ |
1245 | |
1246 | /* unwire a region */ |
1247 | extern kern_return_t vm_map_unwire( |
1248 | vm_map_t map, |
1249 | vm_map_offset_t start, |
1250 | vm_map_offset_t end, |
1251 | boolean_t user_wire); |
1252 | |
1253 | #ifdef XNU_KERNEL_PRIVATE |
1254 | |
1255 | /* Enter a mapping of a memory object */ |
1256 | extern kern_return_t vm_map_enter_mem_object( |
1257 | vm_map_t map, |
1258 | vm_map_offset_t *address, |
1259 | vm_map_size_t size, |
1260 | vm_map_offset_t mask, |
1261 | vm_map_kernel_flags_t vmk_flags, |
1262 | ipc_port_t port, |
1263 | vm_object_offset_t offset, |
1264 | boolean_t needs_copy, |
1265 | vm_prot_t cur_protection, |
1266 | vm_prot_t max_protection, |
1267 | vm_inherit_t inheritance); |
1268 | |
1269 | /* Enter a mapping of a memory object */ |
1270 | extern kern_return_t vm_map_enter_mem_object_prefault( |
1271 | vm_map_t map, |
1272 | vm_map_offset_t *address, |
1273 | vm_map_size_t size, |
1274 | vm_map_offset_t mask, |
1275 | vm_map_kernel_flags_t vmk_flags, |
1276 | ipc_port_t port, |
1277 | vm_object_offset_t offset, |
1278 | vm_prot_t cur_protection, |
1279 | vm_prot_t max_protection, |
1280 | upl_page_list_ptr_t page_list, |
1281 | unsigned int page_list_count); |
1282 | |
1283 | /* Enter a mapping of a memory object */ |
1284 | extern kern_return_t vm_map_enter_mem_object_control( |
1285 | vm_map_t map, |
1286 | vm_map_offset_t *address, |
1287 | vm_map_size_t size, |
1288 | vm_map_offset_t mask, |
1289 | vm_map_kernel_flags_t vmk_flags, |
1290 | memory_object_control_t control, |
1291 | vm_object_offset_t offset, |
1292 | boolean_t needs_copy, |
1293 | vm_prot_t cur_protection, |
1294 | vm_prot_t max_protection, |
1295 | vm_inherit_t inheritance); |
1296 | |
1297 | extern kern_return_t vm_map_terminate( |
1298 | vm_map_t map); |
1299 | |
1300 | extern void vm_map_require( |
1301 | vm_map_t map); |
1302 | |
1303 | extern void vm_map_copy_require( |
1304 | vm_map_copy_t copy); |
1305 | |
extern kern_return_t vm_map_copy_extract(
1307 | vm_map_t src_map, |
1308 | vm_map_address_t src_addr, |
1309 | vm_map_size_t len, |
1310 | boolean_t copy, |
1311 | vm_map_copy_t *copy_result, /* OUT */ |
1312 | vm_prot_t *cur_prot, /* OUT */ |
1313 | vm_prot_t *max_prot, /* OUT */ |
1314 | vm_inherit_t inheritance, |
1315 | vm_map_kernel_flags_t vmk_flags); |
1316 | |
#endif /* XNU_KERNEL_PRIVATE */
1318 | |
1319 | /* Discard a copy without using it */ |
1320 | extern void vm_map_copy_discard( |
1321 | vm_map_copy_t copy); |
1322 | |
1323 | /* Overwrite existing memory with a copy */ |
1324 | extern kern_return_t vm_map_copy_overwrite( |
1325 | vm_map_t dst_map, |
1326 | vm_map_address_t dst_addr, |
1327 | vm_map_copy_t copy, |
1328 | vm_map_size_t copy_size, |
1329 | boolean_t interruptible); |
1330 | |
1331 | #define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES (3) |
1332 | |
1333 | |
/* returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
1335 | extern boolean_t vm_map_copy_validate_size( |
1336 | vm_map_t dst_map, |
1337 | vm_map_copy_t copy, |
1338 | vm_map_size_t *size); |
1339 | |
1340 | /* Place a copy into a map */ |
1341 | extern kern_return_t vm_map_copyout( |
1342 | vm_map_t dst_map, |
1343 | vm_map_address_t *dst_addr, /* OUT */ |
1344 | vm_map_copy_t copy); |
1345 | |
1346 | extern kern_return_t vm_map_copyout_size( |
1347 | vm_map_t dst_map, |
1348 | vm_map_address_t *dst_addr, /* OUT */ |
1349 | vm_map_copy_t copy, |
1350 | vm_map_size_t copy_size); |
1351 | |
1352 | extern kern_return_t vm_map_copyout_internal( |
1353 | vm_map_t dst_map, |
1354 | vm_map_address_t *dst_addr, /* OUT */ |
1355 | vm_map_copy_t copy, |
1356 | vm_map_size_t copy_size, |
1357 | boolean_t consume_on_success, |
1358 | vm_prot_t cur_protection, |
1359 | vm_prot_t max_protection, |
1360 | vm_inherit_t inheritance); |
1361 | |
1362 | extern kern_return_t vm_map_copyin( |
1363 | vm_map_t src_map, |
1364 | vm_map_address_t src_addr, |
1365 | vm_map_size_t len, |
1366 | boolean_t src_destroy, |
1367 | vm_map_copy_t *copy_result); /* OUT */ |
1368 | |
1369 | extern kern_return_t vm_map_copyin_common( |
1370 | vm_map_t src_map, |
1371 | vm_map_address_t src_addr, |
1372 | vm_map_size_t len, |
1373 | boolean_t src_destroy, |
1374 | boolean_t src_volatile, |
1375 | vm_map_copy_t *copy_result, /* OUT */ |
1376 | boolean_t use_maxprot); |
1377 | |
1378 | #define VM_MAP_COPYIN_SRC_DESTROY 0x00000001 |
1379 | #define VM_MAP_COPYIN_USE_MAXPROT 0x00000002 |
1380 | #define VM_MAP_COPYIN_ENTRY_LIST 0x00000004 |
1381 | #define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008 |
1382 | #define VM_MAP_COPYIN_FORK 0x00000010 |
1383 | #define VM_MAP_COPYIN_ALL_FLAGS 0x0000001F |
1384 | extern kern_return_t vm_map_copyin_internal( |
1385 | vm_map_t src_map, |
1386 | vm_map_address_t src_addr, |
1387 | vm_map_size_t len, |
1388 | int flags, |
1389 | vm_map_copy_t *copy_result); /* OUT */ |
1390 | |
1391 | |
1392 | extern void vm_map_disable_NX( |
1393 | vm_map_t map); |
1394 | |
1395 | extern void vm_map_disallow_data_exec( |
1396 | vm_map_t map); |
1397 | |
1398 | extern void vm_map_set_64bit( |
1399 | vm_map_t map); |
1400 | |
1401 | extern void vm_map_set_32bit( |
1402 | vm_map_t map); |
1403 | |
1404 | extern void vm_map_set_jumbo( |
1405 | vm_map_t map); |
1406 | |
1407 | extern void vm_map_set_jit_entitled( |
1408 | vm_map_t map); |
1409 | |
1410 | extern void vm_map_set_max_addr( |
1411 | vm_map_t map, vm_map_offset_t new_max_offset); |
1412 | |
1413 | extern boolean_t vm_map_has_hard_pagezero( |
1414 | vm_map_t map, |
1415 | vm_map_offset_t pagezero_size); |
1416 | extern void vm_commit_pagezero_status(vm_map_t tmap); |
1417 | |
1418 | extern boolean_t vm_map_tpro( |
1419 | vm_map_t map); |
1420 | |
1421 | extern void vm_map_set_tpro( |
1422 | vm_map_t map); |
1423 | |
1424 | extern boolean_t vm_map_tpro_enforcement( |
1425 | vm_map_t map); |
1426 | |
1427 | extern void vm_map_set_tpro_enforcement( |
1428 | vm_map_t map); |
1429 | |
1430 | extern boolean_t vm_map_set_tpro_range( |
1431 | vm_map_t map, |
1432 | vm_map_address_t start, |
1433 | vm_map_address_t end); |
1434 | |
1435 | extern boolean_t vm_map_is_64bit( |
1436 | vm_map_t map); |
1437 | |
1438 | extern kern_return_t vm_map_raise_max_offset( |
1439 | vm_map_t map, |
1440 | vm_map_offset_t new_max_offset); |
1441 | |
1442 | extern kern_return_t vm_map_raise_min_offset( |
1443 | vm_map_t map, |
1444 | vm_map_offset_t new_min_offset); |
1445 | |
1446 | #if XNU_TARGET_OS_OSX |
1447 | extern void vm_map_set_high_start( |
1448 | vm_map_t map, |
1449 | vm_map_offset_t high_start); |
1450 | #endif /* XNU_TARGET_OS_OSX */ |
1451 | |
1452 | extern vm_map_offset_t vm_compute_max_offset( |
1453 | boolean_t is64); |
1454 | |
1455 | extern void vm_map_get_max_aslr_slide_section( |
1456 | vm_map_t map, |
1457 | int64_t *max_sections, |
1458 | int64_t *section_size); |
1459 | |
1460 | extern uint64_t vm_map_get_max_aslr_slide_pages( |
1461 | vm_map_t map); |
1462 | |
1463 | extern uint64_t vm_map_get_max_loader_aslr_slide_pages( |
1464 | vm_map_t map); |
1465 | |
1466 | extern kern_return_t vm_map_set_size_limit( |
1467 | vm_map_t map, |
1468 | uint64_t limit); |
1469 | |
1470 | extern kern_return_t vm_map_set_data_limit( |
1471 | vm_map_t map, |
1472 | uint64_t limit); |
1473 | |
1474 | extern void vm_map_set_user_wire_limit( |
1475 | vm_map_t map, |
1476 | vm_size_t limit); |
1477 | |
1478 | extern void vm_map_switch_protect( |
1479 | vm_map_t map, |
1480 | boolean_t val); |
1481 | |
1482 | extern void vm_map_iokit_mapped_region( |
1483 | vm_map_t map, |
1484 | vm_size_t bytes); |
1485 | |
1486 | extern void vm_map_iokit_unmapped_region( |
1487 | vm_map_t map, |
1488 | vm_size_t bytes); |
1489 | |
1490 | |
1491 | extern boolean_t first_free_is_valid(vm_map_t); |
1492 | |
1493 | extern int vm_map_page_shift( |
1494 | vm_map_t map); |
1495 | |
1496 | extern vm_map_offset_t vm_map_page_mask( |
1497 | vm_map_t map); |
1498 | |
1499 | extern int vm_map_page_size( |
1500 | vm_map_t map); |
1501 | |
1502 | extern vm_map_offset_t vm_map_round_page_mask( |
1503 | vm_map_offset_t offset, |
1504 | vm_map_offset_t mask); |
1505 | |
1506 | extern vm_map_offset_t vm_map_trunc_page_mask( |
1507 | vm_map_offset_t offset, |
1508 | vm_map_offset_t mask); |
1509 | |
1510 | extern boolean_t vm_map_page_aligned( |
1511 | vm_map_offset_t offset, |
1512 | vm_map_offset_t mask); |
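
/*
 * Example (sketch; "map", "addr" and "size" assumed provided by the
 * caller): clipping a range to the map's page geometry:
 *
 *	vm_map_offset_t mask  = vm_map_page_mask(map);
 *	vm_map_offset_t start = vm_map_trunc_page_mask(addr, mask);
 *	vm_map_offset_t end   = vm_map_round_page_mask(addr + size, mask);
 *	assert(vm_map_page_aligned(start, mask));
 */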
1513 | |
1514 | extern bool vm_map_range_overflows( |
1515 | vm_map_t map, |
1516 | vm_map_offset_t addr, |
1517 | vm_map_size_t size); |
1518 | #ifdef XNU_KERNEL_PRIVATE |
1519 | |
1520 | /* Support for vm_map ranges */ |
1521 | extern kern_return_t vm_map_range_configure( |
1522 | vm_map_t map); |
1523 | |
1524 | extern void vm_map_range_fork( |
1525 | vm_map_t new_map, |
1526 | vm_map_t old_map); |
1527 | |
1528 | extern int vm_map_get_user_range( |
1529 | vm_map_t map, |
1530 | vm_map_range_id_t range_id, |
1531 | mach_vm_range_t range); |
1532 | |
1533 | /*! |
1534 | * @function vm_map_kernel_flags_update_range_id() |
1535 | * |
1536 | * @brief |
1537 | * Updates the @c vmkf_range_id field with the adequate value |
1538 | * according to the policy for specified map and tag set in @c vmk_flags. |
1539 | * |
1540 | * @discussion |
1541 | * This function is meant to be called by Mach VM entry points, |
1542 | * which matters for the kernel: allocations with pointers _MUST_ |
1543 | * be allocated with @c kmem_*() functions. |
1544 | * |
1545 | * If the range ID is already set, it is preserved. |
1546 | */ |
1547 | extern void vm_map_kernel_flags_update_range_id( |
1548 | vm_map_kernel_flags_t *flags, |
1549 | vm_map_t map); |
1550 | |
1551 | #if XNU_TARGET_OS_OSX |
1552 | extern void vm_map_mark_alien(vm_map_t map); |
1553 | extern void vm_map_single_jit(vm_map_t map); |
1554 | #endif /* XNU_TARGET_OS_OSX */ |
1555 | |
1556 | extern kern_return_t vm_map_page_info( |
1557 | vm_map_t map, |
1558 | vm_map_offset_t offset, |
1559 | vm_page_info_flavor_t flavor, |
1560 | vm_page_info_t info, |
1561 | mach_msg_type_number_t *count); |
1562 | extern kern_return_t vm_map_page_range_info_internal( |
1563 | vm_map_t map, |
1564 | vm_map_offset_t start_offset, |
1565 | vm_map_offset_t end_offset, |
1566 | int effective_page_shift, |
1567 | vm_page_info_flavor_t flavor, |
1568 | vm_page_info_t info, |
1569 | mach_msg_type_number_t *count); |
1570 | |
1571 | #endif /* XNU_KERNEL_PRIVATE */ |
1572 | #ifdef MACH_KERNEL_PRIVATE |
1573 | |
1574 | |
1575 | /* |
1576 | * Internal macros for rounding and truncation of vm_map offsets and sizes |
1577 | */ |
1578 | #define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) |
1579 | #define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) |
1580 | |
1581 | /* |
1582 | * Macros for rounding and truncation of vm_map offsets and sizes |
1583 | */ |
1584 | static inline int |
1585 | VM_MAP_PAGE_SHIFT( |
1586 | vm_map_t map) |
1587 | { |
1588 | int shift = map ? map->hdr.page_shift : PAGE_SHIFT; |
1589 | /* |
1590 | * help ubsan and codegen in general, |
1591 | * cannot use PAGE_{MIN,MAX}_SHIFT |
1592 | * because of testing code which |
1593 | * tests 16k aligned maps on 4k only systems. |
1594 | */ |
1595 | __builtin_assume(shift >= 12 && shift <= 14); |
1596 | return shift; |
1597 | } |
1598 | |
1599 | #define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map))) |
1600 | #define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1) |
1601 | #define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0) |
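
/*
 * Worked example: for a 16K map (page_shift == 14),
 * VM_MAP_PAGE_SIZE(map) == 0x4000 and VM_MAP_PAGE_MASK(map) == 0x3fff:
 *
 *	VM_MAP_ROUND_PAGE(0x4001, 0x3fff) == 0x8000
 *	VM_MAP_TRUNC_PAGE(0x7fff, 0x3fff) == 0x4000
 */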
1602 | |
1603 | static inline bool |
1604 | VM_MAP_IS_EXOTIC( |
1605 | vm_map_t map __unused) |
1606 | { |
1607 | #if __arm64__ |
1608 | if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT || |
1609 | pmap_is_exotic(map->pmap)) { |
1610 | return true; |
1611 | } |
1612 | #endif /* __arm64__ */ |
1613 | return false; |
1614 | } |
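
/*
 * Example: on a 16K-page arm64 system (PAGE_SHIFT == 14), a map set up
 * with a 4K page shift (12) is "exotic", since its page size is smaller
 * than the native pmap page size.
 */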
1615 | |
1616 | static inline bool |
1617 | VM_MAP_IS_ALIEN( |
1618 | vm_map_t map __unused) |
1619 | { |
1620 | /* |
1621 | * An "alien" process/task/map/pmap should mostly behave |
1622 | * as it currently would on iOS. |
1623 | */ |
1624 | #if XNU_TARGET_OS_OSX |
1625 | if (map->is_alien) { |
1626 | return true; |
1627 | } |
1628 | return false; |
1629 | #else /* XNU_TARGET_OS_OSX */ |
1630 | return true; |
1631 | #endif /* XNU_TARGET_OS_OSX */ |
1632 | } |
1633 | |
1634 | static inline bool |
1635 | VM_MAP_POLICY_WX_FAIL( |
1636 | vm_map_t map __unused) |
1637 | { |
1638 | if (VM_MAP_IS_ALIEN(map)) { |
1639 | return false; |
1640 | } |
1641 | return true; |
1642 | } |
1643 | |
1644 | static inline bool |
1645 | VM_MAP_POLICY_WX_STRIP_X( |
1646 | vm_map_t map __unused) |
1647 | { |
1648 | if (VM_MAP_IS_ALIEN(map)) { |
1649 | return true; |
1650 | } |
1651 | return false; |
1652 | } |
1653 | |
1654 | static inline bool |
1655 | VM_MAP_POLICY_ALLOW_MULTIPLE_JIT( |
1656 | vm_map_t map __unused) |
1657 | { |
1658 | if (VM_MAP_IS_ALIEN(map) || map->single_jit) { |
1659 | return false; |
1660 | } |
1661 | return true; |
1662 | } |
1663 | |
1664 | static inline bool |
1665 | VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS( |
1666 | vm_map_t map) |
1667 | { |
1668 | return VM_MAP_IS_ALIEN(map); |
1669 | } |
1670 | |
1671 | static inline bool |
1672 | VM_MAP_POLICY_ALLOW_JIT_INHERIT( |
1673 | vm_map_t map __unused) |
1674 | { |
1675 | if (VM_MAP_IS_ALIEN(map)) { |
1676 | return false; |
1677 | } |
1678 | return true; |
1679 | } |
1680 | |
1681 | static inline bool |
1682 | VM_MAP_POLICY_ALLOW_JIT_SHARING( |
1683 | vm_map_t map __unused) |
1684 | { |
1685 | if (VM_MAP_IS_ALIEN(map)) { |
1686 | return false; |
1687 | } |
1688 | return true; |
1689 | } |
1690 | |
1691 | static inline bool |
1692 | VM_MAP_POLICY_ALLOW_JIT_COPY( |
1693 | vm_map_t map __unused) |
1694 | { |
1695 | if (VM_MAP_IS_ALIEN(map)) { |
1696 | return false; |
1697 | } |
1698 | return true; |
1699 | } |
1700 | |
1701 | static inline bool |
1702 | VM_MAP_POLICY_WRITABLE_SHARED_REGION( |
1703 | vm_map_t map __unused) |
1704 | { |
1705 | #if __x86_64__ |
1706 | return true; |
1707 | #else /* __x86_64__ */ |
1708 | if (VM_MAP_IS_EXOTIC(map)) { |
1709 | return true; |
1710 | } |
1711 | return false; |
1712 | #endif /* __x86_64__ */ |
1713 | } |
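
/*
 * Illustrative sketch (hypothetical W^X enforcement site; `prot' and the
 * error handling are assumptions): how a caller might consult the policy
 * helpers above when a mapping asks for both write and execute.
 *
 *	if ((prot & VM_PROT_WRITE) && (prot & VM_PROT_EXECUTE)) {
 *		if (VM_MAP_POLICY_WX_FAIL(map)) {
 *			return KERN_PROTECTION_FAILURE;
 *		}
 *		if (VM_MAP_POLICY_WX_STRIP_X(map)) {
 *			prot &= ~VM_PROT_EXECUTE;
 *		}
 *	}
 */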
1714 | |
1715 | static inline void |
1716 | vm_prot_to_wimg(unsigned int prot, unsigned int *wimg) |
1717 | { |
1718 | switch (prot) { |
1719 | case MAP_MEM_NOOP: break; |
1720 | case MAP_MEM_IO: *wimg = VM_WIMG_IO; break; |
1721 | case MAP_MEM_COPYBACK: *wimg = VM_WIMG_USE_DEFAULT; break; |
1722 | case MAP_MEM_INNERWBACK: *wimg = VM_WIMG_INNERWBACK; break; |
1723 | case MAP_MEM_POSTED: *wimg = VM_WIMG_POSTED; break; |
1724 | case MAP_MEM_POSTED_REORDERED: *wimg = VM_WIMG_POSTED_REORDERED; break; |
1725 | case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break; |
1726 | case MAP_MEM_WTHRU: *wimg = VM_WIMG_WTHRU; break; |
1727 | case MAP_MEM_WCOMB: *wimg = VM_WIMG_WCOMB; break; |
1728 | case MAP_MEM_RT: *wimg = VM_WIMG_RT; break; |
1729 | default: break; |
1730 | } |
1731 | } |
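
/*
 * Usage sketch (`access' is a hypothetical caller value):
 * vm_prot_to_wimg() maps MAP_MEM_* memory-entry cache attributes to
 * VM_WIMG_* modes and leaves *wimg untouched for MAP_MEM_NOOP or
 * unrecognized values, so callers should seed a default first.
 *
 *	unsigned int wimg_mode = VM_WIMG_USE_DEFAULT;
 *	vm_prot_to_wimg(access, &wimg_mode);
 */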
1732 | |
1733 | static inline boolean_t |
1734 | vm_map_always_shadow(vm_map_t map) |
1735 | { |
1736 | if (map->mapped_in_other_pmaps) { |
1737 | /* |
1738 | * This is a submap, mapped in other maps. |
1739 | * Even if a VM object is mapped only once in this submap, |
1740 | * the submap itself could be mapped multiple times, |
1741 | * so vm_object_shadow() should always create a shadow |
1742 | * object, even if the object has only 1 reference. |
1743 | */ |
1744 | return TRUE; |
1745 | } |
1746 | return FALSE; |
1747 | } |
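
/*
 * Illustrative sketch (hypothetical copy-on-write site; `object' and
 * `object_has_multiple_refs' are assumptions, and the actual shadow
 * call is declared in vm_object.h):
 *
 *	if (vm_map_always_shadow(map) || object_has_multiple_refs) {
 *		// must create a shadow object (see vm_object_shadow())
 *	}
 */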
1748 | |
1749 | #endif /* MACH_KERNEL_PRIVATE */ |
1750 | #ifdef XNU_KERNEL_PRIVATE |
1751 | |
1752 | extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift); |
1753 | extern bool vm_map_is_exotic(vm_map_t map); |
1754 | extern bool vm_map_is_alien(vm_map_t map); |
1755 | extern pmap_t vm_map_get_pmap(vm_map_t map); |
1756 | |
1757 | extern bool vm_map_is_corpse_source(vm_map_t map); |
1758 | extern void vm_map_set_corpse_source(vm_map_t map); |
1759 | extern void vm_map_unset_corpse_source(vm_map_t map); |
1760 | #endif /* XNU_KERNEL_PRIVATE */ |
1761 | |
1762 | #define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) |
1763 | #define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) |
1764 | |
1765 | /* Support for UPLs from vm_maps */ |
1766 | |
1767 | #ifdef XNU_KERNEL_PRIVATE |
1768 | |
1769 | extern kern_return_t vm_map_get_upl( |
1770 | vm_map_t target_map, |
1771 | vm_map_offset_t map_offset, |
1772 | upl_size_t *size, |
1773 | upl_t *upl, |
1774 | upl_page_info_array_t page_info, |
1775 | unsigned int *page_infoCnt, |
1776 | upl_control_flags_t *flags, |
1777 | vm_tag_t tag, |
1778 | int force_data_sync); |
1779 | |
1780 | #endif /* XNU_KERNEL_PRIVATE */ |
1781 | |
1782 | extern void |
1783 | vm_map_sizes(vm_map_t map, |
1784 | vm_map_size_t * psize, |
1785 | vm_map_size_t * pfree, |
1786 | vm_map_size_t * plargest_free); |
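
/*
 * Usage sketch: query a map's total mapped size, free space, and
 * largest free region in one call.
 *
 *	vm_map_size_t total, free_space, largest;
 *	vm_map_sizes(map, &total, &free_space, &largest);
 */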
1787 | |
1788 | #if CONFIG_DYNAMIC_CODE_SIGNING |
1789 | |
1790 | extern kern_return_t vm_map_sign(vm_map_t map, |
1791 | vm_map_offset_t start, |
1792 | vm_map_offset_t end); |
1793 | |
1794 | #endif /* CONFIG_DYNAMIC_CODE_SIGNING */ |
1795 | |
1796 | extern kern_return_t vm_map_partial_reap( |
1797 | vm_map_t map, |
1798 | unsigned int *reclaimed_resident, |
1799 | unsigned int *reclaimed_compressed); |
1800 | |
1802 | #if DEVELOPMENT || DEBUG |
1803 | |
1804 | extern int vm_map_disconnect_page_mappings( |
1805 | vm_map_t map, |
1806 | boolean_t); |
1807 | |
1808 | extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr); |
1809 | |
1810 | #endif /* DEVELOPMENT || DEBUG */ |
1811 | |
1812 | #if CONFIG_FREEZE |
1813 | |
1814 | extern kern_return_t vm_map_freeze( |
1815 | task_t task, |
1816 | unsigned int *purgeable_count, |
1817 | unsigned int *wired_count, |
1818 | unsigned int *clean_count, |
1819 | unsigned int *dirty_count, |
1820 | unsigned int dirty_budget, |
1821 | unsigned int *shared_count, |
1822 | int *freezer_error_code, |
1823 | boolean_t eval_only); |
1824 | |
1825 | __enum_decl(freezer_error_code_t, int, { |
1826 | FREEZER_ERROR_GENERIC = -1, |
1827 | FREEZER_ERROR_EXCESS_SHARED_MEMORY = -2, |
1828 | FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO = -3, |
1829 | FREEZER_ERROR_NO_COMPRESSOR_SPACE = -4, |
1830 | FREEZER_ERROR_NO_SWAP_SPACE = -5, |
1831 | FREEZER_ERROR_NO_SLOTS = -6, |
1832 | }); |
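
/*
 * Illustrative call-site sketch (hypothetical freezer caller; the
 * counter variables and `budget' are assumptions): on failure, the
 * freezer error code refines the kern_return_t.
 *
 *	int freeze_err = 0;
 *	kr = vm_map_freeze(task, &purgeable, &wired, &clean, &dirty,
 *	    budget, &shared, &freeze_err, FALSE);
 *	if (kr != KERN_SUCCESS && freeze_err == FREEZER_ERROR_NO_SWAP_SPACE) {
 *		// back off until swap space becomes available
 *	}
 */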
1833 | |
1834 | #endif /* CONFIG_FREEZE */ |
1835 | #if XNU_KERNEL_PRIVATE |
1836 | |
1837 | boolean_t kdp_vm_map_is_acquired_exclusive(vm_map_t map); |
1838 | |
boolean_t vm_map_entry_has_device_pager(vm_map_t, vm_map_offset_t vaddr);
1840 | |
1841 | #endif /* XNU_KERNEL_PRIVATE */ |
1842 | |
1843 | /* |
1844 | * In some cases, we don't have a real VM object but still want to return a |
 * unique ID (to avoid a memory region looking like shared memory), so we
 * build a fake pointer based on the map's ledger and the index of the
 * ledger being reported.
1848 | */ |
1849 | #define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((int*)((map)->pmap->ledger)+(ledger_id))) |
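
/*
 * Illustrative example (region-info reporting; the ledger index shown is
 * an assumption): derive a stable fake object ID for a ledger-backed
 * region that has no VM object.
 *
 *	info->object_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
 */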
1850 | |
1851 | #endif /* KERNEL_PRIVATE */ |
1852 | |
1853 | __END_DECLS |
1854 | |
1855 | #endif /* _VM_VM_MAP_H_ */ |
1856 | |