/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section. [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <kern/trustcache.h>

#if __has_include(<CoreEntitlements/CoreEntitlements.h>)
#include <CoreEntitlements/CoreEntitlements.h>
#endif

#ifdef KERNEL_PRIVATE

/*
 * The following is a description of the interface to the
 * machine-dependent "physical map" data structure. The module
 * must provide a "pmap_t" data type that represents the
 * set of valid virtual-to-physical addresses for one user
 * address space. [The kernel address space is represented
 * by a distinguished "pmap_t".] The routines described manage
 * this type, install and update virtual-to-physical mappings,
 * and perform operations on physical addresses common to
 * many address spaces.
 */

/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t copypv(
        addr64_t source,
        addr64_t sink,
        unsigned int size,
        int which);
#define cppvPsnk        1
#define cppvPsnkb       31
#define cppvPsrc        2
#define cppvPsrcb       30
#define cppvFsnk        4
#define cppvFsnkb       29
#define cppvFsrc        8
#define cppvFsrcb       28
#define cppvNoModSnk    16
#define cppvNoModSnkb   27
#define cppvNoRefSrc    32
#define cppvNoRefSrcb   26
#define cppvKmap        64      /* Use the kernel's vm_map */
#define cppvKmapb       25
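
/*
 * Example (illustrative sketch only; the exact flag semantics are defined by
 * the machine-dependent implementation): copy one physical page into a kernel
 * virtual buffer, treating the source address as physical and resolving the
 * sink through the kernel's vm_map.
 *
 *	addr64_t      src_pa  = ptoa_64(pn);                     // physical source page
 *	addr64_t      dst_kva = (addr64_t)(uintptr_t)buffer;     // kernel virtual sink
 *	kern_return_t kr;
 *
 *	kr = copypv(src_pa, dst_kva, PAGE_SIZE, cppvPsrc | cppvKmap);
 *	if (kr != KERN_SUCCESS) {
 *		// handle a failed or partial copy
 *	}
 */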

extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);

#if MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE
#include <mach/mach_types.h>
#include <vm/memory_types.h>

/*
 * Routines used during BSD process creation.
 */

extern pmap_t pmap_create_options( /* Create a pmap_t. */
        ledger_t ledger,
        vm_map_size_t size,
        unsigned int flags);
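
/*
 * Example (hypothetical usage sketch; task_ledger is a placeholder name):
 * create a 64-bit user pmap charged against a task ledger. PMAP_CREATE_64BIT
 * is defined later in this header; a PMAP_NULL return indicates the pmap
 * could not be allocated.
 *
 *	pmap_t new_pmap = pmap_create_options(task_ledger, 0, PMAP_CREATE_64BIT);
 *	if (new_pmap == PMAP_NULL) {
 *		// propagate the allocation failure to the caller
 *	}
 */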

#if __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG))
/**
 * Informs the pmap layer that a process will be running with user JOP disabled,
 * as if PMAP_CREATE_DISABLE_JOP had been passed during pmap creation.
 *
 * @note This function cannot be used once the target process has started
 * executing code. It is intended for cases where user JOP is disabled based on
 * the code signature (e.g., special "keys-off" entitlements), which is too late
 * to change the flags passed to pmap_create_options.
 *
 * @param pmap The pmap belonging to the target process
 */
extern void pmap_disable_user_jop(
        pmap_t pmap);
#endif /* __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG)) */
#endif /* MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE */

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>
/*
 * Routines used for initialization.
 * There is traditionally also a pmap_bootstrap,
 * used very early by machine-dependent code,
 * but it is not part of the interface.
 *
 * LP64todo -
 * These interfaces are tied to the size of the
 * kernel pmap - and therefore use the "local"
 * vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size, vm_size_t alignment); /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size);             /* Early memory allocation */
extern void *pmap_steal_zone_memory(vm_size_t size, vm_size_t alignment); /* Early zone memory allocation */

extern unsigned int pmap_free_pages(void); /* report remaining unused physical pages */
#if defined(__arm__) || defined(__arm64__)
extern unsigned int pmap_free_pages_span(void); /* report phys address range of unused physical pages */
#endif /* defined(__arm__) || defined(__arm64__) */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp); /* allocate vm_page structs */

extern void pmap_init(void); /* Initialization, once we have kernel virtual memory. */

extern void mapping_adjust(void); /* Adjust free mapping count */

extern void mapping_free_prime(void); /* Primes the mapping block release list */

#ifndef MACHINE_PAGES
/*
 * If machine/pmap.h defines MACHINE_PAGES, it must implement
 * the above functions. The pmap module has complete control.
 * Otherwise, it must implement the following functions:
 *	pmap_free_pages
 *	pmap_virtual_space
 *	pmap_next_page
 *	pmap_init
 * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 * using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 * and pmap_enter. pmap_free_pages may over-estimate the number
 * of unused physical pages, and pmap_next_page may return FALSE
 * to indicate that there are no more unused pages to return.
 * However, for best performance pmap_free_pages should be accurate.
 */

/*
 * Routines to return the next unused physical page.
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
#ifdef __x86_64__
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);
#endif
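
/*
 * Illustrative sketch (not the actual vm/vm_resident.c implementation): the
 * generic startup path walks the unused physical pages by calling
 * pmap_next_page() until it returns FALSE.
 *
 *	ppnum_t pn;
 *
 *	while (pmap_next_page(&pn)) {
 *		// initialize a vm_page structure for physical page pn
 *	}
 */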

#if CONFIG_SPTM
__enum_decl(pmap_mapping_type_t, uint8_t, {
        PMAP_MAPPING_TYPE_INFER = SPTM_UNTYPED,
        PMAP_MAPPING_TYPE_DEFAULT = XNU_DEFAULT,
        PMAP_MAPPING_TYPE_ROZONE = XNU_ROZONE,
        PMAP_MAPPING_TYPE_RESTRICTED = XNU_KERNEL_RESTRICTED
});
#else
__enum_decl(pmap_mapping_type_t, uint8_t, {
        PMAP_MAPPING_TYPE_INFER = 0,
        PMAP_MAPPING_TYPE_DEFAULT,
        PMAP_MAPPING_TYPE_ROZONE,
        PMAP_MAPPING_TYPE_RESTRICTED
});
#endif

/*
 * Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
        vm_offset_t *virtual_start,
        vm_offset_t *virtual_end);
#endif  /* MACHINE_PAGES */

/*
 * Routines to manage the physical map data structure.
 */
extern pmap_t(pmap_kernel)(void);               /* Return the kernel's pmap */
extern void pmap_reference(pmap_t pmap);        /* Gain a reference. */
extern void pmap_destroy(pmap_t pmap);          /* Release a reference. */
extern void pmap_switch(pmap_t);
extern void pmap_require(pmap_t pmap);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
        int pid,
        char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t pmap_enter( /* Enter a mapping */
        pmap_t pmap,
        vm_map_offset_t v,
        ppnum_t pn,
        vm_prot_t prot,
        vm_prot_t fault_type,
        unsigned int flags,
        boolean_t wired,
        pmap_mapping_type_t mapping_type);

extern kern_return_t pmap_enter_options(
        pmap_t pmap,
        vm_map_offset_t v,
        ppnum_t pn,
        vm_prot_t prot,
        vm_prot_t fault_type,
        unsigned int flags,
        boolean_t wired,
        unsigned int options,
        void *arg,
        pmap_mapping_type_t mapping_type);
extern kern_return_t pmap_enter_options_addr(
        pmap_t pmap,
        vm_map_offset_t v,
        pmap_paddr_t pa,
        vm_prot_t prot,
        vm_prot_t fault_type,
        unsigned int flags,
        boolean_t wired,
        unsigned int options,
        void *arg,
        pmap_mapping_type_t mapping_type);
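
/*
 * Example (illustrative sketch only): establish a wired, read/write kernel
 * mapping of physical page pn at virtual address va, letting the pmap layer
 * infer the mapping type.
 *
 *	kern_return_t kr = pmap_enter(pmap_kernel(), va, pn,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
 *	    PMAP_MAPPING_TYPE_INFER);
 *	assert(kr == KERN_SUCCESS);
 */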

extern void pmap_remove_some_phys(
        pmap_t pmap,
        ppnum_t pn);

extern void pmap_lock_phys_page(
        ppnum_t pn);

extern void pmap_unlock_phys_page(
        ppnum_t pn);


/*
 * Routines that operate on physical addresses.
 */

extern void pmap_page_protect( /* Restrict access to page. */
        ppnum_t phys,
        vm_prot_t prot);

extern void pmap_page_protect_options( /* Restrict access to page. */
        ppnum_t phys,
        vm_prot_t prot,
        unsigned int options,
        void *arg);

extern void(pmap_zero_page)(
        ppnum_t pn);

extern void(pmap_zero_part_page)(
        ppnum_t pn,
        vm_offset_t offset,
        vm_size_t len);

extern void(pmap_copy_page)(
        ppnum_t src,
        ppnum_t dest);

extern void(pmap_copy_part_page)(
        ppnum_t src,
        vm_offset_t src_offset,
        ppnum_t dst,
        vm_offset_t dst_offset,
        vm_size_t len);

extern void(pmap_copy_part_lpage)(
        vm_offset_t src,
        ppnum_t dst,
        vm_offset_t dst_offset,
        vm_size_t len);

extern void(pmap_copy_part_rpage)(
        ppnum_t src,
        vm_offset_t src_offset,
        vm_offset_t dst,
        vm_size_t len);

extern unsigned int(pmap_disconnect)( /* disconnect mappings and return reference and change */
        ppnum_t phys);

extern unsigned int(pmap_disconnect_options)( /* disconnect mappings and return reference and change */
        ppnum_t phys,
        unsigned int options,
        void *arg);

extern kern_return_t(pmap_attribute_cache_sync)( /* Flush appropriate
                                                  * cache based on
                                                  * page number sent */
        ppnum_t pn,
        vm_size_t size,
        vm_machine_attribute_t attribute,
        vm_machine_attribute_val_t* value);

extern unsigned int(pmap_cache_attributes)(
        ppnum_t pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern void pmap_set_cache_attributes(
        ppnum_t,
        unsigned int);

extern void *pmap_map_compressor_page(
        ppnum_t);

extern void pmap_unmap_compressor_page(
        ppnum_t,
        void*);

#if defined(__arm__) || defined(__arm64__)
extern bool pmap_batch_set_cache_attributes(
        upl_page_info_array_t,
        unsigned int,
        unsigned int);
#endif
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 * pmap_assert_free() will panic() if pn is not free.
 */
extern bool pmap_verify_free(ppnum_t pn);
#if MACH_ASSERT
extern void pmap_assert_free(ppnum_t pn);
#endif


/*
 * Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void pmap_collect(pmap_t pmap); /* Perform garbage
                                        * collection, if any */
#endif
/*
 * Optional routines
 */
extern void(pmap_copy)( /* Copy range of mappings,
                         * if desired. */
        pmap_t dest,
        pmap_t source,
        vm_map_offset_t dest_va,
        vm_map_size_t size,
        vm_map_offset_t source_va);

extern kern_return_t(pmap_attribute)( /* Get/Set special memory
                                       * attributes */
        pmap_t pmap,
        vm_map_offset_t va,
        vm_map_size_t size,
        vm_machine_attribute_t attribute,
        vm_machine_attribute_val_t* value);

/*
 * Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) { \
        pmap_t pmap; \
 \
        pmap = (thr)->map->pmap; \
        if (pmap != pmap_kernel()) \
                PMAP_ACTIVATE(pmap, (thr), (cpu)); \
}
#endif /* PMAP_ACTIVATE */
#endif /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) { \
        pmap_t pmap; \
 \
        pmap = (thr)->map->pmap; \
        if ((pmap) != pmap_kernel()) \
                PMAP_DEACTIVATE(pmap, (thr), (cpu)); \
}
#endif /* PMAP_DEACTIVATE */
#endif /* PMAP_DEACTIVATE_USER */

#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu) \
        PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif /* PMAP_ACTIVATE */
#endif /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu) \
        PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif /* PMAP_DEACTIVATE */
#endif /* PMAP_DEACTIVATE_KERNEL */

#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \
        MACRO_BEGIN \
        if (!batch_pmap_op) { \
                pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
                object->set_cache_attr = TRUE; \
        } \
        MACRO_END
#endif /* PMAP_SET_CACHE_ATTR */

#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if defined(__arm__) || defined(__arm64__)
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \
            cache_attr, num_pages, batch_pmap_op) \
        MACRO_BEGIN \
        if ((batch_pmap_op)) { \
                (void)pmap_batch_set_cache_attributes( \
                        (user_page_list), \
                        (num_pages), \
                        (cache_attr)); \
                (object)->set_cache_attr = TRUE; \
        } \
        MACRO_END
#else
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \
            cache_attr, num_pages, batch_pmap_op) \
        MACRO_BEGIN \
        if ((batch_pmap_op)) { \
                unsigned int __page_idx = 0; \
                while (__page_idx < (num_pages)) { \
                        pmap_set_cache_attributes( \
                                user_page_list[__page_idx].phys_addr, \
                                (cache_attr)); \
                        __page_idx++; \
                } \
                (object)->set_cache_attr = TRUE; \
        } \
        MACRO_END
#endif
#endif /* PMAP_BATCH_SET_CACHE_ATTR */

/*
 * Routines to manage reference/modify bits based on
 * physical addresses, simulating them if not provided
 * by the hardware.
 */
struct pfc {
        long pfc_cpus;
        long pfc_invalid_global;
};

typedef struct pfc pmap_flush_context;

/* Clear reference bit */
extern void pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
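
/*
 * Example (illustrative sketch only): sample and then clear the referenced
 * and modified bits for a physical page.
 *
 *	unsigned int refmod = pmap_get_refmod(pn);
 *
 *	if (refmod & VM_MEM_MODIFIED) {
 *		// the page is dirty and must be cleaned before reuse
 *	}
 *	pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
 */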

/*
 * Clears the reference and/or modified bits on a range of virtually
 * contiguous pages.
 * It returns true if the operation succeeded. If it returns false,
 * nothing has been modified.
 * This operation is only supported on some platforms, so callers MUST
 * handle the case where it returns false.
 */
extern bool
pmap_clear_refmod_range_options(
        pmap_t pmap,
        vm_map_address_t start,
        vm_map_address_t end,
        unsigned int mask,
        unsigned int options);
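
/*
 * Example (illustrative sketch only, pseudocode in the fallback branch):
 * because the ranged call is optional, callers fall back to per-page clearing
 * when it returns false.
 *
 *	if (!pmap_clear_refmod_range_options(pmap, start, end,
 *	    VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH)) {
 *		// for each physical page pn backing [start, end):
 *		//	pmap_clear_refmod_options(pn, VM_MEM_REFERENCED,
 *		//	    PMAP_OPTIONS_NOFLUSH, NULL);
 *	}
 */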


extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);

/*
 * Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect( /* Change protections. */
        pmap_t map,
        vm_map_offset_t s,
        vm_map_offset_t e,
        vm_prot_t prot);

extern void pmap_protect_options( /* Change protections. */
        pmap_t map,
        vm_map_offset_t s,
        vm_map_offset_t e,
        vm_prot_t prot,
        unsigned int options,
        void *arg);

extern void(pmap_pageable)(
        pmap_t pmap,
        vm_map_offset_t start,
        vm_map_offset_t end,
        boolean_t pageable);

extern uint64_t pmap_shared_region_size_min(pmap_t map);

extern kern_return_t pmap_nest(pmap_t,
        pmap_t,
        addr64_t,
        uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
        addr64_t,
        uint64_t);

#define PMAP_UNNEST_CLEAN       1

#if __arm64__
#if CONFIG_SPTM
#define PMAP_FORK_NEST 1
#endif /* CONFIG_SPTM */

#if PMAP_FORK_NEST
extern kern_return_t pmap_fork_nest(
        pmap_t old_pmap,
        pmap_t new_pmap,
        vm_map_offset_t *nesting_start,
        vm_map_offset_t *nesting_end);
#endif /* PMAP_FORK_NEST */
#endif /* __arm64__ */

extern kern_return_t pmap_unnest_options(pmap_t,
        addr64_t,
        uint64_t,
        unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif  /* MACH_KERNEL_PRIVATE */

extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern const pmap_t kernel_pmap; /* The kernel's map */
#define pmap_kernel()   (kernel_pmap)

#define VM_MEM_SUPERPAGE        0x100   /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200

/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT       0x1

#if __x86_64__

#define PMAP_CREATE_EPT         0x2
#define PMAP_CREATE_TEST        0x4 /* pmap will be used for testing purposes only */
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT | PMAP_CREATE_TEST)

#else

#define PMAP_CREATE_STAGE2      0
#if __arm64e__
#define PMAP_CREATE_DISABLE_JOP 0x4
#else
#define PMAP_CREATE_DISABLE_JOP 0
#endif
#if __ARM_MIXED_PAGE_SIZE__
#define PMAP_CREATE_FORCE_4K_PAGES 0x8
#else
#define PMAP_CREATE_FORCE_4K_PAGES 0
#endif /* __ARM_MIXED_PAGE_SIZE__ */
#define PMAP_CREATE_X86_64      0
#if CONFIG_ROSETTA
#define PMAP_CREATE_ROSETTA     0x20
#else
#define PMAP_CREATE_ROSETTA     0
#endif /* CONFIG_ROSETTA */

#define PMAP_CREATE_TEST        0x40 /* pmap will be used for testing purposes only */

/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | \
        PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64 | PMAP_CREATE_ROSETTA | PMAP_CREATE_TEST)

#endif /* __x86_64__ */
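
/*
 * Example (illustrative sketch only): creation flags are combined and the
 * pmap layer rejects anything outside PMAP_CREATE_KNOWN_FLAGS.
 *
 *	unsigned int create_flags = PMAP_CREATE_64BIT | PMAP_CREATE_TEST;
 *
 *	assert((create_flags & ~PMAP_CREATE_KNOWN_FLAGS) == 0);
 *	pmap_t test_pmap = pmap_create_options(ledger, 0, create_flags);
 */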

#define PMAP_OPTIONS_NOWAIT     0x1     /* don't block, return
                                         * KERN_RESOURCE_SHORTAGE
                                         * instead */
#define PMAP_OPTIONS_NOENTER    0x2     /* expand pmap if needed
                                         * but don't enter mapping
                                         */
#define PMAP_OPTIONS_COMPRESSOR 0x4     /* credit the compressor for
                                         * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8     /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10    /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20    /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40    /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80    /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100   /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE   0x200 /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400 /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
                                                    * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000 /* allow protections to be
                                               * upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE 0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED  0x8000
#define PMAP_OPTIONS_FF_WIRED   0x10000
#endif
#define PMAP_OPTIONS_XNU_USER_DEBUG 0x20000

/* Indicates that pmap_enter() or pmap_remove() is being called with preemption already disabled. */
#define PMAP_OPTIONS_NOPREEMPT  0x80000

#define PMAP_OPTIONS_MAP_TPRO   0x40000

#define PMAP_OPTIONS_RESERVED_MASK 0xFF000000 /* encoding space reserved for internal pmap use */
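
/*
 * Example (illustrative sketch only): a non-blocking enter that asks the
 * pmap layer to return KERN_RESOURCE_SHORTAGE rather than wait for memory.
 *
 *	kern_return_t kr = pmap_enter_options(pmap, va, pn,
 *	    VM_PROT_READ, VM_PROT_NONE, 0, FALSE,
 *	    PMAP_OPTIONS_NOWAIT, NULL, PMAP_MAPPING_TYPE_INFER);
 *	if (kr == KERN_RESOURCE_SHORTAGE) {
 *		// retry later from a context that is allowed to block
 *	}
 */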

#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
        vm_map_offset_t va);
#endif
extern void pmap_change_wiring( /* Specify pageability */
        pmap_t pmap,
        vm_map_offset_t va,
        boolean_t wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void pmap_remove( /* Remove mappings. */
        pmap_t map,
        vm_map_offset_t s,
        vm_map_offset_t e);

extern void pmap_remove_options( /* Remove mappings. */
        pmap_t map,
        vm_map_offset_t s,
        vm_map_offset_t e,
        int options);

extern void fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

mach_vm_size_t pmap_query_resident(pmap_t pmap,
        vm_map_offset_t s,
        vm_map_offset_t e,
        mach_vm_size_t *compressed_bytes_p);

extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);

/* Inform the pmap layer that the XO register is repurposed for this map */
extern void pmap_set_tpro(pmap_t pmap);

/* Ask the pmap layer if there is a TPRO entry in this map. */
extern bool pmap_get_tpro(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);

extern bool pmap_is_nested(pmap_t pmap);

/*
 * Dump page table contents into the specified buffer. Returns KERN_INSUFFICIENT_BUFFER_SIZE
 * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */

extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied);
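
/*
 * Example (illustrative sketch only, debugger context; the level mask value
 * is an assumption): dump all levels of the kernel page tables into a
 * scratch buffer.
 *
 *	size_t copied = 0;
 *	kern_return_t kr = pmap_dump_page_tables(kernel_pmap, buf,
 *	    (char *)buf + buf_size, ~0U, &copied);
 *	if (kr == KERN_INSUFFICIENT_BUFFER_SIZE) {
 *		// the caller must retry with a larger buffer
 *	}
 */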

/* Asks the pmap layer for number of bits used for VA address. */
extern uint32_t pmap_user_va_bits(pmap_t pmap);
extern uint32_t pmap_kernel_va_bits(void);

/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
uint64_t pmap_release_pages_fast(void);

#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
        pmap_t pmap,
        vm_map_offset_t va,
        int *disp);

extern bool pmap_in_ppl(void);

extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);

/**
 * Indicates whether the device supports register-level MMIO access control.
 *
 * @note Unlike the pmap-io-ranges mechanism, which enforces PPL-only register
 * writability at page granularity, this mechanism allows specific registers
 * on a read-mostly page to be written using a dedicated guarded mode trap
 * without requiring a full PPL driver extension.
 *
 * @return True if the device supports register-level MMIO access control.
 */
extern bool pmap_has_iofilter_protected_write(void);

/**
 * Performs a write to the I/O register specified by addr on supported devices.
 *
 * @note On supported devices (determined by pmap_has_iofilter_protected_write()), this
 * function goes over the sorted I/O filter entry table. If there is a hit, the
 * write is performed from Guarded Mode. Otherwise, the write is performed from
 * Normal Mode (kernel mode). Note that you can still hit an exception if the
 * register is owned by PPL but not allowed by an io-filter-entry in the device tree.
 *
 * @note On unsupported devices, this function will panic.
 *
 * @param addr The address of the register.
 * @param value The value to be written.
 * @param width The width of the I/O register, supported values are 1, 2, 4 and 8.
 */
extern void pmap_iofilter_protected_write(vm_address_t addr, uint64_t value, uint64_t width);
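
/*
 * Example (illustrative sketch only): prefer the filtered write on devices
 * that support it, and fall back to a plain MMIO store elsewhere, since the
 * filtered path panics on unsupported devices.
 *
 *	if (pmap_has_iofilter_protected_write()) {
 *		pmap_iofilter_protected_write(reg_addr, val, 4);
 *	} else {
 *		*(volatile uint32_t *)reg_addr = (uint32_t)val;
 *	}
 */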

extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);

extern void pmap_ledger_verify_size(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);

extern bool pmap_is_bad_ram(ppnum_t ppn);

#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
#define pmap_is_exotic(pmap) false
#endif /* __arm64__ */


/*
 * Returns a subset of pmap_cs non-default configuration,
 * e.g. loosening up of some restrictions through pmap_cs or amfi
 * boot-args. The return value is a bit field with possible bits
 * described below. If default, the function will return 0. Note that
 * this does not work the other way: 0 does not imply that pmap_cs
 * runs in default configuration, and only a small configuration
 * subset is returned by this function.
 *
 * Never assume the system is "secure" if this returns 0.
 */
extern int pmap_cs_configuration(void);

#if XNU_KERNEL_PRIVATE

#if defined(__arm64__)

/**
 * Check if a particular pmap is used for stage2 translations or not.
 */
extern bool
pmap_performs_stage2_translations(const pmap_t pmap);

#endif /* defined(__arm64__) */
#endif /* XNU_KERNEL_PRIVATE */

#if CONFIG_SPTM
/*
 * The TrustedExecutionMonitor address space data structure is kept within the
 * pmap structure in order to provide a coherent API to the rest of the kernel
 * for working with code signing monitors.
 *
 * However, many parts of the kernel have no visibility into the pmap data
 * structure, as it is opaque outside the Mach portion of the kernel. To
 * alleviate this, we provide pmap APIs to the rest of the kernel.
 */
#include <TrustedExecutionMonitor/API.h>

/*
 * All pages allocated by TXM are also kept within the TXM VM object, which
 * allows them to be tracked for accounting and debugging purposes.
 */
extern vm_object_t txm_vm_object;

/**
 * Acquire the pointer of the kernel pmap being used for the system.
 */
extern pmap_t
pmap_txm_kernel_pmap(void);

/**
 * Acquire the TXM address space object stored within the pmap.
 */
extern TXMAddressSpace_t*
pmap_txm_addr_space(const pmap_t pmap);

/**
 * Set the TXM address space object within the pmap.
 */
extern void
pmap_txm_set_addr_space(
        pmap_t pmap,
        TXMAddressSpace_t *txm_addr_space);

/**
 * Set the trust level of the TXM address space object within the pmap.
 */
extern void
pmap_txm_set_trust_level(
        pmap_t pmap,
        CSTrust_t trust_level);

/**
 * Get the trust level of the TXM address space object within the pmap.
 */
extern kern_return_t
pmap_txm_get_trust_level_kdp(
        pmap_t pmap,
        CSTrust_t *trust_level);

/**
 * Take a shared lock on the pmap in order to enforce safe concurrency for
 * an operation on the TXM address space object. Passing in NULL takes the lock
 * on the current pmap.
 */
extern void
pmap_txm_acquire_shared_lock(pmap_t pmap);

/**
 * Release the shared lock which was previously acquired for operations on
 * the TXM address space object. Passing in NULL releases the lock for the
 * current pmap.
 */
extern void
pmap_txm_release_shared_lock(pmap_t pmap);

/**
 * Take an exclusive lock on the pmap in order to enforce safe concurrency for
 * an operation on the TXM address space object. Passing in NULL takes the lock
 * on the current pmap.
 */
extern void
pmap_txm_acquire_exclusive_lock(pmap_t pmap);

/**
 * Release the exclusive lock which was previously acquired for operations on
 * the TXM address space object. Passing in NULL releases the lock for the
 * current pmap.
 */
extern void
pmap_txm_release_exclusive_lock(pmap_t pmap);
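
/*
 * Example (illustrative sketch only): inspect the TXM address space of a
 * pmap under the shared lock; passing NULL would operate on the current pmap.
 *
 *	pmap_txm_acquire_shared_lock(pmap);
 *	TXMAddressSpace_t *txm_as = pmap_txm_addr_space(pmap);
 *	// ... read-only queries against txm_as ...
 *	pmap_txm_release_shared_lock(pmap);
 */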

/**
 * Transfer a page to the TXM_DEFAULT type after resolving its mapping from its
 * virtual to physical address.
 */
extern void
pmap_txm_transfer_page(const vm_address_t addr);

/**
 * Grab an available page from the VM free list, add it to the TXM VM object and
 * then transfer it to be owned by TXM.
 *
 * Returns the physical address of the page allocated.
 */
extern vm_map_address_t
pmap_txm_allocate_page(void);

#endif /* CONFIG_SPTM */


#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_PMAP_H_ */