/*
 * Copyright (c) 2007-2021, 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * Machine-dependent structures for the physical map module.
 *
 * This header file contains the types and prototypes that make up the public
 * pmap API that's exposed to the rest of the kernel. Any types/prototypes used
 * strictly by the pmap itself should be placed into one of the osfmk/arm/pmap/
 * header files.
 *
 * To prevent circular dependencies and exposing anything not needed by the
 * rest of the kernel, this file shouldn't include ANY of the internal
 * osfmk/arm/pmap/ header files.
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_

#include <mach_assert.h>
#include <arm64/proc_reg.h>

#ifndef ASSEMBLER

#include <stdatomic.h>
#include <stdbool.h>
#include <libkern/section_keywords.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <arm/pmap_public.h>
#include <kern/ast.h>
#include <mach/arm/thread_status.h>

#if defined(__arm64__)
#include <arm64/tlb.h>
#else /* defined(__arm64__) */
#include <arm/tlb.h>
#endif /* defined(__arm64__) */


/* Shift for 2048 max virtual ASIDs (2048 pmaps). */
#define ASID_SHIFT (11)

/* Max supported ASIDs (can be virtual). */
#define MAX_ASIDS (1 << ASID_SHIFT)

/* Shift for the maximum ARM ASID value (256 or 65536) */
#ifndef ARM_ASID_SHIFT
#if HAS_16BIT_ASID
#define ARM_ASID_SHIFT (16)
#else
#define ARM_ASID_SHIFT (8)
#endif /* HAS_16BIT_ASID */
#endif /* ARM_ASID_SHIFT */

/* Max ASIDs supported by the hardware. */
#define ARM_MAX_ASIDS (1 << ARM_ASID_SHIFT)

/* Number of bits in a byte. */
#define NBBY (8)

/**
 * The maximum number of hardware ASIDs used by the pmap for user address spaces.
 *
 * One ASID is always dedicated to the kernel (ASID 0). On systems with software-
 * based Spectre/Meltdown mitigations, each address space technically uses two
 * hardware ASIDs (one for EL1 and one for EL0), so the total number of ASIDs
 * available to user processes is halved on those systems.
 */
#if __ARM_KERNEL_PROTECT__
#define MAX_HW_ASIDS ((ARM_MAX_ASIDS >> 1) - 1)
#else /* __ARM_KERNEL_PROTECT__ */
#define MAX_HW_ASIDS (ARM_MAX_ASIDS - 1)
#endif /* __ARM_KERNEL_PROTECT__ */

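/*
 * For example (straight arithmetic on the macros above): with 8-bit hardware
 * ASIDs (ARM_ASID_SHIFT == 8), ARM_MAX_ASIDS is 256 and MAX_HW_ASIDS is 255,
 * or 127 when __ARM_KERNEL_PROTECT__ halves the pool; with 16-bit ASIDs the
 * corresponding values are 65536, 65535, and 32767.
 */
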
/* Maximum number of Virtual Machine IDs */
#ifndef ARM_VMID_SHIFT
#define ARM_VMID_SHIFT (8)
#endif /* ARM_VMID_SHIFT */
#define ARM_MAX_VMIDS (1 << ARM_VMID_SHIFT)

/* XPRR virtual register map */

/* Maximum number of CPU windows per-cpu. */
#define CPUWINDOWS_MAX 4

#if defined(__arm64__)

#if defined(ARM_LARGE_MEMORY)
/*
 * Upper limit on how many pages can be consumed by bootstrap page tables:
 * 2 L1 tables (linear KVA and V=P), plus 2*16 L2 tables, which map up to
 * 1TB (16 * 64GB) of DRAM.
 */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 34)
#else /* defined(ARM_LARGE_MEMORY) */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)
#endif /* defined(ARM_LARGE_MEMORY) */

typedef uint64_t tt_entry_t; /* translation table entry type */
typedef uint64_t pt_entry_t; /* page table entry type */
#else /* defined(__arm64__) */
#error unknown arch
#endif /* defined(__arm64__) */

/* Used to represent a NULL page/translation table entry pointer. */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

/**
 * Number of PTE pointers in a single PVE. This must be 2, since the algorithm
 * has been optimized to that case. Should this change in the future, both
 * enter_pv() and remove_pv() will need to be modified accordingly. In addition
 * to this, the documentation and the LLDB macros that walk PV lists will also
 * need to be adapted.
 */
#define PTE_PER_PVE 2
_Static_assert(PTE_PER_PVE == 2, "PTE_PER_PVE is not 2");

/**
 * Structure to track the active mappings for a given page. This structure is
 * used in the pv_head_table when a physical page has more than one mapping to
 * it. Each entry in this linked list of structures can represent
 * up to PTE_PER_PVE mappings.
 */
typedef struct pv_entry {
    /* Linked list to the next mapping of the physical page. */
    struct pv_entry *pve_next;

    /* Pointer to the page table entry for this mapping. */
    pt_entry_t *pve_ptep[PTE_PER_PVE];
} pv_entry_t;

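/*
 * Illustrative sketch (not a routine defined in this header; "pveh" stands in
 * for a list head obtained from the pv_head_table): visiting every PTE
 * recorded for a physical page.
 *
 *   for (pv_entry_t *pve = pveh; pve != NULL; pve = pve->pve_next) {
 *       for (unsigned int i = 0; i < PTE_PER_PVE; i++) {
 *           pt_entry_t *ptep = pve->pve_ptep[i];
 *           if (ptep != PT_ENTRY_NULL) {
 *               ... examine or update *ptep ...
 *           }
 *       }
 *   }
 */
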
/**
 * Structure that tracks free pv_entry nodes for the pv_head_table. Each one
 * of these nodes represents a single mapping to a physical page, so a new node
 * is allocated whenever a new mapping is created.
 */
typedef struct {
    pv_entry_t *list;
    uint32_t count;
} pv_free_list_t;

/**
 * Forward declaration of the structure that controls page table geometry and
 * TTE/PTE format.
 */
struct page_table_attr;

struct pmap_cpu_data {
#if XNU_MONITOR
    const volatile struct pmap * _Atomic active_pmap;
    const volatile struct pmap * _Atomic inflight_pmap;
    uint64_t pvh_info[4];
    void *ppl_kern_saved_sp;
    void *ppl_stack;
    arm_context_t *save_area;
    unsigned int ppl_state;

#if HAS_GUARDED_IO_FILTER
    void *iofilter_stack;
    void *iofilter_saved_sp;
#endif

    void *scratch_page;
#endif /* XNU_MONITOR */
    pmap_t cpu_nested_pmap;
#if __ARM_MIXED_PAGE_SIZE__
    uint64_t commpage_page_shift;
#endif
#if defined(__arm64__)
    const struct page_table_attr *cpu_nested_pmap_attr;
    vm_map_address_t cpu_nested_region_addr;
    vm_map_offset_t cpu_nested_region_size;
#else /* defined(__arm64__) */
    pmap_t cpu_user_pmap;
#endif /* defined(__arm64__) */
    unsigned int cpu_number;
    bool copywindow_strong_sync[CPUWINDOWS_MAX];
    bool inflight_disconnect;
    pv_free_list_t pv_free;
    pv_entry_t *pv_free_spill_marker;

#if !HAS_16BIT_ASID
    /*
     * This supports overloading of ARM ASIDs by the pmap. The field needs
     * to be wide enough to cover all the virtual bits in a virtual ASID.
     * With 256 physical ASIDs, 8-bit fields let us support up to 65536
     * Virtual ASIDs, minus all that would map on to 0 (as 0 is a global
     * ASID).
     *
     * If we were to use bitfield shenanigans here, we could save a bit of
     * memory by only having enough bits to support MAX_ASIDS. However, such
     * an implementation would be more error prone.
     */
    uint8_t cpu_sw_asids[MAX_HW_ASIDS];
#endif /* !HAS_16BIT_ASID */
};
typedef struct pmap_cpu_data pmap_cpu_data_t;

#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>


#include <sys/cdefs.h>

/* Base address for low globals. */
#if defined(ARM_LARGE_MEMORY)
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffe0000000000ULL
#else /* defined(ARM_LARGE_MEMORY) */
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL
#endif /* defined(ARM_LARGE_MEMORY) */

/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#if defined(ARM_LARGE_MEMORY)
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE)
#else /* defined(ARM_LARGE_MEMORY) */
#define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS
#endif /* defined(ARM_LARGE_MEMORY) */
#else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
#if defined(ARM_LARGE_MEMORY)
/* For large memory systems with no KTRR/CTRR such as virtual machines */
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE)
#else
#define KERNEL_PMAP_HEAP_RANGE_START LOW_GLOBAL_BASE_ADDRESS
#endif
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

/**
 * For setups where the VM page size does not match the hardware page size (the
 * VM page size must be a multiple of the hardware page size), we need to
 * determine the ratio between the two.
 */
#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4)

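/*
 * For example, with 16K VM pages (PAGE_SHIFT == 14) on 4K hardware pages
 * (ARM_PGSHIFT == 12), PAGE_RATIO == 4 and TEST_PAGE_RATIO_4 is true; when
 * the two sizes match, PAGE_RATIO == 1.
 */
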


/* superpages */
#define SUPERPAGE_NBASEPAGES 1 /* No superpages support */

/* Convert addresses to pages and vice versa. No rounding is used. */
#define arm_atop(x) (((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x) (((vm_map_address_t)(x)) << ARM_PGSHIFT)

/**
 * Round up or truncate to the nearest page boundary. These will work for
 * either addresses or counts (i.e. 1 byte rounds up to 1 page).
 */
#define arm_round_page(x) ((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x) (((vm_map_address_t)(x)) & ~ARM_PGMASK)

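/*
 * For example, assuming 4K hardware pages (ARM_PGSHIFT == 12):
 *   arm_round_page(0x1001) == 0x2000, arm_trunc_page(0x1fff) == 0x1000,
 *   arm_atop(0x3000) == 3, and arm_ptoa(3) == 0x3000.
 */
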
extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);

#if defined(__arm64__)
extern uint64_t get_mmu_control(void);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
extern uint64_t pmap_get_arm64_prot(pmap_t, vm_offset_t);
#else /* defined(__arm64__) */
#error Unsupported architecture
#endif /* defined(__arm64__) */

extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);


/* Convert address offset to translation table index */
#define ttel0num(a) ((a & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a) ((a & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a) ((a & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define TLBFLUSH_SIZE (ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE))



#define pmap_cs_log(level, fmt, args...)
#define pmap_cs_log_debug(fmt, args...)
#define pmap_cs_log_info(fmt, args...)
#define pmap_cs_log_error(fmt, args...)
#define pmap_cs_log_force(level, fmt, args...)




/* Convert translation/page table entry to kernel virtual address. */
#define ttetokv(a) (phystokv(tte_to_pa(a)))
#define ptetokv(a) (phystokv(pte_to_pa(a)))

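/*
 * Illustrative sketch (not a routine defined in this header): given a valid
 * table-type TTE, the kernel virtual address of the next-level table can be
 * obtained as
 *
 *   tt_entry_t *next_table = (tt_entry_t *)ttetokv(tte);
 *
 * which is simply phystokv(tte_to_pa(tte)).
 */
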
struct pmap {
    /* Pointer to the root translation table. */
    tt_entry_t *tte;

    /* Physical page of the root translation table. */
    pmap_paddr_t ttep;

    /*
     * The min and max fields represent the lowest and highest addressable VAs
     * as dictated strictly by the paging hierarchy (root level + root table size)
     * in conjunction with whether the root table is used with TTBR0, TTBR1, or VTTBR.
     * These fields do not encapsulate any higher-level address-space partitioning
     * policies.
     */

    /* Lowest supported VA (inclusive) */
    vm_map_address_t min;

    /* Highest supported VA (exclusive) */
    vm_map_address_t max;

#if ARM_PARAMETERIZED_PMAP
    /* Details about the page table layout. */
    const struct page_table_attr * pmap_pt_attr;
#endif /* ARM_PARAMETERIZED_PMAP */

    /* Ledger tracking phys mappings */
    ledger_t ledger;

    decl_lck_rw_data(, rwlock);

    /* Global list of pmaps */
    queue_chain_t pmaps;

    /* Free list of translation table pages. */
    tt_entry_t *tt_entry_free;

    /* Information representing the "nested" (shared) region in this pmap. */
    struct pmap *nested_pmap;
    vm_map_address_t nested_region_addr;
    vm_map_offset_t nested_region_size;
    vm_map_offset_t nested_region_true_start;
    vm_map_offset_t nested_region_true_end;
    unsigned int *nested_region_unnested_table_bitmap;
    unsigned int nested_region_unnested_table_bitmap_size;


    void * reserved0;
    void * reserved1;
    uint8_t reserved12;
    uint64_t reserved2;
    uint64_t reserved3;

    /* PMAP reference count */
    _Atomic int32_t ref_count;

#if XNU_MONITOR
    /* number of pmaps in which this pmap is nested */
    _Atomic int32_t nested_count;
#endif

    /* Number of pmaps that nested this pmap without bounds set. */
    uint32_t nested_no_bounds_refcnt;

    /**
     * Represents the real hardware ASID inserted into each TLB entry within
     * this address space.
     */
    uint16_t hw_asid;

    /**
     * Represents the virtual "software" ASID. Any real hardware ASID can have
     * multiple software ASIDs associated with it. This is used to know when to
     * perform TLB flushes during context switches.
     */
    uint8_t sw_asid;

#if MACH_ASSERT
    int pmap_pid;
    char pmap_procname[17];
#endif /* MACH_ASSERT */

    bool reserved4;

    bool pmap_vm_map_cs_enforced;

    bool reserved5;
    unsigned int reserved6;
    unsigned int reserved7;

    bool reserved8;
    bool reserved9;

#if defined(CONFIG_ROSETTA)
    /* Whether the pmap is used for Rosetta. */
    bool is_rosetta;
#else
    bool reserved10;
#endif /* defined(CONFIG_ROSETTA) */

#if DEVELOPMENT || DEBUG
    bool footprint_suspended;
    bool footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */

    /* Whether the No-Execute functionality is enabled. */
    bool nx_enabled;

    /* Whether this pmap represents a 64-bit address space. */
    bool is_64bit;

    enum : uint8_t {
        /**
         * pmap contains no lingering mappings outside the established
         * bounds of pmap->nested_pmap, and its reference has been removed
         * from pmap->nested_pmap->nested_no_bounds_refcnt.
         */
        NESTED_NO_BOUNDS_REF_NONE = 0,
        /**
         * pmap's mappings outside the established bounds of pmap->nested_pmap
         * have been removed, but pmap->nested_pmap->nested_no_bounds_refcnt
         * still contains pmap's reference.
         */
        NESTED_NO_BOUNDS_REF_SUBORD,
        /**
         * pmap contains mappings after the end of the established bounds
         * of pmap->nested_pmap.
         */
        NESTED_NO_BOUNDS_REF_AFTER,
        /**
         * pmap contains mappings before the beginning and after the end of
         * the established bounds of pmap->nested_pmap.
         */
        NESTED_NO_BOUNDS_REF_BEFORE_AND_AFTER,
    } nested_no_bounds_ref_state;

    /* The nesting bounds have been set. */
    bool nested_bounds_set;

#if HAS_APPLE_PAC
    bool disable_jop;
#else
    bool reserved10;
#endif /* HAS_APPLE_PAC */

    bool reserved11;

#define PMAP_TYPE_USER 0 /* ordinary pmap */
#define PMAP_TYPE_KERNEL 1 /* kernel pmap */
#define PMAP_TYPE_COMMPAGE 2 /* commpage pmap */
#define PMAP_TYPE_NESTED 3 /* pmap nested within another pmap */
    uint8_t type;
};

#define PMAP_VASID(pmap) (((uint32_t)((pmap)->sw_asid) << 16) | pmap->hw_asid)

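/*
 * Example with illustrative values: a pmap with hw_asid == 0x0042 and
 * sw_asid == 0x01 yields PMAP_VASID(pmap) == 0x00010042.
 */
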
#if VM_DEBUG
extern int pmap_list_resident_pages(
    pmap_t pmap,
    vm_offset_t *listp,
    int space);
#else /* VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* VM_DEBUG */

extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);

/* Globals shared between arm_vm_init and pmap */
extern tt_entry_t *cpu_tte;     /* First CPU's translation table (shared with kernel pmap) */
extern pmap_paddr_t cpu_ttep;   /* Physical translation table addr */

#if __arm64__
extern void *ropagetable_begin;
extern void *ropagetable_end;
#endif /* __arm64__ */

#if __arm64__
extern tt_entry_t *invalid_tte;     /* Global invalid translation table */
extern pmap_paddr_t invalid_ttep;   /* Physical invalid translation table addr */
#endif /* __arm64__ */

#define PMAP_CONTEXT(pmap, thread)

/**
 * Platform-dependent prototypes.
 */
extern void pmap_clear_user_ttb(void);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern pmap_paddr_t pmap_find_pa(pmap_t map, addr64_t va);
extern pmap_paddr_t pmap_find_pa_nofault(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys_nofault(pmap_t map, addr64_t va);
extern void pmap_switch_user(thread_t th, vm_map_t map);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_gc(void);
#if HAS_APPLE_PAC
extern void * pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
extern void * pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
#endif /* HAS_APPLE_PAC */

/**
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) pmap_switch_user((th), (new_map))

#define pmap_kernel() (kernel_pmap)

#define pmap_kernel_va(VA) \
    (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value) (KERN_INVALID_ADDRESS)

#define copyinmsg(from, to, cnt) copyin(from, to, cnt)
#define copyoutmsg(from, to, cnt) copyout(from, to, cnt)

/* Unimplemented interfaces. */
#define MACRO_NOOP
#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) MACRO_NOOP
#define pmap_pageable(pmap, start, end, pageable) MACRO_NOOP

extern pmap_paddr_t kvtophys(vm_offset_t va);
extern pmap_paddr_t kvtophys_nofail(vm_offset_t va);
extern vm_map_address_t phystokv(pmap_paddr_t pa);
extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len);

extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd( vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern kern_return_t pmap_map_block_addr(pmap_t pmap, addr64_t va, pmap_paddr_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);

#define PMAP_MAP_BD_DEVICE 0x0
#define PMAP_MAP_BD_WCOMB 0x1
#define PMAP_MAP_BD_POSTED 0x2
#define PMAP_MAP_BD_POSTED_REORDERED 0x3
#define PMAP_MAP_BD_POSTED_COMBINED_REORDERED 0x4
#define PMAP_MAP_BD_MASK 0x7

extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);

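/*
 * Illustrative use (hypothetical addresses, not defined in this header): map
 * the physical range [sa, ea) at virtual address va as write-combined device
 * memory.
 *
 *   vm_map_address_t end_va = pmap_map_bd_with_options(va, sa, ea,
 *       VM_PROT_READ | VM_PROT_WRITE, PMAP_MAP_BD_WCOMB);
 */
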
extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern void pmap_create_commpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr,
    vm_map_address_t *kernel_ro_data_addr, vm_map_address_t *user_text_addr);
extern void pmap_insert_commpage(pmap_t pmap);

extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);

static inline vm_offset_t
pmap_ro_zone_align(vm_offset_t value)
{
    return value;
}

extern void pmap_ro_zone_memcpy(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    vm_offset_t new_data, vm_size_t new_data_size);
extern uint64_t pmap_ro_zone_atomic_op(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    uint32_t op, uint64_t value);
extern void pmap_ro_zone_bzero(zone_id_t zid, vm_offset_t va, vm_offset_t offset, vm_size_t size);

#if XNU_MONITOR
/* exposed for use by the HMAC SHA driver */
extern void pmap_invoke_with_page(ppnum_t page_number, void *ctx,
    void (*callback)(void *ctx, ppnum_t page_number, const void *page));
extern void pmap_hibernate_invoke(void *ctx, void (*callback)(void *ctx, uint64_t addr, uint64_t len));
extern void pmap_set_ppl_hashed_flag(const pmap_paddr_t addr);
extern void pmap_clear_ppl_hashed_flag_all(void);
extern void pmap_check_ppl_hashed_flag_all(void);
#endif /* XNU_MONITOR */

extern boolean_t pmap_valid_page(ppnum_t pn);
extern boolean_t pmap_bootloader_page(ppnum_t pn);

extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);

#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01
#define ARM_PMAP_MAX_OFFSET_MIN 0x02
#define ARM_PMAP_MAX_OFFSET_MAX 0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE 0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO 0x10

extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
extern vm_map_offset_t pmap_max_64bit_offset(unsigned int option);
extern vm_map_offset_t pmap_max_32bit_offset(unsigned int option);

boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);

void pmap_pin_kernel_pages(vm_offset_t kva, size_t nbytes);
void pmap_unpin_kernel_pages(vm_offset_t kva, size_t nbytes);

void pmap_abandon_measurement(void);



/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX 0
#define ARM_FORCE_FAST_FAULT_INDEX 1
#define MAPPING_FREE_PRIME_INDEX 2
#define MAPPING_REPLENISH_INDEX 3
#define PHYS_ATTRIBUTE_CLEAR_INDEX 4
#define PHYS_ATTRIBUTE_SET_INDEX 5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX 6
#define PMAP_CHANGE_WIRING_INDEX 7
#define PMAP_CREATE_INDEX 8
#define PMAP_DESTROY_INDEX 9
#define PMAP_ENTER_OPTIONS_INDEX 10
/* #define PMAP_EXTRACT_INDEX 11 -- Not used */
#define PMAP_FIND_PA_INDEX 12
#define PMAP_INSERT_COMMPAGE_INDEX 13
#define PMAP_IS_EMPTY_INDEX 14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX 15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX 16
#define PMAP_NEST_INDEX 17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX 18
#define PMAP_PROTECT_OPTIONS_INDEX 19
#define PMAP_QUERY_PAGE_INFO_INDEX 20
#define PMAP_QUERY_RESIDENT_INDEX 21
#define PMAP_REFERENCE_INDEX 22
#define PMAP_REMOVE_OPTIONS_INDEX 23
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX 25
#define PMAP_SET_NESTED_INDEX 26
#define PMAP_SET_PROCESS_INDEX 27
#define PMAP_SWITCH_INDEX 28
#define PMAP_SWITCH_USER_TTB_INDEX 29
#define PMAP_CLEAR_USER_TTB_INDEX 30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX 31
#define PMAP_UNNEST_OPTIONS_INDEX 32
#define PMAP_FOOTPRINT_SUSPEND_INDEX 33
#define PMAP_CPU_DATA_INIT_INDEX 34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX 35
#define PMAP_SET_JIT_ENTITLED_INDEX 36


#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 55
#define PMAP_TRIM_INDEX 56
#define PMAP_LEDGER_VERIFY_SIZE_INDEX 57
#define PMAP_LEDGER_ALLOC_INDEX 58
#define PMAP_LEDGER_FREE_INDEX 59

#if HAS_APPLE_PAC
#define PMAP_SIGN_USER_PTR 60
#define PMAP_AUTH_USER_PTR 61
#endif /* HAS_APPLE_PAC */

#define PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX 66


#if __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG))
#define PMAP_DISABLE_USER_JOP_INDEX 69
#endif /* __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG)) */


#define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72

#define PMAP_SET_COMPILATION_SERVICE_CDHASH_INDEX 73
#define PMAP_MATCH_COMPILATION_SERVICE_CDHASH_INDEX 74
#define PMAP_NOP_INDEX 75

#define PMAP_RO_ZONE_MEMCPY_INDEX 76
#define PMAP_RO_ZONE_ATOMIC_OP_INDEX 77

#if DEVELOPMENT || DEBUG
#define PMAP_TEST_TEXT_CORRUPTION_INDEX 79
#endif /* DEVELOPMENT || DEBUG */



#define PMAP_SET_LOCAL_SIGNING_PUBLIC_KEY_INDEX 84
#define PMAP_UNRESTRICT_LOCAL_SIGNING_INDEX 85


#define PMAP_RO_ZONE_BZERO_INDEX 90




#define PMAP_LOAD_TRUST_CACHE_WITH_TYPE_INDEX 98
#define PMAP_QUERY_TRUST_CACHE_INDEX 99
#define PMAP_TOGGLE_DEVELOPER_MODE_INDEX 100
#define PMAP_REGISTER_PROVISIONING_PROFILE_INDEX 101
#define PMAP_UNREGISTER_PROVISIONING_PROFILE_INDEX 102
#define PMAP_ASSOCIATE_PROVISIONING_PROFILE_INDEX 103
#define PMAP_DISASSOCIATE_PROVISIONING_PROFILE_INDEX 104

/* HW read-only/read-write trusted path support */
#define PMAP_SET_TPRO_INDEX 105

#define PMAP_ASSOCIATE_KERNEL_ENTITLEMENTS_INDEX 106
#define PMAP_RESOLVE_KERNEL_ENTITLEMENTS_INDEX 107
#define PMAP_ACCELERATE_ENTITLEMENTS_INDEX 108
#define PMAP_CHECK_TRUST_CACHE_RUNTIME_FOR_UUID_INDEX 109
#define PMAP_IMAGE4_MONITOR_TRAP_INDEX 110

#define PMAP_COUNT 111

/**
 * Value used when initializing pmap per-cpu data to denote that the structure
 * hasn't been initialized with its associated CPU number yet.
 */
#define PMAP_INVALID_CPU_NUM (~0U)

/**
 * Align each CPU's pmap per-CPU data to the maximum L2 cache line size. This
 * prevents accesses from one CPU from interfering with another's, especially
 * when fields are updated atomically.
 */
struct pmap_cpu_data_array_entry {
    pmap_cpu_data_t cpu_data;
} __attribute__((aligned(MAX_L2_CLINE_BYTES)));

/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t *pmap_get_cpu_data(void);

/* Get the pmap per-CPU data for an arbitrary CPU. */
extern pmap_cpu_data_t *pmap_get_remote_cpu_data(unsigned int cpu);

/*
 * For long-running PV list operations, we pick a reasonable maximum chunk size
 * beyond which we will exit to preemptible context to avoid excessive preemption
 * latency and PVH lock timeouts.
 */
#define PMAP_MAX_PV_LIST_CHUNK_SIZE 64

/*
 * For most batched page operations, we pick a sane default page count
 * interval at which to check for pending preemption and exit the PPL if found.
 */
#define PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL 64

static inline bool
_pmap_pending_preemption_real(void)
{
    return !!(*((volatile ast_t*)ast_pending()) & AST_URGENT);
}

#if SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT)
bool pmap_pending_preemption(void); // more complicated, so externally defined
#else /* SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT) */
#define pmap_pending_preemption _pmap_pending_preemption_real
#endif /* SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT) */

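/*
 * Illustrative pattern (a sketch, not a routine defined in this header):
 * batched page operations typically check for pending preemption every
 * PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL pages and return to
 * preemptible context when one is found.
 *
 *   for (unsigned int i = 0; i < npages; i++) {
 *       ... operate on page i ...
 *       if ((((i + 1) % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL) == 0) &&
 *           pmap_pending_preemption()) {
 *           break;  ... caller re-enters later and resumes at page i + 1 ...
 *       }
 *   }
 */
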
#if XNU_MONITOR
extern boolean_t pmap_ppl_locked_down;

/*
 * Denotes the bounds of the PPL stacks. These are visible so that other code
 * can check if addresses are part of the PPL stacks.
 */
extern void *pmap_stacks_start;
extern void *pmap_stacks_end;

#if HAS_GUARDED_IO_FILTER
extern void *iofilter_stacks_start;
extern void *iofilter_stacks_end;
#endif

/* Asks if a page belongs to the monitor. */
extern boolean_t pmap_is_monitor(ppnum_t pn);

/*
 * Indicates that we are done with our static bootstrap
 * allocations, so the monitor may now mark the pages
 * that it owns.
 */
extern void pmap_static_allocations_done(void);


#ifdef KASAN
#define PPL_STACK_SIZE (PAGE_SIZE << 2)
#else /* KASAN */
#define PPL_STACK_SIZE PAGE_SIZE
#endif /* KASAN */

/* One stack for each CPU, plus a guard page below each stack and above the last stack. */
#define PPL_STACK_REGION_SIZE ((MAX_CPUS * (PPL_STACK_SIZE + ARM_PGBYTES)) + ARM_PGBYTES)

/* We don't expect heavy stack usage by the I/O filter, so use one page of stack even under KASAN. */
#define IOFILTER_STACK_SIZE PAGE_SIZE

/* One stack for each CPU, plus a guard page below each stack and above the last stack. */
#define IOFILTER_STACK_REGION_SIZE ((MAX_CPUS * (IOFILTER_STACK_SIZE + ARM_PGBYTES)) + ARM_PGBYTES)

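/*
 * Worked example (assumed values: MAX_CPUS == 6, 16K pages, ARM_PGBYTES ==
 * PAGE_SIZE, non-KASAN): PPL_STACK_REGION_SIZE is 6 * (16K + 16K) + 16K =
 * 208K, i.e. 13 pages covering a guard page below each of the 6 stacks plus
 * one trailing guard page. IOFILTER_STACK_REGION_SIZE follows the same layout.
 */
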
#define PPL_DATA_SEGMENT_SECTION_NAME "__PPLDATA,__data"
#define PPL_TEXT_SEGMENT_SECTION_NAME "__PPLTEXT,__text,regular,pure_instructions"
#define PPL_DATACONST_SEGMENT_SECTION_NAME "__PPLDATA,__const"

#define MARK_AS_PMAP_DATA \
    __PLACE_IN_SECTION(PPL_DATA_SEGMENT_SECTION_NAME)
#define MARK_AS_PMAP_TEXT \
    __attribute__((used, section(PPL_TEXT_SEGMENT_SECTION_NAME), noinline))
#define MARK_AS_PMAP_RODATA \
    __PLACE_IN_SECTION(PPL_DATACONST_SEGMENT_SECTION_NAME)

#else /* XNU_MONITOR */

#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA
#define MARK_AS_PMAP_RODATA

#endif /* XNU_MONITOR */

/*
 * Indicates that we are done mutating sensitive state in the system, and that
 * the pmap may now restrict access as dictated by system security policy.
 */
extern void pmap_lockdown_ppl(void);


extern void pmap_nop(pmap_t);

extern lck_grp_t pmap_lck_grp;

extern void CleanPoC_DcacheRegion_Force_nopreempt_nohid(vm_offset_t va, size_t length);

#if XNU_MONITOR
extern void CleanPoC_DcacheRegion_Force_nopreempt(vm_offset_t va, size_t length);
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force_nopreempt(va, sz)
#define pmap_simple_lock(l) simple_lock_nopreempt(l, &pmap_lck_grp)
#define pmap_simple_unlock(l) simple_unlock_nopreempt(l)
#define pmap_simple_lock_try(l) simple_lock_try_nopreempt(l, &pmap_lck_grp)
#define pmap_simple_lock_assert(l, t) simple_lock_assert(l, t)
#define pmap_lock_bit(l, i) hw_lock_bit_nopreempt(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i) hw_unlock_bit_nopreempt(l, i)
#else /* XNU_MONITOR */
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz)
#define pmap_simple_lock(l) simple_lock(l, &pmap_lck_grp)
#define pmap_simple_unlock(l) simple_unlock(l)
#define pmap_simple_lock_try(l) simple_lock_try(l, &pmap_lck_grp)
#define pmap_simple_lock_assert(l, t) simple_lock_assert(l, t)
#define pmap_lock_bit(l, i) hw_lock_bit(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i) hw_unlock_bit(l, i)
#endif /* XNU_MONITOR */

#if DEVELOPMENT || DEBUG
extern kern_return_t pmap_test_text_corruption(pmap_paddr_t);
#endif /* DEVELOPMENT || DEBUG */

#endif /* #ifndef ASSEMBLER */

#if __ARM_KERNEL_PROTECT__
/*
 * The exception vector mappings start at the middle of the kernel page table
 * range (so that the EL0 mapping can be located at the base of the range).
 */
#define ARM_KERNEL_PROTECT_EXCEPTION_START ((~((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)) + 1ULL)
#endif /* __ARM_KERNEL_PROTECT__ */

#endif /* #ifndef _ARM_PMAP_H_ */