/*
 *
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_ 1

#include <mach_assert.h>

#include <arm/proc_reg.h>
#if defined(__arm64__)
#include <arm64/proc_reg.h>
#endif

/*
 * Machine-dependent structures for the physical map module.
 */

#ifndef ASSEMBLER

#include <stdatomic.h>
#include <libkern/section_keywords.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <arm/pmap_public.h>
#include <mach/arm/thread_status.h>

#if __ARM_KERNEL_PROTECT__
/*
 * For __ARM_KERNEL_PROTECT__, we need twice as many ASIDs to support having
 * unique EL0 and EL1 ASIDs for each pmap.
 */
#define ASID_SHIFT (12) /* Shift for the maximum virtual ASID value (4096) */
#else /* __ARM_KERNEL_PROTECT__ */
#define ASID_SHIFT (11) /* Shift for the maximum virtual ASID value (2048) */
#endif /* __ARM_KERNEL_PROTECT__ */
#define MAX_ASID (1 << ASID_SHIFT) /* Max supported ASIDs (can be virtual) */
#define ARM_ASID_SHIFT (8) /* Shift for the maximum ARM ASID value (256) */
#define ARM_MAX_ASID (1 << ARM_ASID_SHIFT) /* Max ASIDs supported by the hardware */
#define ASID_VIRT_BITS (ASID_SHIFT - ARM_ASID_SHIFT) /* The number of virtual bits in a virtual ASID */
#define NBBY 8
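
/*
 * Illustrative sketch (not part of the interface): a virtual ASID can be
 * viewed as a hardware ASID in its low ARM_ASID_SHIFT bits plus
 * ASID_VIRT_BITS of software-managed high bits, e.g.:
 *
 *	unsigned int vasid = ...;                          // hypothetical virtual ASID
 *	unsigned int hw_asid = vasid & (ARM_MAX_ASID - 1); // programmed into the hardware
 *	unsigned int high_bits = vasid >> ARM_ASID_SHIFT;  // tracked by software only
 */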

struct pmap_cpu_data {
#if defined(__arm64__)
	pmap_t cpu_nested_pmap;
#else
	pmap_t cpu_user_pmap;
	unsigned int cpu_user_pmap_stamp;
#endif
	unsigned int cpu_number;


	/*
	 * This supports overloading of ARM ASIDs by the pmap. The field needs
	 * to be wide enough to cover all the virtual bits in a virtual ASID.
	 * With 256 physical ASIDs, 8-bit fields let us support up to 65536
	 * Virtual ASIDs, minus all that would map on to 0 (as 0 is a global
	 * ASID).
	 *
	 * If we were to use bitfield shenanigans here, we could save a bit of
	 * memory by only having enough bits to support MAX_ASID. However, such
	 * an implementation would be more error prone.
	 */
	uint8_t cpu_asid_high_bits[ARM_MAX_ASID];
};
typedef struct pmap_cpu_data pmap_cpu_data_t;
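
/*
 * A minimal sketch of how the per-CPU high-bits table might be consulted on
 * an address-space switch (hypothetical logic; the real implementation lives
 * in the pmap, not in this header):
 *
 *	pmap_cpu_data_t *cd = pmap_get_cpu_data();
 *	unsigned int hw_asid = vasid & (ARM_MAX_ASID - 1);
 *	uint8_t hi = (uint8_t)(vasid >> ARM_ASID_SHIFT);
 *	if (cd->cpu_asid_high_bits[hw_asid] != hi) {
 *		// The hardware ASID was last used by a different virtual ASID:
 *		// record the new owner and flush this ASID's stale TLB entries.
 *		cd->cpu_asid_high_bits[hw_asid] = hi;
 *		flush_core_tlb_asid(tlbi_asid(hw_asid));
 *	}
 */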

#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>


#include <sys/cdefs.h>

/* Base address for low globals. */
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL

/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__
#define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS
#else
#define KERNEL_PMAP_HEAP_RANGE_START LOW_GLOBAL_BASE_ADDRESS
#endif

#if defined(__arm64__)

#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)

typedef uint64_t tt_entry_t; /* translation table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

typedef uint64_t pt_entry_t; /* page table entry type */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)

#elif defined(__arm__)

typedef uint32_t tt_entry_t; /* translation table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

typedef uint32_t pt_entry_t; /* page table entry type */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)

#else
#error unknown arch
#endif


/* superpages */
#define SUPERPAGE_NBASEPAGES 1 /* No superpages support */

/*
 * Convert addresses to pages and vice versa.
 * No rounding is used.
 */
#define arm_atop(x) (((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x) (((vm_map_address_t)(x)) << ARM_PGSHIFT)
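
/*
 * For example, with 4 KiB hardware pages (ARM_PGSHIFT == 12, illustrative):
 *
 *	arm_atop(0x3FFF) == 0x3    // address -> page number, truncating
 *	arm_ptoa(0x3)    == 0x3000 // page number -> address of page start
 */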

/*
 * Round up or truncate down to the nearest page. These work for
 * either addresses or counts (i.e., 1 byte rounds up to 1 page's
 * worth of bytes).
 */
#define arm_round_page(x) \
	((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x) (((vm_map_address_t)(x)) & ~ARM_PGMASK)
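
/*
 * Again assuming 4 KiB hardware pages (ARM_PGMASK == 0xFFF, illustrative):
 *
 *	arm_round_page(1)      == 0x1000 // 1 byte occupies a full page
 *	arm_round_page(0x1000) == 0x1000 // already page-aligned
 *	arm_trunc_page(0x1FFF) == 0x1000 // truncates toward zero
 */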

/* Convert address offset to page table index */
#define ptenum(a) ((((a) & ARM_TT_LEAF_INDEX_MASK) >> ARM_TT_LEAF_SHIFT))

/*
 * For setups where the kernel page size does not match the hardware
 * page size (presumably, the kernel page size must be a multiple of
 * the hardware page size), we will need to determine what the page
 * ratio is.
 */
#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4)
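
/*
 * For example, a 16 KiB kernel page size (PAGE_SHIFT == 14) over 4 KiB
 * hardware pages (ARM_PGSHIFT == 12) gives PAGE_RATIO == 4, so
 * TEST_PAGE_RATIO_4 holds; matched page sizes give PAGE_RATIO == 1.
 */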

#if (__ARM_VMSA__ <= 7)
#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES ((ARM_PGBYTES / 4) / sizeof(pt_entry_t))
#else
#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES (ARM_PGBYTES / sizeof(pt_entry_t))
#endif

extern void sync_tlb_flush(void);
extern void flush_mmu_tlb_async(void);
extern void flush_mmu_tlb(void);
extern void flush_core_tlb_async(void);
extern void flush_core_tlb(void);
#if defined(__arm64__)
extern void flush_mmu_tlb_allentries_async(uint64_t, uint64_t);
extern void flush_mmu_tlb_allentries(uint64_t, uint64_t);
extern void flush_mmu_tlb_entry_async(uint64_t);
extern void flush_mmu_tlb_entry(uint64_t);
extern void flush_mmu_tlb_entries_async(uint64_t, uint64_t);
extern void flush_mmu_tlb_entries(uint64_t, uint64_t);
extern void flush_mmu_tlb_asid_async(uint64_t);
extern void flush_mmu_tlb_asid(uint64_t);
extern void flush_core_tlb_asid_async(uint64_t);
extern void flush_core_tlb_asid(uint64_t);

#define tlbi_addr(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
#define tlbi_asid(x) ((((uint64_t)(x)) << TLBI_ASID_SHIFT) & TLBI_ASID_MASK)
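
/*
 * A sketch of how these operand helpers can be combined into the value
 * passed to a TLBI-by-VA operation (illustrative; the exact operand
 * encoding is defined by the architecture via TLBI_*_SHIFT/MASK):
 *
 *	uint64_t va = ...;   // virtual address to invalidate (hypothetical)
 *	uint64_t asid = ...; // target address space (hypothetical)
 *	flush_mmu_tlb_entry(tlbi_addr(va) | tlbi_asid(asid));
 */
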
#else
extern void flush_mmu_tlb_entry_async(uint32_t);
extern void flush_mmu_tlb_entry(uint32_t);
extern void flush_mmu_tlb_entries_async(uint32_t, uint32_t);
extern void flush_mmu_tlb_entries(uint32_t, uint32_t);
extern void flush_mmu_tlb_mva_entries_async(uint32_t);
extern void flush_mmu_tlb_mva_entries(uint32_t);
extern void flush_mmu_tlb_asid_async(uint32_t);
extern void flush_mmu_tlb_asid(uint32_t);
extern void flush_core_tlb_asid_async(uint32_t);
extern void flush_core_tlb_asid(uint32_t);
#endif
extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);

#if defined(__arm64__)
extern uint64_t get_mmu_control(void);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
extern uint64_t pmap_get_arm64_prot(pmap_t, vm_offset_t);
#else
extern uint32_t get_mmu_control(void);
extern void set_mmu_control(uint32_t);
extern uint32_t get_aux_control(void);
extern void set_aux_control(uint32_t);
extern void set_mmu_ttb(pmap_paddr_t);
extern void set_mmu_ttb_alternate(pmap_paddr_t);
extern void set_context_id(uint32_t);
#endif

extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);

#if (__ARM_VMSA__ <= 7)
/* Convert address offset to translation table index */
#define ttenum(a) ((a) >> ARM_TT_L1_SHIFT)

/* Convert translation table index to user virtual address */
#define tteitova(a) ((a) << ARM_TT_L1_SHIFT)
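
/*
 * For example, with 1 MiB L1 sections on ARMv7 (ARM_TT_L1_SHIFT == 20,
 * illustrative): ttenum(0x00300000) == 3 and tteitova(3) == 0x00300000.
 */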

#define pa_to_suptte(a) ((a) & ARM_TTE_SUPER_L1_MASK)
#define suptte_to_pa(p) ((p) & ARM_TTE_SUPER_L1_MASK)

#define pa_to_sectte(a) ((a) & ARM_TTE_BLOCK_L1_MASK)
#define sectte_to_pa(p) ((p) & ARM_TTE_BLOCK_L1_MASK)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE / 0x1000) * 4 * ARM_TT_L1_SIZE)
#define ARM_NESTING_SIZE_MAX ((256 * ARM_TT_L1_SIZE))

#else

/* Convert address offset to translation table index */
#define ttel0num(a) (((a) & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a) (((a) & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a) (((a) & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_MASK)
#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE / ARM_PGBYTES) * ARM_TT_L2_SIZE)
#define ARM_NESTING_SIZE_MAX (0x0000000010000000ULL)

#define TLBFLUSH_SIZE (ARM_TTE_MAX / ((sizeof(unsigned int)) * BYTE_SIZE))

#endif /* __ARM_VMSA__ <= 7 */

#define PMAP_GC_INFLIGHT 1
#define PMAP_GC_WAIT 2

#if DEVELOPMENT || DEBUG
#define pmap_cs_log(msg, args...) printf("PMAP_CS: " msg "\n", args)
#define pmap_cs_log_h(msg, args...) { if (pmap_cs_log_hacks) printf("PMAP_CS: " msg "\n", args); }

#define PMAP_CS_EXCEPTION_LIST_HACK 1

#else
#define pmap_cs_log(msg, args...)
#define pmap_cs_log_h(msg, args...)
#endif /* DEVELOPMENT || DEBUG */


/*
 * Convert translation/page table entry to kernel virtual address
 */
#define ttetokv(a) (phystokv(tte_to_pa(a)))
#define ptetokv(a) (phystokv(pte_to_pa(a)))
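
/*
 * A minimal sketch of a software table walk using these helpers, assuming a
 * valid table-type TTE (illustrative; the pmap implementation performs the
 * real walks with the appropriate locking and validity checks):
 *
 *	tt_entry_t tte = *ttep;                       // current-level entry
 *	pt_entry_t *ptp = (pt_entry_t *)ttetokv(tte); // KVA of next-level table
 *	pt_entry_t pte = ptp[ptenum(va)];             // leaf entry for va
 *	pmap_paddr_t pa = pte_to_pa(pte) | (va & ARM_PGMASK);
 */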

struct pmap {
	tt_entry_t *tte; /* translation table entries */
	pmap_paddr_t ttep; /* translation table physical */
	vm_map_address_t min; /* min address in pmap */
	vm_map_address_t max; /* max address in pmap */
	ledger_t ledger; /* ledger tracking phys mappings */
	decl_simple_lock_data(,lock) /* lock on map */
	struct pmap_statistics stats; /* map statistics */
	queue_chain_t pmaps; /* global list of pmaps */
	tt_entry_t *tt_entry_free; /* free translation table entries */
	tt_entry_t *prev_tte; /* previous translation table */
	struct pmap *nested_pmap; /* nested pmap */
	vm_map_address_t nested_region_grand_addr;
	vm_map_address_t nested_region_subord_addr;
	vm_map_offset_t nested_region_size;
	vm_map_offset_t nested_region_true_start;
	vm_map_offset_t nested_region_true_end;
	unsigned int *nested_region_asid_bitmap;

#if (__ARM_VMSA__ <= 7)
	decl_simple_lock_data(,tt1_lock) /* lock on tt1 */
	unsigned int cpu_ref; /* number of cpus using pmap */
#endif


	unsigned int asid; /* address space id */
	unsigned int vasid; /* virtual address space id */
	unsigned int stamp; /* creation stamp */
	_Atomic int32_t ref_count; /* pmap reference count */
	unsigned int gc_status; /* gc status */
	unsigned int nested_region_asid_bitmap_size;
	unsigned int tte_index_max; /* max tte index in translation table entries */
	uint32_t nested_no_bounds_refcnt; /* number of pmaps that nested this pmap without bounds set */

#if MACH_ASSERT
	int pmap_pid;
	char pmap_procname[17];
	bool pmap_stats_assert;
#endif /* MACH_ASSERT */
#if DEVELOPMENT || DEBUG
	bool footprint_suspended;
	bool footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */
	bool nx_enabled; /* no execute */
	bool nested; /* is nested */
	bool is_64bit; /* is 64bit */
	bool nested_has_no_bounds_ref; /* nested a pmap when the bounds were not set */
	bool nested_bounds_set; /* the nesting bounds have been set */
};

/* typedef struct pmap *pmap_t; */
#define PMAP_NULL ((pmap_t) 0)


/*
 * WIMG control
 */
#define VM_MEM_INNER 0x10
#define VM_MEM_RT 0x10 /* intentionally aliases VM_MEM_INNER; used with mutually exclusive caching policies */
#define VM_MEM_EARLY_ACK 0x20

#define VM_WIMG_DEFAULT (VM_MEM_COHERENT)
#define VM_WIMG_COPYBACK (VM_MEM_COHERENT)
#define VM_WIMG_INNERWBACK (VM_MEM_COHERENT | VM_MEM_INNER)
#define VM_WIMG_IO (VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_POSTED (VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED | VM_MEM_EARLY_ACK)
#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
#define VM_WIMG_RT (VM_WIMG_IO | VM_MEM_RT)

#if VM_DEBUG
extern int pmap_list_resident_pages(
	pmap_t pmap,
	vm_offset_t *listp,
	int space
	);
#else /* #if VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* #if VM_DEBUG */

extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);

/* globals shared between arm_vm_init and pmap */
extern tt_entry_t *cpu_tte; /* first CPU's translation table (shared with kernel pmap) */
extern pmap_paddr_t cpu_ttep; /* physical translation table addr */

#if __arm64__
extern void *ropagetable_begin;
extern void *ropagetable_end;
#endif

#if __arm64__
extern tt_entry_t *invalid_tte; /* global invalid translation table */
extern pmap_paddr_t invalid_ttep; /* physical invalid translation table addr */
#endif

#define PMAP_CONTEXT(pmap, thread)

/*
 * Platform-dependent prototypes
 */
extern void pmap_switch_user_ttb(pmap_t pmap);
extern void pmap_clear_user_ttb(void);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_collect(pmap_t pmap);
extern void pmap_gc(void);
#if defined(__arm64__)
extern vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va);
#endif

/*
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \
	th->map = new_map; \
	pmap_set_pmap(vm_map_pmap(new_map), th); \
}

#define pmap_kernel() \
	(kernel_pmap)

#define pmap_compressed(pmap) \
	((pmap)->stats.compressed)

#define pmap_resident_count(pmap) \
	((pmap)->stats.resident_count)

#define pmap_resident_max(pmap) \
	((pmap)->stats.resident_max)

#define MACRO_NOOP

#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) \
	MACRO_NOOP

#define pmap_pageable(pmap, start, end, pageable) \
	MACRO_NOOP

#define pmap_kernel_va(VA) \
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value) \
	(KERN_INVALID_ADDRESS)

#define copyinmsg(from, to, cnt) \
	copyin(from, to, cnt)

#define copyoutmsg(from, to, cnt) \
	copyout(from, to, cnt)

extern pmap_paddr_t kvtophys(vm_offset_t va);
extern vm_map_address_t phystokv(pmap_paddr_t pa);
extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len);

extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd(vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);

#define PMAP_MAP_BD_DEVICE 0x1
#define PMAP_MAP_BD_WCOMB 0x2
#define PMAP_MAP_BD_POSTED 0x3
#define PMAP_MAP_BD_MASK 0x3

extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);
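
/*
 * A hedged usage sketch (hypothetical addresses): block-map a device
 * register page with posted (early-ack) semantics during early boot.
 * PMAP_MAP_BD_MASK covers the mutually exclusive mapping-type options.
 *
 *	vm_offset_t pa = 0x20000000;  // hypothetical device physical base
 *	vm_map_address_t va = ...;    // next free boot-mapping VA
 *	va = pmap_map_bd_with_options(va, pa, pa + PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_MAP_BD_POSTED);
 *	// assumption: the return value is the VA following the mapped range
 */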

extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern vm_map_address_t pmap_create_sharedpage(void);
extern void pmap_insert_sharedpage(pmap_t pmap);
extern void pmap_protect_sharedpage(void);

extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);
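
/*
 * A minimal sketch of the per-CPU copy-window protocol these three calls
 * imply (illustrative; the caller must stay on one CPU for the duration):
 *
 *	unsigned int i = pmap_map_cpu_windows_copy(pn,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_WIMG_DEFAULT); // window for page pn
 *	void *p = (void *)pmap_cpu_windows_copy_addr(cpu_number(), i);
 *	// ... access the physical page through p ...
 *	pmap_unmap_cpu_windows_copy(i);                     // release the window
 */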

extern void pt_fake_zone_init(int);
extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
    uint64_t *, int *, int *, int *);

extern boolean_t pmap_valid_page(ppnum_t pn);

#define MACHINE_PMAP_IS_EMPTY 1
extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);

#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01
#define ARM_PMAP_MAX_OFFSET_MIN 0x02
#define ARM_PMAP_MAX_OFFSET_MAX 0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE 0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO 0x10


extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
extern vm_map_offset_t pmap_max_64bit_offset(unsigned int option);
extern vm_map_offset_t pmap_max_32bit_offset(unsigned int option);

boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);

/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX 0
#define ARM_FORCE_FAST_FAULT_INDEX 1
#define MAPPING_FREE_PRIME_INDEX 2
#define MAPPING_REPLENISH_INDEX 3
#define PHYS_ATTRIBUTE_CLEAR_INDEX 4
#define PHYS_ATTRIBUTE_SET_INDEX 5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX 6
#define PMAP_CHANGE_WIRING_INDEX 7
#define PMAP_CREATE_INDEX 8
#define PMAP_DESTROY_INDEX 9
#define PMAP_ENTER_OPTIONS_INDEX 10
#define PMAP_EXTRACT_INDEX 11
#define PMAP_FIND_PHYS_INDEX 12
#define PMAP_INSERT_SHAREDPAGE_INDEX 13
#define PMAP_IS_EMPTY_INDEX 14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX 15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX 16
#define PMAP_NEST_INDEX 17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX 18
#define PMAP_PROTECT_OPTIONS_INDEX 19
#define PMAP_QUERY_PAGE_INFO_INDEX 20
#define PMAP_QUERY_RESIDENT_INDEX 21
#define PMAP_REFERENCE_INDEX 22
#define PMAP_REMOVE_OPTIONS_INDEX 23
#define PMAP_RETURN_INDEX 24
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX 25
#define PMAP_SET_NESTED_INDEX 26
#define PMAP_SET_PROCESS_INDEX 27
#define PMAP_SWITCH_INDEX 28
#define PMAP_SWITCH_USER_TTB_INDEX 29
#define PMAP_CLEAR_USER_TTB_INDEX 30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX 31
#define PMAP_UNNEST_OPTIONS_INDEX 32
#define PMAP_FOOTPRINT_SUSPEND_INDEX 33
#define PMAP_CPU_DATA_INIT_INDEX 34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX 35
#define PMAP_SET_JIT_ENTITLED_INDEX 36


#define PMAP_TRIM_INDEX 64
#define PMAP_LEDGER_ALLOC_INIT_INDEX 65
#define PMAP_LEDGER_ALLOC_INDEX 66
#define PMAP_LEDGER_FREE_INDEX 67

#define PMAP_COUNT 68

#define PMAP_INVALID_CPU_NUM (~0U)

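/*
 * Each per-CPU entry is padded out to its own L2 cache line so that
 * different CPUs' pmap data never share (and never falsely share) a line.
 */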
struct pmap_cpu_data_array_entry {
	pmap_cpu_data_t cpu_data;
} __attribute__((aligned(1 << L2_CLINE)));

/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t *pmap_get_cpu_data(void);


#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA
#define MARK_AS_PMAP_RODATA



extern kern_return_t pmap_return(boolean_t do_panic, boolean_t do_recurse);

#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz)
#define pmap_simple_lock(l) simple_lock(l)
#define pmap_simple_unlock(l) simple_unlock(l)
#define pmap_simple_lock_try(l) simple_lock_try(l)
#define pmap_lock_bit(l, i) hw_lock_bit(l, i)
#define pmap_unlock_bit(l, i) hw_unlock_bit(l, i)

#endif /* #ifndef ASSEMBLER */

#if __ARM_KERNEL_PROTECT__
/*
 * The exception vector mappings start at the middle of the kernel page table
 * range (so that the EL0 mapping can be located at the base of the range).
 */
#define ARM_KERNEL_PROTECT_EXCEPTION_START ((~((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)) + 1ULL)
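
/*
 * Worked example (assuming the root tables span 2^39 bytes of VA, i.e.
 * ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK == 2^39): the expression
 * evaluates to ~(2^38) + 1 == 2^64 - 2^38 == 0xFFFFFFC000000000, the
 * midpoint of the top 2^39-byte region [0xFFFFFF8000000000, 2^64).
 */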
#endif /* __ARM_KERNEL_PROTECT__ */

#endif /* #ifndef _ARM_PMAP_H_ */