/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * PMAP Page Table Geometry.
 *
 * This header file is used to store the types and inline functions related to
 * retrieving information about and parsing page table hierarchies.
 *
 * To prevent circular dependencies, this file shouldn't include any of the
 * other internal osfmk/arm/pmap/ header files.
 */
#ifndef _ARM_PMAP_PMAP_PT_GEOMETRY_H_
#define _ARM_PMAP_PMAP_PT_GEOMETRY_H_

#include <stdint.h>

#include <kern/debug.h>
#include <kern/locks.h>
#include <mach/vm_types.h>
#include <mach_assert.h>

#include <arm64/proc_reg.h>

/**
 * arm/pmap.h is safe to include in this file since it shouldn't rely on any of
 * the internal pmap header files (so no circular dependencies).
 */
#include <arm/pmap.h>

/**
 * Structure representing parameters of a single page table level. An array of
 * these structures is used to represent the geometry for an entire page table
 * hierarchy.
 */
struct page_table_level_info {
	const uint64_t size;
	const uint64_t offmask;
	const uint64_t shift;
	const uint64_t index_mask;
	const uint64_t valid_mask;
	const uint64_t type_mask;
	const uint64_t type_block;
};
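
/**
 * Illustrative example (hypothetical values, assuming a 16KB translation
 * granule): the leaf (L3) entry of a level info array could look roughly like
 * the following, where each L3 TTE maps 16KB of VA and the table index
 * occupies VA bits [24:14]. Real configurations take these values from the
 * corresponding definitions in proc_reg.h.
 *
 *     {
 *         .size       = 0x4000ULL,      // 16KB of VA mapped per L3 entry
 *         .offmask    = 0x3fffULL,      // byte offset within a 16KB page
 *         .shift      = 14,             // shift down to the L3 index bits
 *         .index_mask = 0x1ffc000ULL,   // VA bits [24:14] select the L3 entry
 *         ...                           // valid/type masks come from the
 *                                       // PTE type-field definitions
 *     }
 */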

/**
 * Operations that are dependent on the type of page table. This is useful, for
 * instance, when dealing with stage 1 vs stage 2 pmaps.
 */
struct page_table_ops {
	bool (*alloc_id)(pmap_t pmap);
	void (*free_id)(pmap_t pmap);
	void (*flush_tlb_region_async)(vm_offset_t va, size_t length, pmap_t pmap, bool last_level_only, bool strong);
	void (*flush_tlb_async)(pmap_t pmap);
	pt_entry_t (*wimg_to_pte)(unsigned int wimg, pmap_paddr_t pa);
};

/**
 * The Page Table Attribute structure is used both for parameterizing the
 * different possible page table geometries and for abstracting out the
 * differences between stage 1 and stage 2 page tables. This allows one set of
 * code to seamlessly handle the differences between various address space
 * layouts as well as stage 1 vs stage 2 page tables on the fly. See
 * doc/arm_pmap.md for more details.
 *
 * Instead of accessing the fields in this structure directly, it is recommended
 * to use the page table attribute getter functions defined below.
 */
struct page_table_attr {
	/* Sizes and offsets for each level in the page table hierarchy. */
	const struct page_table_level_info * const pta_level_info;

	/* Operations that are dependent on the type of page table. */
	const struct page_table_ops * const pta_ops;

	/**
	 * The Access Permissions bits have different layouts within a page table
	 * entry depending on whether it's an entry for a stage 1 or stage 2 pmap.
	 *
	 * These fields describe the correct PTE bits to set to get the wanted
	 * permissions for the page tables described by this attribute structure.
	 */
	const uintptr_t ap_ro;
	const uintptr_t ap_rw;
	const uintptr_t ap_rona;
	const uintptr_t ap_rwna;
	const uintptr_t ap_xn;
	const uintptr_t ap_x;

	/* The page table level at which the hierarchy begins. */
	const unsigned int pta_root_level;

	/* The page table level at which the commpage is nested into an address space. */
	const unsigned int pta_commpage_level;

	/* The last level in the page table hierarchy (ARM supports up to four levels). */
	const unsigned int pta_max_level;

	/**
	 * Value to set the Translation Control Register (TCR) to in order to inform
	 * the hardware of this page table geometry.
	 */
	const uint64_t pta_tcr_value;

	/* Page Table/Granule Size. */
	const uint64_t pta_page_size;

	/**
	 * Size (in bytes) of the VA region at the beginning of the address space
	 * into which mappings should not be allowed.
	 */
	const uint64_t pta_pagezero_size;

	/**
	 * How many bits to shift "1" by to get the page table size. Alternatively,
	 * could also be thought of as how many bits make up the page offset in a
	 * virtual address.
	 */
	const uint64_t pta_page_shift;
};

typedef struct page_table_attr pt_attr_t;

/* The default page table attributes for a system. */
extern const struct page_table_attr * const native_pt_attr;
extern const struct page_table_ops native_pt_ops;

/**
 * Macros for getting pmap attributes/operations; these are deliberately macros
 * rather than functions so that the compiler can const-propagate the values
 * when the attributes are fixed at compile time.
 */
#if ARM_PARAMETERIZED_PMAP

/* The page table attributes are linked to the pmap */
#define pmap_get_pt_attr(pmap) ((pmap)->pmap_pt_attr)
#define pmap_get_pt_ops(pmap) ((pmap)->pmap_pt_attr->pta_ops)

#else /* ARM_PARAMETERIZED_PMAP */

/* The page table attributes are fixed (to allow for const propagation) */
#define pmap_get_pt_attr(pmap) (native_pt_attr)
#define pmap_get_pt_ops(pmap) (&native_pt_ops)

#endif /* ARM_PARAMETERIZED_PMAP */
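
/**
 * Illustrative usage sketch (hypothetical caller, not part of the pmap API
 * itself): code that needs to reason about the current geometry first grabs
 * the attribute structure from the pmap and then uses the getters defined
 * below, e.g.:
 *
 *     const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
 *     const uint64_t page_size = pt_attr_page_size(pt_attr);
 *     const pt_entry_t wimg_bits = pmap_get_pt_ops(pmap)->wimg_to_pte(VM_WIMG_DEFAULT, pa);
 *
 * Here VM_WIMG_DEFAULT and pa are stand-ins for whatever cacheability index
 * and physical address the caller is actually mapping.
 */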

/* Defines representing a level in a page table hierarchy. */
#define PMAP_TT_L0_LEVEL 0x0
#define PMAP_TT_L1_LEVEL 0x1
#define PMAP_TT_L2_LEVEL 0x2
#define PMAP_TT_L3_LEVEL 0x3

/**
 * Inline functions exported for usage by other pmap modules.
 *
 * In an effort to not cause any performance regressions while breaking up the
 * pmap, I'm keeping all functions originally marked as "static inline" as
 * inline, and moving them into header files to be shared across the pmap
 * modules. In reality, many of these functions probably don't need to be
 * inline and can be moved back into a .c file.
 *
 * TODO: rdar://70538514 (PMAP Cleanup: re-evaluate whether inline functions should actually be inline)
 */

/**
 * Keep the following in mind when looking at the available attribute getters:
 *
 * We tend to use standard terms to describe various levels in a page table
 * hierarchy. The "root" level is the top of a hierarchy. The root page table is
 * the one that will be programmed into the Translation Table Base Register
 * (TTBR) to inform the hardware of where to begin when performing page table
 * walks. The "twig" level is always one up from the last level, and the "leaf"
 * level is the last page table level in a hierarchy. The leaf page tables
 * always contain block entries, but the higher levels can contain either table
 * or block entries.
 *
 * ARM supports up to four levels of page tables. The levels start at L0 and
 * increase to L3 the deeper into a hierarchy you get, although L0 isn't
 * necessarily always the root level. For example, in a four-level hierarchy,
 * the root would be L0, the twig would be L2, and the leaf would be L3. But for
 * a three-level hierarchy, the root would be L1, the twig would be L2, and the
 * leaf would be L3.
 */
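
/**
 * As a concrete illustration of the getters that follow (assuming a
 * hypothetical three-level geometry rooted at L1, as in the example above):
 *
 *     pt_attr_root_level(pt_attr) == PMAP_TT_L1_LEVEL
 *     pt_attr_twig_level(pt_attr) == PMAP_TT_L2_LEVEL
 *     pt_attr_leaf_level(pt_attr) == PMAP_TT_L3_LEVEL
 *
 * A four-level geometry would instead report PMAP_TT_L0_LEVEL from
 * pt_attr_root_level(), while the twig and leaf levels remain L2 and L3.
 */
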
/* Page size getter. */
static inline uint64_t
pt_attr_page_size(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_page_size;
}

/* Pagezero region size getter. */
static inline uint64_t
pt_attr_pagezero_size(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_pagezero_size;
}

/**
 * Return the size of the virtual address space covered by a single TTE at a
 * specified level in the hierarchy.
 */
__unused static inline uint64_t
pt_attr_ln_size(const pt_attr_t * const pt_attr, unsigned int level)
{
	return pt_attr->pta_level_info[level].size;
}

/**
 * Return the page descriptor shift for a specified level in the hierarchy. This
 * shift value can be used to get the index into a page table at this level in
 * the hierarchy from a given virtual address.
 */
__unused static inline uint64_t
pt_attr_ln_shift(const pt_attr_t * const pt_attr, unsigned int level)
{
	return pt_attr->pta_level_info[level].shift;
}

/**
 * Return a mask of the offset for a specified level in the hierarchy.
 *
 * This should be equivalent to the value returned by pt_attr_ln_size() - 1.
 */
static inline uint64_t
pt_attr_ln_offmask(const pt_attr_t * const pt_attr, unsigned int level)
{
	return pt_attr->pta_level_info[level].offmask;
}

/**
 * Return the mask for getting a page table index out of a virtual address for a
 * specified level in the hierarchy. This can be combined with the value
 * returned by pt_attr_ln_shift() to get the index into a page table.
 */
__unused static inline uint64_t
pt_attr_ln_index_mask(const pt_attr_t * const pt_attr, unsigned int level)
{
	return pt_attr->pta_level_info[level].index_mask;
}

/**
 * Return the second to last page table level.
 */
static inline unsigned int
pt_attr_twig_level(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_max_level - 1;
}

/**
 * Return the first page table level. This is what will be programmed into the
 * Translation Table Base Register (TTBR) to inform the hardware of where to
 * begin page table walks.
 */
static inline unsigned int
pt_attr_root_level(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_root_level;
}

/**
 * Return the level at which to nest the commpage pmap into userspace pmaps.
 * Since the commpage is shared across all userspace address maps, memory is
 * saved by sharing the commpage page tables with every userspace pmap. The
 * level at which to nest the commpage is dependent on the page table geometry.
 *
 * Typically this is L1 for 4KB page tables, and L2 for 16KB page tables. In
 * this way, the commpage's L2/L3 page tables are reused in every 4KB task, and
 * the L3 page table is reused in every 16KB task.
 */
static inline unsigned int
pt_attr_commpage_level(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_commpage_level;
}

/**
 * Return the size of the virtual address space covered by a single PTE at the
 * leaf level.
 */
static __unused inline uint64_t
pt_attr_leaf_size(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_level_info[pt_attr->pta_max_level].size;
}

/**
 * Return a mask of the offset for a leaf table.
 *
 * This should be equivalent to the value returned by pt_attr_leaf_size() - 1.
 */
static __unused inline uint64_t
pt_attr_leaf_offmask(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_level_info[pt_attr->pta_max_level].offmask;
}

/**
 * Return the page descriptor shift for a leaf table entry. This shift value can
 * be used to get the index into a leaf page table from a given virtual address.
 */
static inline uint64_t
pt_attr_leaf_shift(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_level_info[pt_attr->pta_max_level].shift;
}

/**
 * Return the mask for getting a leaf table index out of a virtual address. This
 * can be combined with the value returned by pt_attr_leaf_shift() to get the
 * index into a leaf table.
 */
static __unused inline uint64_t
pt_attr_leaf_index_mask(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_level_info[pt_attr->pta_max_level].index_mask;
}

/**
 * Return the size of the virtual address space covered by a single TTE at the
 * twig level.
 */
static inline uint64_t
pt_attr_twig_size(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].size;
}

/**
 * Return a mask of the offset for a twig table.
 *
 * This should be equivalent to the value returned by pt_attr_twig_size() - 1.
 */
static inline uint64_t
pt_attr_twig_offmask(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].offmask;
}

/**
 * Return the page descriptor shift for a twig table entry. This shift value can
 * be used to get the index into a twig page table from a given virtual address.
 */
static inline uint64_t
pt_attr_twig_shift(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].shift;
}

/**
 * Return the mask for getting a twig table index out of a virtual address. This
 * can be combined with the value returned by pt_attr_twig_shift() to get the
 * index into a twig table.
 */
static __unused inline uint64_t
pt_attr_twig_index_mask(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].index_mask;
}

/**
 * Return the amount of memory that a leaf table takes up. This is equivalent
 * to the amount of virtual address space covered by a single twig TTE.
 */
static inline uint64_t
pt_attr_leaf_table_size(const pt_attr_t * const pt_attr)
{
	return pt_attr_twig_size(pt_attr);
}

/**
 * Return the offset mask for the memory used by a leaf page table.
 *
 * This should be equivalent to the value returned by pt_attr_twig_size() - 1.
 */
static inline uint64_t
pt_attr_leaf_table_offmask(const pt_attr_t * const pt_attr)
{
	return pt_attr_twig_offmask(pt_attr);
}
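
/**
 * Illustrative usage sketch (hypothetical caller, not taken from the pmap
 * proper): since a single twig TTE covers exactly one leaf table's worth of
 * VA, these getters are commonly combined to walk an address range one leaf
 * table at a time, e.g.:
 *
 *     // Round va down to the start of the VA region covered by its leaf
 *     // table, then step to the next leaf-table boundary.
 *     const vm_map_address_t start = va & ~pt_attr_leaf_table_offmask(pt_attr);
 *     const vm_map_address_t next = start + pt_attr_leaf_table_size(pt_attr);
 */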

/**
 * Return the Access Permissions bits required to specify User and Kernel
 * Read/Write permissions on a PTE in this type of page table hierarchy (stage 1
 * vs stage 2).
 */
static inline uintptr_t
pt_attr_leaf_rw(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_rw;
}

/**
 * Return the Access Permissions bits required to specify User and Kernel
 * Read-Only permissions on a PTE in this type of page table hierarchy (stage 1
 * vs stage 2).
 */
static inline uintptr_t
pt_attr_leaf_ro(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_ro;
}

/**
 * Return the Access Permissions bits required to specify just Kernel Read-Only
 * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
 * 2).
 */
static inline uintptr_t
pt_attr_leaf_rona(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_rona;
}

/**
 * Return the Access Permissions bits required to specify just Kernel Read/Write
 * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
 * 2).
 */
static inline uintptr_t
pt_attr_leaf_rwna(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_rwna;
}

/**
 * Return the mask of the page table entry bits required to set both the
 * privileged and unprivileged execute never bits.
 */
static inline uintptr_t
pt_attr_leaf_xn(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_xn;
}

/**
 * Return the mask of the page table entry bits required to set just the
 * privileged execute never bit.
 */
static inline uintptr_t
pt_attr_leaf_x(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_x;
}
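
/**
 * Illustrative sketch (hypothetical template construction, not lifted from the
 * pmap proper): because these getters hide the stage 1 vs stage 2 encoding
 * differences, callers can build the permission portion of a PTE template
 * without caring which kind of pmap they are dealing with, e.g.:
 *
 *     // User/kernel read-write data mapping that is never executable.
 *     const uintptr_t rw_nx_bits = pt_attr_leaf_rw(pt_attr) | pt_attr_leaf_xn(pt_attr);
 *
 *     // Kernel-only read-only mapping.
 *     const uintptr_t kernel_ro_bits = pt_attr_leaf_rona(pt_attr);
 */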

/**
 * Return the last level in the page table hierarchy.
 */
static inline unsigned int
pt_attr_leaf_level(const pt_attr_t * const pt_attr)
{
	return pt_attr_twig_level(pt_attr) + 1;
}

/**
 * Return the index into a specific level of page table for a given virtual
 * address.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param addr The virtual address to get the index from.
 * @param pt_level The page table level whose index should be returned.
 */
static inline unsigned int
ttn_index(const pt_attr_t * const pt_attr, vm_map_address_t addr, unsigned int pt_level)
{
	const uint64_t index_unshifted = addr & pt_attr_ln_index_mask(pt_attr, pt_level);
	return (unsigned int)(index_unshifted >> pt_attr_ln_shift(pt_attr, pt_level));
}
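
/**
 * Worked example (assuming a hypothetical 16KB-granule geometry in which the
 * L3 index mask covers VA bits [24:14]): for addr == 0x100004000,
 *
 *     addr & index_mask == 0x4000
 *     0x4000 >> 14      == 1
 *
 * so ttn_index(pt_attr, 0x100004000, PMAP_TT_L3_LEVEL) would return 1, i.e.
 * the second entry of the leaf table covering that address.
 */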

/**
 * Return the index into a twig page table for a given virtual address.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param addr The virtual address to get the index from.
 */
static inline unsigned int
tte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
{
	return ttn_index(pt_attr, addr, PMAP_TT_L2_LEVEL);
}

/**
 * Return the index into a leaf page table for a given virtual address.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param addr The virtual address to get the index from.
 */
static inline unsigned int
pte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
{
	return ttn_index(pt_attr, addr, PMAP_TT_L3_LEVEL);
}

/**
 * Given an address and a map, compute the address of the table entry at the
 * specified page table level. If the address is invalid with respect to the map
 * then TT_ENTRY_NULL is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param target_level The page table level at which to stop parsing the
 *                     hierarchy.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_ttne(pmap_t pmap, unsigned int target_level, vm_map_address_t addr)
{
	tt_entry_t *table_ttep = TT_ENTRY_NULL;
	tt_entry_t *ttep = TT_ENTRY_NULL;
	tt_entry_t tte = ARM_TTE_EMPTY;
	unsigned int cur_level;

	const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);

	if (__improbable((addr < pmap->min) || (addr >= pmap->max))) {
		return TT_ENTRY_NULL;
	}

	/* Start parsing at the root page table. */
	table_ttep = pmap->tte;

	assert(target_level <= pt_attr->pta_max_level);

	for (cur_level = pt_attr->pta_root_level; cur_level <= target_level; cur_level++) {
		ttep = &table_ttep[ttn_index(pt_attr, addr, cur_level)];

		if (cur_level == target_level) {
			break;
		}

		tte = *ttep;

#if MACH_ASSERT
		if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID)) {
			panic("%s: Attempt to demote L%u block, tte=0x%llx, pmap=%p, target_level=%u, addr=%p",
			    __func__, cur_level, tte, pmap, target_level, (void*)addr);
		}
#endif
		if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) {
			return TT_ENTRY_NULL;
		}

		table_ttep = (tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK);
	}

	return ttep;
}
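
/**
 * Illustrative sketch only (this helper is hypothetical and not part of the
 * original pmap interface): one common way to use pmap_ttne() is to walk all
 * the way down to the leaf level and test whether anything is currently mapped
 * at an address. The valid_mask from the level info is used so that the same
 * check works across geometries (including stage 1 vs stage 2).
 */
__unused static inline bool
pmap_example_leaf_entry_valid(pmap_t pmap, vm_map_address_t addr)
{
	const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
	const unsigned int leaf_level = pt_attr_leaf_level(pt_attr);

	/* Walk the hierarchy down to the leaf table entry for this address. */
	tt_entry_t * const ttep = pmap_ttne(pmap, leaf_level, addr);

	if (ttep == TT_ENTRY_NULL) {
		/* The VA is outside the pmap, or an intermediate table is missing. */
		return false;
	}

	/* Check the entry's valid bit(s) as described by this geometry. */
	return (*ttep & pt_attr->pta_level_info[leaf_level].valid_mask) != 0;
}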

/**
 * Given an address and a map, compute the address of the level 1 translation
 * table entry. If the address is invalid with respect to the map then
 * TT_ENTRY_NULL is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_tt1e(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_ttne(pmap, PMAP_TT_L1_LEVEL, addr);
}

/**
 * Given an address and a map, compute the address of the level 2 translation
 * table entry. If the address is invalid with respect to the map then
 * TT_ENTRY_NULL is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_tt2e(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_ttne(pmap, PMAP_TT_L2_LEVEL, addr);
}

/**
 * Given an address and a map, compute the address of the level 3 page table
 * entry. If the address is invalid with respect to the map then PT_ENTRY_NULL
 * is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline pt_entry_t *
pmap_tt3e(pmap_t pmap, vm_map_address_t addr)
{
	return (pt_entry_t*)pmap_ttne(pmap, PMAP_TT_L3_LEVEL, addr);
}

/**
 * Given an address and a map, compute the address of the twig translation table
 * entry. If the address is invalid with respect to the map then TT_ENTRY_NULL
 * is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_tte(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_tt2e(pmap, addr);
}

/**
 * Given an address and a map, compute the address of the leaf page table entry.
 * If the address is invalid with respect to the map then PT_ENTRY_NULL is
 * returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_tt3e(pmap, addr);
}


#endif /* _ARM_PMAP_PMAP_PT_GEOMETRY_H_ */