/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	vm/pmap.h
 * Author:	Avadis Tevanian, Jr.
 * Date:	1985
 *
 * Machine address mapping definitions -- machine-independent
 * section. [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#include <kern/trustcache.h>

#ifdef KERNEL_PRIVATE

/*
 * The following is a description of the interface to the
 * machine-dependent "physical map" data structure. The module
 * must provide a "pmap_t" data type that represents the
 * set of valid virtual-to-physical addresses for one user
 * address space. [The kernel address space is represented
 * by a distinguished "pmap_t".] The routines described manage
 * this type, install and update virtual-to-physical mappings,
 * and perform operations on physical addresses common to
 * many address spaces.
 */
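
/*
 * Illustrative lifecycle sketch, not part of the interface: the ledger,
 * virtual address "va" and page number "pn" below are hypothetical, and
 * most of the routines used are declared under MACH_KERNEL_PRIVATE below.
 *
 *	pmap_t pmap = pmap_create(ledger, 0, TRUE);	// 64-bit user pmap
 *	kern_return_t kr = pmap_enter(pmap, va, pn,
 *	    VM_PROT_READ | VM_PROT_WRITE,		// protection
 *	    VM_PROT_WRITE,				// fault type
 *	    0, FALSE);					// flags; not wired
 *	...
 *	pmap_remove(pmap, va, va + PAGE_SIZE);		// tear down the mapping
 *	pmap_destroy(pmap);				// drop the reference
 */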

/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t copypv(
	addr64_t source,
	addr64_t sink,
	unsigned int size,
	int which);
#define cppvPsnk        1
#define cppvPsnkb       31
#define cppvPsrc        2
#define cppvPsrcb       30
#define cppvFsnk        4
#define cppvFsnkb       29
#define cppvFsrc        8
#define cppvFsrcb       28
#define cppvNoModSnk    16
#define cppvNoModSnkb   27
#define cppvNoRefSrc    32
#define cppvNoRefSrcb   26
#define cppvKmap        64      /* Use the kernel's vm_map */
#define cppvKmapb       25
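
/*
 * The "which" argument is an OR of the cppv* masks above; each cppv*b
 * value is the same flag expressed as a big-endian bit number. A sketch
 * (hypothetical addresses): copy one page from a physical source page to
 * a kernel-virtual sink, flushing the sink's caches afterwards.
 *
 *	kern_return_t kr = copypv((addr64_t)src_phys,	// physical source
 *	    (addr64_t)dst_kva,				// virtual sink
 *	    PAGE_SIZE,
 *	    cppvPsrc | cppvFsnk | cppvKmap);		// src is physical; flush
 *							// sink; kernel vm_map
 */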

extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>

/*
 * Routines used for initialization.
 * There is traditionally also a pmap_bootstrap,
 * used very early by machine-dependent code,
 * but it is not part of the interface.
 *
 * LP64todo -
 * These interfaces are tied to the size of the
 * kernel pmap - and therefore use the "local"
 * vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size);
/* During VM initialization,
 * steal a chunk of memory.
 */
extern unsigned int pmap_free_pages(void);      /* During VM initialization,
                                                 * report remaining unused
                                                 * physical pages.
                                                 */
extern void pmap_startup(
	vm_offset_t *startp,
	vm_offset_t *endp);
/* During VM initialization,
 * use remaining physical pages
 * to allocate page frames.
 */
extern void pmap_init(void);
/* Initialization,
 * after kernel runs
 * in virtual memory.
 */

extern void mapping_adjust(void);       /* Adjust free mapping count */

extern void mapping_free_prime(void);   /* Primes the mapping block release list */

#ifndef MACHINE_PAGES
/*
 * If machine/pmap.h defines MACHINE_PAGES, it must implement
 * the above functions. The pmap module has complete control.
 * Otherwise, it must implement
 *	pmap_free_pages
 *	pmap_virtual_space
 *	pmap_next_page
 *	pmap_init
 * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 * using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 * and pmap_enter. pmap_free_pages may over-estimate the number
 * of unused physical pages, and pmap_next_page may return FALSE
 * to indicate that there are no more unused pages to return.
 * However, for best performance pmap_free_pages should be accurate.
 */

extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum);
/* During VM initialization,
 * return the next unused
 * physical page.
 */
extern void pmap_virtual_space(
	vm_offset_t *virtual_start,
	vm_offset_t *virtual_end);
/* During VM initialization,
 * report virtual space
 * available for the kernel.
 */
#endif  /* MACHINE_PAGES */
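
/*
 * A minimal sketch of how the generic pmap_startup() path in
 * vm/vm_resident.c drains this iterator; the vm_page bookkeeping is
 * elided and the local name is hypothetical.
 *
 *	ppnum_t pn;
 *
 *	while (pmap_next_page(&pn)) {
 *		// FALSE means no unused pages remain; otherwise,
 *		// initialize a vm_page for physical page "pn" and
 *		// hand it to the VM's free list
 *	}
 */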

/*
 * Routines to manage the physical map data structure.
 */
extern pmap_t pmap_create(      /* Create a pmap_t. */
	ledger_t ledger,
	vm_map_size_t size,
	boolean_t is_64bit);
#if __x86_64__
extern pmap_t pmap_create_options(
	ledger_t ledger,
	vm_map_size_t size,
	int flags);
#endif

extern pmap_t (pmap_kernel)(void);              /* Return the kernel's pmap */
extern void pmap_reference(pmap_t pmap);        /* Gain a reference. */
extern void pmap_destroy(pmap_t pmap);          /* Release a reference. */
extern void pmap_switch(pmap_t);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t pmap_enter(        /* Enter a mapping */
	pmap_t pmap,
	vm_map_offset_t v,
	ppnum_t pn,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired);

extern kern_return_t pmap_enter_options(
	pmap_t pmap,
	vm_map_offset_t v,
	ppnum_t pn,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	void *arg);

extern void pmap_remove_some_phys(
	pmap_t pmap,
	ppnum_t pn);

extern void pmap_lock_phys_page(
	ppnum_t pn);

extern void pmap_unlock_phys_page(
	ppnum_t pn);

/*
 * Routines that operate on physical addresses.
 */

extern void pmap_page_protect(  /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t prot);

extern void pmap_page_protect_options(  /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t prot,
	unsigned int options,
	void *arg);

extern void (pmap_zero_page)(
	ppnum_t pn);

extern void (pmap_zero_part_page)(
	ppnum_t pn,
	vm_offset_t offset,
	vm_size_t len);

extern void (pmap_copy_page)(
	ppnum_t src,
	ppnum_t dest);

extern void (pmap_copy_part_page)(
	ppnum_t src,
	vm_offset_t src_offset,
	ppnum_t dst,
	vm_offset_t dst_offset,
	vm_size_t len);

extern void (pmap_copy_part_lpage)(
	vm_offset_t src,
	ppnum_t dst,
	vm_offset_t dst_offset,
	vm_size_t len);

extern void (pmap_copy_part_rpage)(
	ppnum_t src,
	vm_offset_t src_offset,
	vm_offset_t dst,
	vm_size_t len);
extern unsigned int (pmap_disconnect)(  /* disconnect all mappings of the page
                                         * and return its reference/modify state */
	ppnum_t phys);

extern unsigned int (pmap_disconnect_options)(  /* disconnect all mappings of the page
                                                 * and return its reference/modify state */
	ppnum_t phys,
	unsigned int options,
	void *arg);

extern kern_return_t (pmap_attribute_cache_sync)(  /* Flush the appropriate
                                                    * cache for the given
                                                    * page number */
	ppnum_t pn,
	vm_size_t size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

extern unsigned int (pmap_cache_attributes)(
	ppnum_t pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern void pmap_set_cache_attributes(
	ppnum_t,
	unsigned int);
#if defined(__arm__) || defined(__arm64__)
/* ARM64_TODO */
extern boolean_t pmap_batch_set_cache_attributes(
	ppnum_t,
	unsigned int,
	unsigned int,
	unsigned int,
	boolean_t,
	unsigned int*);
#endif
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 */
extern boolean_t pmap_verify_free(ppnum_t pn);

/*
 * Statistics routines
 */
extern int (pmap_compressed)(pmap_t pmap);
extern int (pmap_resident_count)(pmap_t pmap);
extern int (pmap_resident_max)(pmap_t pmap);

/*
 * Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void pmap_collect(pmap_t pmap);  /* Perform garbage
                                         * collection, if any */
#endif
/*
 * Optional routines
 */
extern void (pmap_copy)(        /* Copy range of mappings,
                                 * if desired. */
	pmap_t dest,
	pmap_t source,
	vm_map_offset_t dest_va,
	vm_map_size_t size,
	vm_map_offset_t source_va);

extern kern_return_t (pmap_attribute)(  /* Get/Set special memory
                                         * attributes */
	pmap_t pmap,
	vm_map_offset_t va,
	vm_map_size_t size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

/*
 * Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) { \
	pmap_t pmap; \
 \
	pmap = (thr)->map->pmap; \
	if (pmap != pmap_kernel()) \
		PMAP_ACTIVATE(pmap, (thr), (cpu)); \
}
#endif /* PMAP_ACTIVATE */
#endif /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) { \
	pmap_t pmap; \
 \
	pmap = (thr)->map->pmap; \
	if ((pmap) != pmap_kernel()) \
		PMAP_DEACTIVATE(pmap, (thr), (cpu)); \
}
#endif /* PMAP_DEACTIVATE */
#endif /* PMAP_DEACTIVATE_USER */

#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu) \
	PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif /* PMAP_ACTIVATE */
#endif /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu) \
	PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif /* PMAP_DEACTIVATE */
#endif /* PMAP_DEACTIVATE_KERNEL */

#ifndef PMAP_ENTER
/*
 * Macro to be used in place of pmap_enter(); it derives the
 * PMAP_OPTIONS_INTERNAL and PMAP_OPTIONS_REUSABLE options from the
 * page and its object before calling pmap_enter_options().
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
	flags, wired, result) \
	MACRO_BEGIN \
	pmap_t __pmap = (pmap); \
	vm_page_t __page = (page); \
	int __options = 0; \
	vm_object_t __obj; \
 \
	PMAP_ENTER_CHECK(__pmap, __page) \
	__obj = VM_PAGE_OBJECT(__page); \
	if (__obj->internal) { \
		__options |= PMAP_OPTIONS_INTERNAL; \
	} \
	if (__page->vmp_reusable || __obj->all_reusable) { \
		__options |= PMAP_OPTIONS_REUSABLE; \
	} \
	result = pmap_enter_options(__pmap, \
	    (virtual_address), \
	    VM_PAGE_GET_PHYS_PAGE(__page), \
	    (protection), \
	    (fault_type), \
	    (flags), \
	    (wired), \
	    __options, \
	    NULL); \
	MACRO_END
#endif /* !PMAP_ENTER */

#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \
	fault_type, flags, wired, options, result) \
	MACRO_BEGIN \
	pmap_t __pmap = (pmap); \
	vm_page_t __page = (page); \
	int __extra_options = 0; \
	vm_object_t __obj; \
 \
	PMAP_ENTER_CHECK(__pmap, __page) \
	__obj = VM_PAGE_OBJECT(__page); \
	if (__obj->internal) { \
		__extra_options |= PMAP_OPTIONS_INTERNAL; \
	} \
	if (__page->vmp_reusable || __obj->all_reusable) { \
		__extra_options |= PMAP_OPTIONS_REUSABLE; \
	} \
	result = pmap_enter_options(__pmap, \
	    (virtual_address), \
	    VM_PAGE_GET_PHYS_PAGE(__page), \
	    (protection), \
	    (fault_type), \
	    (flags), \
	    (wired), \
	    (options) | __extra_options, \
	    NULL); \
	MACRO_END
#endif /* !PMAP_ENTER_OPTIONS */

#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \
	MACRO_BEGIN \
	if (!batch_pmap_op) { \
		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
		object->set_cache_attr = TRUE; \
	} \
	MACRO_END
#endif /* PMAP_SET_CACHE_ATTR */

#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if defined(__arm__) || defined(__arm64__)
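/*
 * A reading of the macro below (illustrative, not a contract): the first
 * loop calls pmap_batch_set_cache_attributes() with its boolean argument
 * FALSE to probe whether every page can be handled as one batch; the
 * second loop then either commits the batch by passing TRUE, or falls
 * back to per-page pmap_set_cache_attributes() calls.
 */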
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \
	cache_attr, num_pages, batch_pmap_op) \
	MACRO_BEGIN \
	if ((batch_pmap_op)) { \
		unsigned int __page_idx = 0; \
		unsigned int res = 0; \
		boolean_t batch = TRUE; \
		while (__page_idx < (num_pages)) { \
			if (!pmap_batch_set_cache_attributes( \
				user_page_list[__page_idx].phys_addr, \
				(cache_attr), \
				(num_pages), \
				(__page_idx), \
				FALSE, \
				(&res))) { \
				batch = FALSE; \
				break; \
			} \
			__page_idx++; \
		} \
		__page_idx = 0; \
		res = 0; \
		while (__page_idx < (num_pages)) { \
			if (batch) \
				(void)pmap_batch_set_cache_attributes( \
					user_page_list[__page_idx].phys_addr, \
					(cache_attr), \
					(num_pages), \
					(__page_idx), \
					TRUE, \
					(&res)); \
			else \
				pmap_set_cache_attributes( \
					user_page_list[__page_idx].phys_addr, \
					(cache_attr)); \
			__page_idx++; \
		} \
		(object)->set_cache_attr = TRUE; \
	} \
	MACRO_END
#else
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \
	cache_attr, num_pages, batch_pmap_op) \
	MACRO_BEGIN \
	if ((batch_pmap_op)) { \
		unsigned int __page_idx = 0; \
		while (__page_idx < (num_pages)) { \
			pmap_set_cache_attributes( \
				user_page_list[__page_idx].phys_addr, \
				(cache_attr)); \
			__page_idx++; \
		} \
		(object)->set_cache_attr = TRUE; \
	} \
	MACRO_END
#endif
#endif /* PMAP_BATCH_SET_CACHE_ATTR */

#define PMAP_ENTER_CHECK(pmap, page) \
{ \
	if ((page)->vmp_error) { \
		panic("VM page %p should not have an error\n", \
		    (page)); \
	} \
}

/*
 * Routines to manage reference/modify bits based on
 * physical addresses, simulating them if not provided
 * by the hardware.
 */
struct pfc {
	long pfc_cpus;
	long pfc_invalid_global;
};

typedef struct pfc pmap_flush_context;

/* Clear reference bit */
extern void pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t (pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
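
/*
 * A sketch of reading and then clearing both bits for a physical page
 * (hypothetical caller; "pn" obtained elsewhere):
 *
 *	unsigned int refmod = pmap_get_refmod(pn);
 *
 *	if (refmod & VM_MEM_MODIFIED) {
 *		... the page is dirty and must be cleaned before reuse ...
 *	}
 *	pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
 */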

extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
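
/*
 * A sketch of the deferred-flush pattern (as used by the VM pageout
 * code; locals are hypothetical): with PMAP_OPTIONS_NOFLUSH each
 * per-page call records its TLB invalidations in the context instead
 * of flushing immediately, and a single pmap_flush() issues them all.
 *
 *	pmap_flush_context pfc;
 *
 *	pmap_flush_context_init(&pfc);
 *	pmap_clear_refmod_options(pn, VM_MEM_REFERENCED,
 *	    PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
 *	... more pages ...
 *	pmap_flush(&pfc);
 */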

/*
 * Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect(       /* Change protections. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t prot);

extern void pmap_protect_options(       /* Change protections. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t prot,
	unsigned int options,
	void *arg);

extern void (pmap_pageable)(
	pmap_t pmap,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t pageable);

extern uint64_t pmap_nesting_size_min;
extern uint64_t pmap_nesting_size_max;

extern kern_return_t pmap_nest(pmap_t,
    pmap_t,
    addr64_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
    addr64_t,
    uint64_t);

#define PMAP_UNNEST_CLEAN       1

extern kern_return_t pmap_unnest_options(pmap_t,
    addr64_t,
    uint64_t,
    unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif /* MACH_KERNEL_PRIVATE */

extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t kernel_pmap;      /* The kernel's map */
#define pmap_kernel()   (kernel_pmap)

/* machine independent WIMG bits */

#define VM_MEM_GUARDED          0x1     /* (G) Guarded Storage */
#define VM_MEM_COHERENT         0x2     /* (M) Memory Coherency */
#define VM_MEM_NOT_CACHEABLE    0x4     /* (I) Cache Inhibit */
#define VM_MEM_WRITE_THROUGH    0x8     /* (W) Write-Through */

#define VM_WIMG_USE_DEFAULT     0x80
#define VM_WIMG_MASK            0xFF

#define VM_MEM_SUPERPAGE        0x100   /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200

#if __x86_64__
/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT       0x1
#define PMAP_CREATE_EPT         0x2
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT)
#endif

#define PMAP_OPTIONS_NOWAIT     0x1     /* don't block, return
                                         * KERN_RESOURCE_SHORTAGE
                                         * instead */
#define PMAP_OPTIONS_NOENTER    0x2     /* expand pmap if needed
                                         * but don't enter mapping
                                         */
#define PMAP_OPTIONS_COMPRESSOR 0x4     /* credit the compressor for
                                         * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8     /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10    /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20    /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40    /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80    /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100   /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE       0x200   /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE     0x400   /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
                                                    * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000   /* allow protections to be
                                                 * upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE 0x2000
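
/*
 * A sketch of a non-blocking mapping attempt (hypothetical call site):
 * PMAP_OPTIONS_NOWAIT turns a potential wait for page-table memory into
 * a KERN_RESOURCE_SHORTAGE return that the caller may retry.
 *
 *	kr = pmap_enter_options(pmap, va, pn,
 *	    VM_PROT_READ, VM_PROT_READ,	// protection, fault type
 *	    0, FALSE,			// flags; not wired
 *	    PMAP_OPTIONS_NOWAIT, NULL);
 *	if (kr == KERN_RESOURCE_SHORTAGE) {
 *		... back off and retry later ...
 *	}
 */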

#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif
extern void pmap_change_wiring( /* Specify pageability */
	pmap_t pmap,
	vm_map_offset_t va,
	boolean_t wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void pmap_remove(        /* Remove mappings. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e);

extern void pmap_remove_options(        /* Remove mappings. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int options);

extern void fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
#endif

mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size);

/*
 * Dump page table contents into the specified buffer. Returns the number of
 * bytes copied, 0 if insufficient space, (size_t)-1 if unsupported.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */
extern size_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end);

/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
bool pmap_has_prot_policy(vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
uint64_t pmap_release_pages_fast(void);

#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
	pmap_t pmap,
	vm_map_offset_t va,
	int *disp);
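
/*
 * A sketch of testing a page's disposition (hypothetical caller):
 *
 *	int disp = 0;
 *
 *	if (pmap_query_page_info(pmap, va, &disp) == KERN_SUCCESS &&
 *	    (disp & PMAP_QUERY_PAGE_COMPRESSED)) {
 *		... the page currently lives in the compressor ...
 *	}
 */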

#if CONFIG_PGTRACE
int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#endif

extern void pmap_ledger_alloc_init(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);

#endif /* KERNEL_PRIVATE */

#endif /* _VM_PMAP_H_ */