/*
 * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>
#include <vm/vm_options.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */
52 | |
53 | /* |
54 | * iokit |
55 | */ |
56 | extern kern_return_t device_data_action( |
57 | uintptr_t device_handle, |
58 | ipc_port_t , |
59 | vm_prot_t protection, |
60 | vm_object_offset_t offset, |
61 | vm_size_t size); |
62 | |
63 | extern kern_return_t device_close( |
64 | uintptr_t device_handle); |
65 | |
66 | extern boolean_t vm_swap_files_pinned(void); |

/*
 * osfmk
 */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
	ipc_port_t sright,
	ipc_space_t space);
extern mach_port_name_t ipc_port_copyout_send_pinned(
	ipc_port_t sright,
	ipc_space_t space);
#endif /* _IPC_IPC_PORT_H_ */

#ifndef _KERN_IPC_TT_H_

#define port_name_to_task(name) port_name_to_task_kernel(name)

extern task_t port_name_to_task_kernel(
	mach_port_name_t name);
extern task_t port_name_to_task_read(
	mach_port_name_t name);
extern task_t port_name_to_task_name(
	mach_port_name_t name);
extern void ipc_port_release_send(
	ipc_port_t port);
#endif /* _KERN_IPC_TT_H_ */

extern ipc_space_t get_task_ipcspace(
	task_t t);
#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */

/* Some loose-ends VM stuff */

extern const vm_size_t msg_ool_size_small;

extern kern_return_t vm_tests(void);
extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_task_page_size(task_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern kern_return_t vm_map_purgable_control(
	vm_map_t map,
	vm_map_offset_t address,
	vm_purgable_t control,
	int *state);

#if MACH_ASSERT
extern void vm_map_pmap_set_process(
	vm_map_t map,
	int pid,
	char *procname);
extern void vm_map_pmap_check_ledgers(
	pmap_t pmap,
	ledger_t ledger,
	int pid,
	char *procname);
#endif /* MACH_ASSERT */
extern kern_return_t vnode_pager_get_object_vnode(
	memory_object_t mem_obj,
	uintptr_t *vnodeaddr,
	uint32_t *vid);

#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va);
#endif

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
	vm_map_t target_task,
	upl_t upl,
	vm_address_t *address
);

extern kern_return_t vm_upl_unmap
(
	vm_map_t target_task,
	upl_t upl
);

extern kern_return_t vm_upl_map_range
(
	vm_map_t target_task,
	upl_t upl,
	vm_offset_t offset,
	vm_size_t size,
	vm_prot_t prot,
	vm_address_t *address
);

extern kern_return_t vm_upl_unmap_range
(
	vm_map_t target_task,
	upl_t upl,
	vm_offset_t offset,
	vm_size_t size
);

extern kern_return_t vm_region_object_create
(
	vm_map_t target_task,
	vm_size_t size,
	ipc_port_t *object_handle
);

#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	uint32_t cryptid);
extern memory_object_t apple_protect_pager_setup(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t crypto_start,
	vm_object_offset_t crypto_end,
	boolean_t cache_pager);
#endif /* CONFIG_CODE_DECRYPTION */

struct vm_shared_region_slide_info;
extern kern_return_t vm_map_shared_region(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_object_offset_t backing_offset,
	struct vm_shared_region_slide_info *slide_info);

extern memory_object_t shared_region_pager_setup(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	struct vm_shared_region_slide_info *slide_info,
	uint64_t jop_key);

extern uint64_t (void);
extern uint64_t (void);
extern uint64_t (void);

#if __has_feature(ptrauth_calls)
extern memory_object_t shared_region_pager_match(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	struct vm_shared_region_slide_info *slide_info,
	uint64_t jop_key);
extern void shared_region_key_alloc(
	char *shared_region_id,
	bool inherit,
	uint64_t inherited_key);
extern void shared_region_key_dealloc(
	char *shared_region_id);
extern uint64_t generate_jop_key(void);
extern void shared_region_pager_match_task_key(memory_object_t memobj, task_t task);
#endif /* __has_feature(ptrauth_calls) */
extern bool vm_shared_region_is_reslide(struct task *task);

struct vnode;
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);

#if __arm64__ || (__ARM_ARCH_7K__ >= 2)
#define SIXTEENK_PAGE_SIZE 0x4000
#define SIXTEENK_PAGE_MASK 0x3FFF
#define SIXTEENK_PAGE_SHIFT 14
#endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */

#define FOURK_PAGE_SIZE 0x1000
#define FOURK_PAGE_MASK 0xFFF
#define FOURK_PAGE_SHIFT 12
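
/*
 * These follow the usual MASK == SIZE - 1, SHIFT == log2(SIZE)
 * convention, so rounding an address up to a 4K boundary and
 * computing its page index look like (illustrative only):
 *
 *	rounded = (addr + FOURK_PAGE_MASK) & ~(vm_map_offset_t)FOURK_PAGE_MASK;
 *	pgindex = addr >> FOURK_PAGE_SHIFT;
 */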

#if __arm64__

extern unsigned int page_shift_user32;

#define VM_MAP_DEBUG_FOURK MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
	memory_object_t mem_obj,
	boolean_t overwrite,
	int index,
	vm_object_t new_backing_object,
	vm_object_offset_t new_backing_offset,
	vm_object_t *old_backing_object,
	vm_object_offset_t *old_backing_offset);
#endif /* __arm64__ */

/*
 * bsd
 */
struct vnode;

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

extern void vnode_pager_was_dirtied(
	struct vnode *,
	vm_object_offset_t,
	vm_object_offset_t);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
	struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
	struct vnode *);
extern uint32_t vnode_pager_isinuse(
	struct vnode *);
extern boolean_t vnode_pager_isSSD(
	struct vnode *);
#if FBDP_DEBUG_OBJECT_NO_PAGER
extern bool vnode_pager_forced_unmount(
	struct vnode *);
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
extern void vnode_pager_throttle(
	void);
extern uint32_t vnode_pager_return_throttle_io_limit(
	struct vnode *,
	uint32_t *);
extern kern_return_t vnode_pager_get_name(
	struct vnode *vp,
	char *pathname,
	vm_size_t pathname_len,
	char *filename,
	vm_size_t filename_len,
	boolean_t *truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
	struct vnode *vp,
	struct timespec *mtime,
	struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
	struct vnode *vp,
	void **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
	struct vnode *devvp,
	uint64_t blkno,
	uint32_t len,
	int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_check_validation_bitmap routine */
#define CS_BITMAP_SET 1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3
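
/*
 * Illustrative sketch of how these ops drive the bitmap routine
 * declared below: mark a page validated, then query it later, e.g.
 *
 *	vnode_pager_cs_check_validation_bitmap(mem_obj, offset, CS_BITMAP_SET);
 *	if (vnode_pager_cs_check_validation_bitmap(mem_obj, offset,
 *	    CS_BITMAP_CHECK) == KERN_SUCCESS) {
 *		... page was previously validated ...
 *	}
 */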

#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t vnode_pager_data_unlock(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
	memory_object_t,
	uintptr_t *);
#endif

extern void vnode_pager_dirtied(
	memory_object_t,
	vm_object_offset_t,
	vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
	memory_object_t,
	boolean_t *);
#if FBDP_DEBUG_OBJECT_NO_PAGER
extern kern_return_t vnode_pager_get_forced_unmount(
	memory_object_t,
	bool *);
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
extern kern_return_t vnode_pager_get_throttle_io_limit(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
	memory_object_t mem_obj,
	char *pathname,
	vm_size_t pathname_len,
	char *filename,
	vm_size_t filename_len,
	boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
	memory_object_t mem_obj,
	struct timespec *mtime,
	struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t ubc_cs_check_validation_bitmap(
	struct vnode *vp,
	memory_object_offset_t offset,
	int optype);

extern kern_return_t vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	vm_prot_t,
	memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	memory_object_offset_t *,
	int *,
	boolean_t,
	boolean_t,
	int);
extern kern_return_t vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t);
extern void vnode_pager_reference(
	memory_object_t mem_obj);
extern kern_return_t vnode_pager_map(
	memory_object_t mem_obj,
	vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
	memory_object_t mem_obj);
extern void vnode_pager_deallocate(
	memory_object_t);
extern kern_return_t vnode_pager_terminate(
	memory_object_t);
extern void vnode_pager_vrele(
	struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
	memory_object_t);

extern int ubc_map(
	struct vnode *vp,
	int flags);
extern void ubc_unmap(
	struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
	memory_object_control_t,
	memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	vm_prot_t,
	memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	memory_object_offset_t *,
	int *,
	boolean_t,
	boolean_t,
	int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
	memory_object_t device,
	memory_object_offset_t offset,
	ppnum_t page_num,
	vm_size_t size);
extern memory_object_t device_pager_setup(
	memory_object_t,
	uintptr_t,
	vm_size_t,
	int);

extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *);

extern kern_return_t pager_map_to_phys_contiguous(
	memory_object_control_t object,
	memory_object_offset_t offset,
	addr64_t base_vaddr,
	vm_size_t size);

extern kern_return_t memory_object_create_named(
	memory_object_t pager,
	memory_object_offset_t size,
	memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
	struct macx_triggers_args *args);

extern int macx_swapinfo(
	memory_object_size_t *total_p,
	memory_object_size_t *avail_p,
	vm_size_t *pagesize_p,
	boolean_t *encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
	vm_map_t map,
	vm_map_offset_t start_unnest,
	vm_map_offset_t end_unnest,
	boolean_t is_nested_map,
	vm_map_offset_t lowest_unnestable_addr);

struct proc;
struct proc *current_proc(void);
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

#define CS_VALIDATE_TAINTED 0x00000001
#define CS_VALIDATE_NX 0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
	memory_object_t pager,
	memory_object_offset_t offset,
	const void *data,
	vm_size_t size,
	unsigned *result);
extern void cs_validate_page(
	struct vnode *vp,
	memory_object_t pager,
	memory_object_offset_t offset,
	const void *data,
	int *validated_p,
	int *tainted_p,
	int *nx_p);

extern kern_return_t memory_entry_purgeable_control_internal(
	ipc_port_t entry_port,
	vm_purgable_t control,
	int *state);

extern kern_return_t memory_entry_access_tracking_internal(
	ipc_port_t entry_port,
	int *access_tracking,
	uint32_t *access_tracking_reads,
	uint32_t *access_tracking_writes);

extern kern_return_t mach_memory_object_memory_entry_64(
	host_t host,
	boolean_t internal,
	vm_object_offset_t size,
	vm_prot_t permission,
	memory_object_t pager,
	ipc_port_t *entry_handle);

extern kern_return_t mach_memory_entry_purgable_control(
	ipc_port_t entry_port,
	vm_purgable_t control,
	int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
	ipc_port_t entry_port,
	unsigned int *resident_page_count,
	unsigned int *dirty_page_count);

extern kern_return_t mach_memory_entry_phys_page_offset(
	ipc_port_t entry_port,
	vm_object_offset_t *offset_p);

extern kern_return_t mach_memory_entry_map_size(
	ipc_port_t entry_port,
	vm_map_t map,
	memory_object_offset_t offset,
	memory_object_offset_t size,
	mach_vm_size_t *map_size);

extern kern_return_t vm_map_range_physical_size(
	vm_map_t map,
	vm_map_address_t start,
	mach_vm_size_t size,
	mach_vm_size_t *phys_size);

extern kern_return_t mach_memory_entry_page_op(
	ipc_port_t entry_port,
	vm_object_offset_t offset,
	int ops,
	ppnum_t *phys_entry,
	int *flags);

extern kern_return_t mach_memory_entry_range_op(
	ipc_port_t entry_port,
	vm_object_offset_t offset_beg,
	vm_object_offset_t offset_end,
	int ops,
	int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern vm_named_entry_t mach_memory_entry_from_port(ipc_port_t port);
extern struct vm_named_entry *mach_memory_entry_allocate(ipc_port_t *user_handle_p);
extern vm_object_t vm_named_entry_to_vm_object(
	vm_named_entry_t named_entry);
extern void vm_named_entry_associate_vm_object(
	vm_named_entry_t named_entry,
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	vm_prot_t prot);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

extern unsigned int vmtc_total; /* total # of text page corruptions detected */

extern kern_return_t revalidate_text_page(task_t, vm_map_offset_t);

#define VM_TOGGLE_CLEAR 0
#define VM_TOGGLE_SET 1
#define VM_TOGGLE_GETVALUE 999
int vm_toggle_entry_reuse(int, int*);
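
/*
 * Illustrative usage, assuming a caller that only wants to read the
 * current setting (VM_TOGGLE_GETVALUE leaves it unchanged):
 *
 *	int reuse = 0;
 *	vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &reuse);
 */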

#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define SWAP_READ 0x00000001 /* Read buffer. */
#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */

extern kern_return_t compressor_memory_object_create(
	memory_object_size_t,
	memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern bool vm_compressor_compressed_pages_nearing_limit(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
extern int vm_swap_out_of_space(void);
void do_fastwake_warmup_all(void);

#if defined(__arm64__)
extern void vm_panic_hibernate_write_image_failed(int err);
#endif /* __arm64__ */

#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc*, boolean_t);
#endif /* CONFIG_JETSAM */

/* the object purger. purges the next eligible object from memory. */
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
void vm_purgeable_nonvolatile_owner_update(task_t owner,
	int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
	int delta);
void vm_owned_objects_disown(task_t task);


struct trim_list {
	uint64_t tl_offset;
	uint64_t tl_length;
	struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
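
/*
 * Illustrative only: callers build a singly linked list of
 * { offset, length } extents and hand it to vnode_trim_list(),
 * e.g. two discontiguous ranges on vnode vp:
 *
 *	struct trim_list tail = { 0x10000, 0x4000, NULL };
 *	struct trim_list head = { 0x0, 0x8000, &tail };
 *	vnode_trim_list(vp, &head, FALSE);
 */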

#define MAX_SWAPFILENAME_LEN 1024
#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

struct vm_counters {
	unsigned int do_collapse_compressor;
	unsigned int do_collapse_compressor_pages;
	unsigned int do_collapse_terminate;
	unsigned int do_collapse_terminate_failure;
	unsigned int should_cow_but_wired;
	unsigned int create_upl_extra_cow;
	unsigned int create_upl_extra_cow_pages;
	unsigned int create_upl_lookup_failure_write;
	unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
	int eligible_for_secluded;
	int grab_success_free;
	int grab_success_other;
	int grab_failure_locked;
	int grab_failure_state;
	int grab_failure_realtime;
	int grab_failure_dirty;
	int grab_for_iokit;
	int grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

/* boot-args */

__enum_decl(secluded_filecache_mode_t, uint8_t, {
	/*
	 * SECLUDED_FILECACHE_NONE:
	 * + no file contents in secluded pool
	 */
	SECLUDED_FILECACHE_NONE = 0,
	/*
	 * SECLUDED_FILECACHE_APPS
	 * + no files from /
	 * + files from /Applications/ are OK
	 * + files from /Applications/Camera are not OK
	 * + no files that are open for write
	 */
	SECLUDED_FILECACHE_APPS = 1,
	/*
	 * SECLUDED_FILECACHE_RDONLY
	 * + all read-only files OK, except:
	 *   + dyld_shared_cache_arm64*
	 *   + Camera
	 *   + mediaserverd
	 */
	SECLUDED_FILECACHE_RDONLY = 2,
});

extern secluded_filecache_mode_t secluded_for_filecache;
extern bool secluded_for_apps;
extern bool secluded_for_iokit;

extern uint64_t vm_page_secluded_drain(void);
extern void memory_object_mark_eligible_for_secluded(
	memory_object_control_t control,
	boolean_t eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

extern void memory_object_mark_for_realtime(
	memory_object_control_t control,
	bool for_realtime);

#if FBDP_DEBUG_OBJECT_NO_PAGER
extern kern_return_t memory_object_mark_as_tracked(
	memory_object_control_t control,
	bool new_value,
	bool *old_value);
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */

extern kern_return_t mach_make_memory_entry_internal(
	vm_map_t target_map,
	memory_object_size_t *size,
	memory_object_offset_t offset,
	vm_prot_t permission,
	vm_named_entry_kernel_flags_t vmne_kflags,
	ipc_port_t *object_handle,
	ipc_port_t parent_handle);

extern kern_return_t
memory_entry_check_for_adjustment(
	vm_map_t src_map,
	ipc_port_t port,
	vm_map_offset_t *overmap_start,
	vm_map_offset_t *overmap_end);

extern uint64_t (void);

#define roundup(x, y) ((((x) % (y)) == 0) ? \
	(x) : ((x) + ((y) - ((x) % (y)))))
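
/*
 * e.g. roundup(10, 4) == 12 and roundup(12, 4) == 12. Note that the
 * macro evaluates x and y more than once, so arguments with side
 * effects should be avoided.
 */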

#ifdef __cplusplus
}
#endif

/*
 * Flags for the VM swapper/reclaimer.
 * Used by vm_swap_consider_defragment()
 * to force defrag/reclaim by the swap
 * GC thread.
 */
#define VM_SWAP_FLAGS_NONE 0
#define VM_SWAP_FLAGS_FORCE_DEFRAG 1
#define VM_SWAP_FLAGS_FORCE_RECLAIM 2

#if __arm64__
/*
 * Flags to control the behavior of
 * the legacy footprint entitlement.
 */
#define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE (1)
#define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT (2)
#define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE (3)

#endif /* __arm64__ */

#if MACH_ASSERT
struct proc;
extern struct proc *current_proc(void);
extern int proc_pid(struct proc *);
extern char *proc_best_name(struct proc *);
struct thread;
extern uint64_t thread_tid(struct thread *);
extern int debug4k_filter;
extern int debug4k_proc_filter;
extern char debug4k_proc_name[];
extern const char *debug4k_category_name[];

#define __DEBUG4K(category, fmt, ...) \
	MACRO_BEGIN \
	int __category = (category); \
	struct thread *__t = NULL; \
	struct proc *__p = NULL; \
	const char *__pname = "?"; \
	boolean_t __do_log = FALSE; \
\
	if ((1 << __category) & debug4k_filter) { \
	        __do_log = TRUE; \
	} else if (((1 << __category) & debug4k_proc_filter) && \
	    debug4k_proc_name[0] != '\0') { \
	        __p = current_proc(); \
	        if (__p != NULL) { \
	                __pname = proc_best_name(__p); \
	        } \
	        if (!strcmp(debug4k_proc_name, __pname)) { \
	                __do_log = TRUE; \
	        } \
	} \
	if (__do_log) { \
	        if (__p == NULL) { \
	                __p = current_proc(); \
	                if (__p != NULL) { \
	                        __pname = proc_best_name(__p); \
	                } \
	        } \
	        __t = current_thread(); \
	        printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt, \
	            debug4k_category_name[__category], \
	            __p ? proc_pid(__p) : 0, \
	            __pname, \
	            __t, \
	            thread_tid(__t), \
	            __FUNCTION__, \
	            __LINE__, \
	            ##__VA_ARGS__); \
	} \
	MACRO_END

#define __DEBUG4K_ERROR 0
#define __DEBUG4K_LIFE 1
#define __DEBUG4K_LOAD 2
#define __DEBUG4K_FAULT 3
#define __DEBUG4K_COPY 4
#define __DEBUG4K_SHARE 5
#define __DEBUG4K_ADJUST 6
#define __DEBUG4K_PMAP 7
#define __DEBUG4K_MEMENTRY 8
#define __DEBUG4K_IOKIT 9
#define __DEBUG4K_UPL 10
#define __DEBUG4K_EXC 11
#define __DEBUG4K_VFS 12

#define DEBUG4K_ERROR(...) __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__)
#define DEBUG4K_LIFE(...) __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__)
#define DEBUG4K_LOAD(...) __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__)
#define DEBUG4K_FAULT(...) __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__)
#define DEBUG4K_COPY(...) __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__)
#define DEBUG4K_SHARE(...) __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__)
#define DEBUG4K_ADJUST(...) __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__)
#define DEBUG4K_PMAP(...) __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__)
#define DEBUG4K_MEMENTRY(...) __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__)
#define DEBUG4K_IOKIT(...) __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__)
#define DEBUG4K_UPL(...) __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__)
#define DEBUG4K_EXC(...) __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__)
#define DEBUG4K_VFS(...) __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__)
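
/*
 * Typical usage (illustrative): the category selects a bit in
 * debug4k_filter, and the format string is a printf() format, e.g.
 *
 *	DEBUG4K_FAULT("map %p offset 0x%llx\n", map, (uint64_t)offset);
 */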

#else /* MACH_ASSERT */

#define DEBUG4K_ERROR(...)
#define DEBUG4K_LIFE(...)
#define DEBUG4K_LOAD(...)
#define DEBUG4K_FAULT(...)
#define DEBUG4K_COPY(...)
#define DEBUG4K_SHARE(...)
#define DEBUG4K_ADJUST(...)
#define DEBUG4K_PMAP(...)
#define DEBUG4K_MEMENTRY(...)
#define DEBUG4K_IOKIT(...)
#define DEBUG4K_UPL(...)
#define DEBUG4K_EXC(...)
#define DEBUG4K_VFS(...)

#endif /* MACH_ASSERT */


__enum_decl(vm_object_destroy_reason_t, uint8_t, {
	VM_OBJECT_DESTROY_UNKNOWN_REASON = 0,
	VM_OBJECT_DESTROY_FORCED_UNMOUNT = 1,
	VM_OBJECT_DESTROY_UNGRAFT = 2,
	VM_OBJECT_DESTROY_MAX = 2,
});
_Static_assert(VM_OBJECT_DESTROY_MAX < 4, "Need to fit in `no_pager_reason`'s number of bits");

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */