1 | /* |
2 | * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | /* |
57 | */ |
58 | /* |
59 | * File: vm/vm_kern.h |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
61 | * Date: 1985 |
62 | * |
63 | * Kernel memory management definitions. |
64 | */ |
65 | |
66 | #ifndef _VM_VM_KERN_H_ |
67 | #define _VM_VM_KERN_H_ |
68 | |
69 | #include <mach/mach_types.h> |
70 | #include <mach/boolean.h> |
71 | #include <mach/kern_return.h> |
72 | #include <mach/vm_types.h> |
73 | #ifdef XNU_KERNEL_PRIVATE |
74 | #include <kern/locks.h> |
75 | #endif /* XNU_KERNEL_PRIVATE */ |
76 | |
77 | __BEGIN_DECLS |
78 | |
79 | #ifdef KERNEL_PRIVATE |
80 | extern vm_map_t kernel_map; |
81 | extern vm_map_t ipc_kernel_map; |
82 | extern vm_map_t g_kext_map; |
83 | #endif /* KERNEL_PRIVATE */ |
84 | |
85 | #pragma mark - the kmem subsystem |
86 | #ifdef XNU_KERNEL_PRIVATE |
87 | #pragma GCC visibility push(hidden) |
88 | |
89 | /* |
90 | * "kmem" is a set of methods that provide interfaces suitable |
91 | * to allocate memory from the VM in the kernel map or submaps. |
92 | * |
 * It provides leaner alternatives to some of the VM functions,
94 | * closer to a typical allocator. |
95 | */ |
96 | |
97 | struct vm_page; |
98 | struct vm_map_entry; |
99 | |
100 | /*! |
 * @typedef kmem_return_t
102 | * |
103 | * @brief |
104 | * Pair of a return code and size/address/... used by kmem interfaces. |
105 | * |
106 | * @discussion |
 * Using a pair of integers allows the compiler to return everything
 * through registers rather than through values on the stack,
 * which yields significantly better codegen.
110 | * |
111 | * If @c kmr_return is not @c KERN_SUCCESS, then the other field |
112 | * of the union is always supposed to be 0. |
113 | */ |
114 | typedef struct { |
115 | kern_return_t kmr_return; |
116 | union { |
117 | vm_address_t kmr_address; |
118 | vm_size_t kmr_size; |
119 | void *kmr_ptr; |
120 | vm_map_t kmr_submap; |
121 | }; |
122 | } kmem_return_t; |
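/*
 * A minimal consumption sketch (illustrative only; @c my_map, @c my_size
 * and @c use_buffer are hypothetical, and the flags are just an example):
 *
 *     kmem_return_t kmr;
 *
 *     kmr = kmem_alloc_guard(my_map, my_size, 0,
 *         KMA_ZERO | KMA_DATA, KMEM_GUARD_NONE);
 *     if (kmr.kmr_return != KERN_SUCCESS) {
 *         return kmr.kmr_return;      // kmr_address is 0 on failure
 *     }
 *     use_buffer(kmr.kmr_address);    // non-zero on success
 */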
123 | |
124 | /*! |
125 | * @typedef kmem_guard_t |
126 | * |
127 | * @brief |
128 | * KMEM guards are used by the kmem_* subsystem to secure atomic allocations. |
129 | * |
130 | * @discussion |
131 | * This parameter is used to transmit the tag for the allocation. |
132 | * |
133 | * If @c kmg_atomic is set, then the other fields are also taken into account |
134 | * and will affect the allocation behavior for this allocation. |
135 | * |
136 | * @field kmg_tag The VM_KERN_MEMORY_* tag for this entry. |
137 | * @field kmg_type_hash Some hash related to the type of the allocation. |
138 | * @field kmg_atomic Whether the entry is atomic. |
139 | * @field kmg_submap Whether the entry is for a submap. |
 * @field kmg_context A user-defined 30-bit value that is stored
 * on the entry at allocation and checked
 * on other operations.
143 | */ |
144 | typedef struct { |
145 | uint16_t kmg_tag; |
146 | uint16_t kmg_type_hash; |
147 | uint32_t kmg_atomic : 1; |
148 | uint32_t kmg_submap : 1; |
149 | uint32_t kmg_context : 30; |
150 | } kmem_guard_t; |
151 | #define KMEM_GUARD_NONE (kmem_guard_t){ } |
152 | #define KMEM_GUARD_SUBMAP (kmem_guard_t){ .kmg_atomic = 0, .kmg_submap = 1 } |
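/*
 * For instance, an atomic allocation that wants its tag and a caller-chosen
 * context re-checked on later operations could build its guard as below
 * (a sketch; the tag and context values are hypothetical):
 *
 *     kmem_guard_t guard = {
 *         .kmg_tag     = VM_KERN_MEMORY_KALLOC,
 *         .kmg_atomic  = 1,
 *         .kmg_context = 0x1234,  // validated again on free/realloc
 *     };
 */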
153 | |
154 | |
155 | /*! |
156 | * @typedef kmem_flags_t |
157 | * |
158 | * @brief |
159 | * Sets of flags taken by several of the @c kmem_* family of functions. |
160 | * |
161 | * @discussion |
 * This type is not used directly by any function; it is an underlying raw
163 | * type that is re-vended under different namespaces for each @c kmem_* |
164 | * interface. |
165 | * |
166 | * - @c kmem_alloc uses @c kma_flags_t / @c KMA_* namespaced values. |
167 | * - @c kmem_suballoc uses @c kms_flags_t / @c KMS_* namespaced values. |
168 | * - @c kmem_realloc uses @c kmr_flags_t / @c KMR_* namespaced values. |
 * - @c kmem_free uses @c kmf_flags_t / @c KMF_* namespaced values.
170 | * |
171 | * |
172 | * <h2>Call behavior</h2> |
173 | * |
174 | * @const KMEM_NONE (all) |
 * Pass this when no special option is to be used.
176 | * |
177 | * @const KMEM_NOFAIL (alloc, suballoc) |
 * When this flag is passed, any allocation failure results in a panic().
 * Using this flag should really be limited to cases where failure is not
 * recoverable, and possibly to early boot only.
181 | * |
182 | * @const KMEM_NOPAGEWAIT (alloc, realloc) |
183 | * Pass this flag if the system should not wait in VM_PAGE_WAIT(). |
184 | * |
185 | * @const KMEM_FREEOLD (realloc) |
186 | * Pass this flag if @c kmem_realloc should free the old mapping |
187 | * (when the address changed) as part of the call. |
188 | * |
189 | * @const KMEM_REALLOCF (realloc) |
 * Similar to @c Z_REALLOCF: if the call fails,
 * then free the old allocation too.
192 | * |
193 | * |
194 | * <h2>How the entry is populated</h2> |
195 | * |
196 | * @const KMEM_VAONLY (alloc) |
197 | * By default memory allocated by the kmem subsystem is wired and mapped. |
198 | * Passing @c KMEM_VAONLY will cause the range to still be wired, |
199 | * but no page is actually mapped. |
200 | * |
201 | * @const KMEM_PAGEABLE (alloc) |
202 | * By default memory allocated by the kmem subsystem is wired and mapped. |
 * Passing @c KMEM_PAGEABLE makes the entry non-wired, and pages will be
 * added to the entry as it faults.
205 | * |
206 | * @const KMEM_ZERO (alloc, realloc) |
207 | * Any new page added is zeroed. |
208 | * |
209 | * |
210 | * <h2>VM object to use for the entry</h2> |
211 | * |
212 | * @const KMEM_KOBJECT (alloc, realloc) |
213 | * The entry will be made for the @c kernel_object. |
214 | * |
215 | * Note that the @c kernel_object is just a "collection of pages". |
 * Pages in that object can't be remapped or be present in several VM maps
 * like traditional objects.
218 | * |
 * If neither @c KMEM_KOBJECT nor @c KMEM_COMPRESSOR is passed,
 * then a fresh VM object will be made for this allocation.
221 | * This is expensive and should be limited to allocations that |
222 | * need the features associated with a VM object. |
223 | * |
224 | * @const KMEM_COMPRESSOR (alloc) |
225 | * The entry is allocated for the @c compressor_object. |
226 | * Pages belonging to the compressor are not on the paging queues, |
227 | * nor are they counted as wired. |
228 | * |
229 | * Only the VM Compressor subsystem should use this. |
230 | * |
231 | * |
232 | * <h2>How to look for addresses</h2> |
233 | * |
234 | * @const KMEM_LOMEM (alloc, realloc) |
235 | * The physical memory allocated must be in the first 4G of memory, |
 * in order to support hardware controllers incapable of generating DMAs
 * with more than 32 bits of physical address.
238 | * |
239 | * @const KMEM_LAST_FREE (alloc, suballoc, realloc) |
240 | * When looking for space in the specified map, |
241 | * start scanning for addresses from the end of the map |
242 | * rather than the start. |
243 | * |
244 | * @const KMEM_DATA (alloc, suballoc, realloc) |
245 | * The memory must be allocated from the "Data" range. |
246 | * |
247 | * @const KMEM_SPRAYQTN (alloc, realloc) |
248 | * The memory must be allocated from the "spray quarantine" range. For more |
249 | * details on what allocations qualify to use this flag see |
250 | * @c KMEM_RANGE_ID_SPRAYQTN. |
251 | * |
252 | * @const KMEM_GUESS_SIZE (free) |
 * When freeing an atomic entry (which requires a valid kmem guard),
 * look up the entry size because the caller didn't
 * preserve it.
256 | * |
257 | * This flag is only here in order to support kfree_data_addr(), |
258 | * and shall not be used by any other clients. |
259 | * |
260 | * <h2>Entry properties</h2> |
261 | * |
262 | * @const KMEM_PERMANENT (alloc, suballoc) |
263 | * The entry is made permanent. |
264 | * |
265 | * In the kernel maps, permanent entries can never be deleted. |
266 | * Calling @c kmem_free() on such a range will panic. |
267 | * |
 * In user maps, permanent entries will only be deleted
 * when the map is terminated.
270 | * |
271 | * @const KMEM_GUARD_FIRST (alloc, realloc) |
272 | * @const KMEM_GUARD_LAST (alloc, realloc) |
273 | * Asks @c kmem_* to put a guard page at the beginning (resp. end) |
274 | * of the allocation. |
275 | * |
 * The allocation size will not be extended to accommodate for guards,
 * and the client of this interface must take them into account.
 * Typically if a usable range of 3 pages is needed with both guards,
 * then 5 pages must be requested.
280 | * |
 * Alignment constraints take guards into account (the alignment applies
 * to the address right after the first guard page).
283 | * |
 * The returned address for the allocation will point at the entry start,
 * which is the address of the left guard page, if any.
286 | * |
287 | * Note that if @c kmem_realloc* is called, the *exact* same |
288 | * guard flags must be passed for this entry. The KMEM subsystem |
289 | * is generally oblivious to guards, and passing inconsistent flags |
290 | * will cause pages to be moved incorrectly. |
291 | * |
292 | * @const KMEM_KSTACK (alloc) |
293 | * This flag must be passed when the allocation is for kernel stacks. |
294 | * This only has an effect on Intel. |
295 | * |
296 | * @const KMEM_NOENCRYPT (alloc) |
297 | * Obsolete, will be repurposed soon. |
298 | * |
299 | * @const KMEM_KASAN_GUARD (alloc, realloc, free) |
 * Under KASAN_CLASSIC, add guards left and right of this allocation
 * in order to detect out-of-bounds accesses.
302 | * |
303 | * This can't be passed if any of @c KMEM_GUARD_FIRST |
304 | * or @c KMEM_GUARD_LAST is used. |
305 | * |
306 | * @const KMEM_TAG (alloc, realloc, free) |
 * Under KASAN_TBI, this allocation is tagged non-canonically.
308 | */ |
309 | __options_decl(kmem_flags_t, uint32_t, { |
310 | KMEM_NONE = 0x00000000, |
311 | |
312 | /* Call behavior */ |
313 | KMEM_NOFAIL = 0x00000001, |
314 | KMEM_NOPAGEWAIT = 0x00000002, |
315 | KMEM_FREEOLD = 0x00000004, |
316 | KMEM_REALLOCF = 0x00000008, |
317 | |
318 | /* How the entry is populated */ |
319 | KMEM_VAONLY = 0x00000010, |
320 | KMEM_PAGEABLE = 0x00000020, |
321 | KMEM_ZERO = 0x00000040, |
322 | |
323 | /* VM object to use for the entry */ |
324 | KMEM_KOBJECT = 0x00000100, |
325 | KMEM_COMPRESSOR = 0x00000200, |
326 | |
327 | /* How to look for addresses */ |
328 | KMEM_LOMEM = 0x00001000, |
329 | KMEM_LAST_FREE = 0x00002000, |
330 | KMEM_GUESS_SIZE = 0x00004000, |
331 | KMEM_DATA = 0x00008000, |
332 | KMEM_SPRAYQTN = 0x00010000, |
333 | |
334 | /* Entry properties */ |
335 | KMEM_PERMANENT = 0x00100000, |
336 | KMEM_GUARD_FIRST = 0x00200000, |
337 | KMEM_GUARD_LAST = 0x00400000, |
338 | KMEM_KSTACK = 0x00800000, |
339 | KMEM_NOENCRYPT = 0x01000000, |
340 | KMEM_KASAN_GUARD = 0x02000000, |
341 | KMEM_TAG = 0x04000000, |
342 | }); |
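/*
 * Flags compose bitwise. For example, a wired, zero-filled kernel-object
 * allocation that must not fail would pass something like the following
 * (a sketch, using the @c KMA_* vended names defined below):
 *
 *     KMA_NOFAIL | KMA_ZERO | KMA_KOBJECT | KMA_PERMANENT
 */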
343 | |
344 | |
345 | #pragma mark kmem range methods |
346 | |
347 | extern struct mach_vm_range kmem_ranges[KMEM_RANGE_COUNT]; |
348 | extern struct mach_vm_range kmem_large_ranges[KMEM_RANGE_COUNT]; |
349 | #define KMEM_RANGE_MASK 0x3fff |
350 | #define KMEM_HASH_SET 0x4000 |
351 | #define KMEM_DIRECTION_MASK 0x8000 |
352 | |
353 | __stateful_pure |
354 | extern mach_vm_size_t mach_vm_range_size( |
355 | const struct mach_vm_range *r); |
356 | |
357 | __attribute__((overloadable, pure)) |
358 | extern bool mach_vm_range_contains( |
359 | const struct mach_vm_range *r, |
360 | mach_vm_offset_t addr); |
361 | |
362 | __attribute__((overloadable, pure)) |
363 | extern bool mach_vm_range_contains( |
364 | const struct mach_vm_range *r, |
365 | mach_vm_offset_t addr, |
366 | mach_vm_offset_t size); |
367 | |
368 | __attribute__((overloadable, pure)) |
369 | extern bool mach_vm_range_intersects( |
370 | const struct mach_vm_range *r1, |
371 | const struct mach_vm_range *r2); |
372 | |
373 | __attribute__((overloadable, pure)) |
374 | extern bool mach_vm_range_intersects( |
375 | const struct mach_vm_range *r1, |
376 | mach_vm_offset_t addr, |
377 | mach_vm_offset_t size); |
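/*
 * Containment-check sketch (assuming the @c min_address / @c max_address
 * field names of @c struct mach_vm_range from <mach/vm_types.h>):
 *
 *     struct mach_vm_range r = {
 *         .min_address = base,
 *         .max_address = base + len,
 *     };
 *     if (mach_vm_range_contains(&r, addr, size)) {
 *         // [addr, addr + size) lies entirely inside the range
 *     }
 */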
378 | |
379 | /* |
380 | * @function kmem_range_id_contains |
381 | * |
382 | * @abstract Return whether the region of `[addr, addr + size)` is completely |
383 | * within the memory range. |
384 | */ |
385 | __pure2 |
386 | extern bool kmem_range_id_contains( |
387 | kmem_range_id_t range_id, |
388 | vm_map_offset_t addr, |
389 | vm_map_size_t size); |
390 | |
391 | /* |
392 | * @function kmem_range_id_size |
393 | * |
394 | * @abstract Return the addressable size of the memory range. |
395 | */ |
396 | __pure2 |
397 | extern vm_map_size_t kmem_range_id_size( |
398 | kmem_range_id_t range_id); |
399 | |
400 | __pure2 |
401 | extern kmem_range_id_t kmem_addr_get_range( |
402 | vm_map_offset_t addr, |
403 | vm_map_size_t size); |
404 | |
405 | extern kmem_range_id_t kmem_adjust_range_id( |
406 | uint32_t hash); |
407 | |
408 | |
409 | /** |
410 | * @enum kmem_claims_flags_t |
411 | * |
412 | * @abstract |
 * Set of flags used in the processing of kmem_range claims.
414 | * |
415 | * @discussion |
 * These flags are used by the kmem subsystem while processing kmem_range
417 | * claims and are not explicitly passed by the caller registering the claim. |
418 | * |
419 | * @const KC_NO_ENTRY |
420 | * A vm map entry should not be created for the respective claim. |
421 | * |
422 | * @const KC_NO_MOVE |
 * The range shouldn't be moved once it has been placed, as it has constraints.
424 | */ |
425 | __options_decl(kmem_claims_flags_t, uint32_t, { |
426 | KC_NONE = 0x00000000, |
427 | KC_NO_ENTRY = 0x00000001, |
428 | KC_NO_MOVE = 0x00000002, |
429 | }); |
430 | |
431 | /* |
 * Security config that creates the additional splits in the non-data part of
 * the kernel_map.
434 | */ |
435 | #if KASAN || (__arm64__ && !defined(KERNEL_INTEGRITY_KTRR) && !defined(KERNEL_INTEGRITY_CTRR)) |
436 | # define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT OFF |
437 | #else |
438 | # define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT ON |
439 | #endif |
440 | |
441 | #define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__OFF() 0 |
442 | #define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__ON() 1 |
443 | #define ZSECURITY_CONFIG2(v) ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__##v() |
444 | #define ZSECURITY_CONFIG1(v) ZSECURITY_CONFIG2(v) |
445 | #define ZSECURITY_CONFIG(opt) ZSECURITY_CONFIG1(ZSECURITY_CONFIG_##opt) |
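/*
 * The two-level expansion above turns the ON/OFF tokens into 1/0 during
 * preprocessing, so a config can be tested as below (sketch); writing
 * @c ZSECURITY_CONFIG_KERNEL_PTR_SPLIT directly in an @c #if would silently
 * evaluate the bare ON/OFF token to 0 instead.
 *
 *     #if ZSECURITY_CONFIG(KERNEL_PTR_SPLIT)
 *     // pointer ranges are split out of the data ranges
 *     #endif
 */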
446 | |
447 | struct kmem_range_startup_spec { |
448 | const char *kc_name; |
449 | struct mach_vm_range *kc_range; |
450 | vm_map_size_t kc_size; |
451 | vm_map_size_t (^kc_calculate_sz)(void); |
452 | kmem_claims_flags_t kc_flags; |
453 | }; |
454 | |
455 | extern void kmem_range_startup_init( |
456 | struct kmem_range_startup_spec *sp); |
457 | |
458 | /*! |
459 | * @macro KMEM_RANGE_REGISTER_* |
460 | * |
461 | * @abstract |
 * Register a claim for a kmem range or submap.
463 | * |
464 | * @discussion |
465 | * Claims are shuffled during startup to randomize the layout of the kernel map. |
 * Temporary entries are created in place of the claims; the caller must
 * therefore provide the start of the assigned range as a hint, and pass
 * @c{VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE} to kmem_suballoc to replace the mapping.
469 | * |
470 | * Min/max constraints can be provided in the range when the claim is |
471 | * registered. |
472 | * |
473 | * This macro comes in 2 flavors: |
474 | * - STATIC : When the size of the range/submap is known at compile time |
475 | * - DYNAMIC: When the size of the range/submap needs to be computed |
478 | * |
479 | * @param name the name of the claim |
480 | * @param range the assigned range for the claim |
481 | * @param size the size of submap/range (if known at compile time) |
482 | * @param calculate_sz a block that returns the computed size of submap/range |
483 | */ |
484 | #define KMEM_RANGE_REGISTER_STATIC(name, range, size) \ |
485 | static __startup_data struct kmem_range_startup_spec \ |
486 | __startup_kmem_range_spec_ ## name = { #name, range, size, NULL, KC_NONE}; \ |
487 | STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \ |
488 | &__startup_kmem_range_spec_ ## name) |
489 | |
490 | #define KMEM_RANGE_REGISTER_DYNAMIC(name, range, calculate_sz) \ |
491 | static __startup_data struct kmem_range_startup_spec \ |
492 | __startup_kmem_range_spec_ ## name = { #name, range, 0, calculate_sz, \ |
493 | KC_NONE}; \ |
494 | STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \ |
495 | &__startup_kmem_range_spec_ ## name) |
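/*
 * Registration sketches (the names and sizes are hypothetical):
 *
 *     static struct mach_vm_range my_range;
 *
 *     // size known at compile time
 *     KMEM_RANGE_REGISTER_STATIC(my_submap, &my_range, 32 << 20);
 *
 *     // size computed at startup
 *     KMEM_RANGE_REGISTER_DYNAMIC(my_submap, &my_range, ^{
 *         return (vm_map_size_t)(my_unit_count * PAGE_SIZE);
 *     });
 */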
496 | |
497 | __startup_func |
498 | extern uint16_t kmem_get_random16( |
499 | uint16_t upper_limit); |
500 | |
501 | __startup_func |
502 | extern void kmem_shuffle( |
503 | uint16_t *shuffle_buf, |
504 | uint16_t count); |
505 | |
506 | |
507 | #pragma mark kmem entry parameters |
508 | |
509 | /*! |
510 | * @function kmem_entry_validate_guard() |
511 | * |
512 | * @brief |
 * Validates that the entry matches the input parameters, and panics otherwise.
514 | * |
515 | * @discussion |
 * If the guard has a zero value (@c KMEM_GUARD_NONE),
 * then the entry must be non-atomic.
518 | * |
519 | * The guard tag is not used for validation as the VM subsystems |
520 | * (particularly in IOKit) might decide to substitute it in ways |
521 | * that are difficult to predict for the programmer. |
522 | * |
523 | * @param entry the entry to validate |
524 | * @param addr the supposed start address |
525 | * @param size the supposed size of the entry |
526 | * @param guard the guard to use to "authenticate" the allocation. |
527 | */ |
528 | extern void kmem_entry_validate_guard( |
529 | vm_map_t map, |
530 | struct vm_map_entry *entry, |
531 | vm_offset_t addr, |
532 | vm_size_t size, |
533 | kmem_guard_t guard); |
534 | |
535 | /*! |
536 | * @function kmem_size_guard() |
537 | * |
538 | * @brief |
539 | * Returns the size of an atomic kalloc allocation made in the specified map, |
540 | * according to the guard. |
541 | * |
542 | * @param map a kernel map to lookup the entry into. |
543 | * @param addr the kernel address to lookup. |
544 | * @param guard the guard to use to "authenticate" the allocation. |
545 | */ |
546 | extern vm_size_t kmem_size_guard( |
547 | vm_map_t map, |
548 | vm_offset_t addr, |
549 | kmem_guard_t guard); |
550 | |
551 | #pragma mark kmem allocations |
552 | |
553 | /*! |
554 | * @typedef kma_flags_t |
555 | * |
556 | * @brief |
 * Flags used by the @c kmem_alloc* family of functions.
558 | */ |
559 | __options_decl(kma_flags_t, uint32_t, { |
560 | KMA_NONE = KMEM_NONE, |
561 | |
562 | /* Call behavior */ |
563 | KMA_NOFAIL = KMEM_NOFAIL, |
564 | KMA_NOPAGEWAIT = KMEM_NOPAGEWAIT, |
565 | |
566 | /* How the entry is populated */ |
567 | KMA_VAONLY = KMEM_VAONLY, |
568 | KMA_PAGEABLE = KMEM_PAGEABLE, |
569 | KMA_ZERO = KMEM_ZERO, |
570 | |
571 | /* VM object to use for the entry */ |
572 | KMA_KOBJECT = KMEM_KOBJECT, |
573 | KMA_COMPRESSOR = KMEM_COMPRESSOR, |
574 | |
575 | /* How to look for addresses */ |
576 | KMA_LOMEM = KMEM_LOMEM, |
577 | KMA_LAST_FREE = KMEM_LAST_FREE, |
578 | KMA_DATA = KMEM_DATA, |
579 | KMA_SPRAYQTN = KMEM_SPRAYQTN, |
580 | |
581 | /* Entry properties */ |
582 | KMA_PERMANENT = KMEM_PERMANENT, |
583 | KMA_GUARD_FIRST = KMEM_GUARD_FIRST, |
584 | KMA_GUARD_LAST = KMEM_GUARD_LAST, |
585 | KMA_KSTACK = KMEM_KSTACK, |
586 | KMA_NOENCRYPT = KMEM_NOENCRYPT, |
587 | KMA_KASAN_GUARD = KMEM_KASAN_GUARD, |
588 | KMA_TAG = KMEM_TAG, |
589 | }); |
590 | |
591 | |
592 | /*! |
593 | * @function kmem_alloc_guard() |
594 | * |
595 | * @brief |
596 | * Master entry point for allocating kernel memory. |
597 | * |
598 | * @param map map to allocate into, must be a kernel map. |
599 | * @param size the size of the entry to allocate, must not be 0. |
600 | * @param mask an alignment mask that the returned allocation |
601 | * will be aligned to (ignoring guards, see @const |
602 | * KMEM_GUARD_FIRST). |
603 | * @param flags a set of @c KMA_* flags, (@see @c kmem_flags_t) |
604 | * @param guard how to guard the allocation. |
605 | * |
606 | * @returns |
 * - the non-zero address of the allocation on success in @c kmr_address.
608 | * - @c KERN_NO_SPACE if the target map is out of address space. |
609 | * - @c KERN_RESOURCE_SHORTAGE if the kernel is out of pages. |
610 | */ |
611 | extern kmem_return_t kmem_alloc_guard( |
612 | vm_map_t map, |
613 | vm_size_t size, |
614 | vm_offset_t mask, |
615 | kma_flags_t flags, |
616 | kmem_guard_t guard) __result_use_check; |
617 | |
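/*
 * Convenience wrapper around kmem_alloc_guard() that returns the address
 * through an out-parameter and the kern_return_t directly.
 */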
618 | static inline kern_return_t |
619 | kernel_memory_allocate( |
620 | vm_map_t map, |
621 | vm_offset_t *addrp, |
622 | vm_size_t size, |
623 | vm_offset_t mask, |
624 | kma_flags_t flags, |
625 | vm_tag_t tag) |
626 | { |
627 | kmem_guard_t guard = { |
628 | .kmg_tag = tag, |
629 | }; |
630 | kmem_return_t kmr; |
631 | |
632 | kmr = kmem_alloc_guard(map, size, mask, flags, guard); |
633 | if (kmr.kmr_return == KERN_SUCCESS) { |
634 | __builtin_assume(kmr.kmr_address != 0); |
635 | } else { |
636 | __builtin_assume(kmr.kmr_address == 0); |
637 | } |
638 | *addrp = kmr.kmr_address; |
639 | return kmr.kmr_return; |
640 | } |
641 | |
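/*
 * Convenience wrapper around kernel_memory_allocate() with no alignment
 * constraint (a zero mask).
 */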
642 | static inline kern_return_t |
643 | kmem_alloc( |
644 | vm_map_t map, |
645 | vm_offset_t *addrp, |
646 | vm_size_t size, |
647 | kma_flags_t flags, |
648 | vm_tag_t tag) |
649 | { |
	return kernel_memory_allocate(map, addrp, size, 0, flags, tag);
651 | } |
652 | |
653 | /*! |
654 | * @function kmem_alloc_contig_guard() |
655 | * |
656 | * @brief |
657 | * Variant of kmem_alloc_guard() that allocates a contiguous range |
658 | * of physical memory. |
659 | * |
660 | * @param map map to allocate into, must be a kernel map. |
661 | * @param size the size of the entry to allocate, must not be 0. |
662 | * @param mask an alignment mask that the returned allocation |
663 | * will be aligned to (ignoring guards, see @const |
664 | * KMEM_GUARD_FIRST). |
665 | * @param max_pnum The maximum page number to allocate, or 0. |
666 | * @param pnum_mask A page number alignment mask for the first allocated |
667 | * page, or 0. |
668 | * @param flags a set of @c KMA_* flags, (@see @c kmem_flags_t) |
669 | * @param guard how to guard the allocation. |
670 | * |
671 | * @returns |
 * - the non-zero address of the allocation on success in @c kmr_address.
673 | * - @c KERN_NO_SPACE if the target map is out of address space. |
674 | * - @c KERN_RESOURCE_SHORTAGE if the kernel is out of pages. |
675 | */ |
676 | extern kmem_return_t kmem_alloc_contig_guard( |
677 | vm_map_t map, |
678 | vm_size_t size, |
679 | vm_offset_t mask, |
680 | ppnum_t max_pnum, |
681 | ppnum_t pnum_mask, |
682 | kma_flags_t flags, |
683 | kmem_guard_t guard); |
684 | |
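/*
 * Out-parameter wrapper around kmem_alloc_contig_guard(), mirroring
 * kernel_memory_allocate().
 */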
685 | static inline kern_return_t |
686 | kmem_alloc_contig( |
687 | vm_map_t map, |
688 | vm_offset_t *addrp, |
689 | vm_size_t size, |
690 | vm_offset_t mask, |
691 | ppnum_t max_pnum, |
692 | ppnum_t pnum_mask, |
693 | kma_flags_t flags, |
694 | vm_tag_t tag) |
695 | { |
696 | kmem_guard_t guard = { |
697 | .kmg_tag = tag, |
698 | }; |
699 | kmem_return_t kmr; |
700 | |
701 | kmr = kmem_alloc_contig_guard(map, size, mask, |
702 | max_pnum, pnum_mask, flags, guard); |
703 | if (kmr.kmr_return == KERN_SUCCESS) { |
704 | __builtin_assume(kmr.kmr_address != 0); |
705 | } else { |
706 | __builtin_assume(kmr.kmr_address == 0); |
707 | } |
708 | *addrp = kmr.kmr_address; |
709 | return kmr.kmr_return; |
710 | } |
711 | |
712 | |
713 | /*! |
714 | * @typedef kms_flags_t |
715 | * |
716 | * @brief |
717 | * Flags used by @c kmem_suballoc. |
718 | */ |
719 | __options_decl(kms_flags_t, uint32_t, { |
720 | KMS_NONE = KMEM_NONE, |
721 | |
722 | /* Call behavior */ |
723 | KMS_NOFAIL = KMEM_NOFAIL, |
724 | |
725 | /* How to look for addresses */ |
726 | KMS_LAST_FREE = KMEM_LAST_FREE, |
727 | KMS_DATA = KMEM_DATA, |
728 | |
729 | /* Entry properties */ |
730 | KMS_PERMANENT = KMEM_PERMANENT, |
731 | }); |
732 | |
733 | /*! |
734 | * @function kmem_suballoc() |
735 | * |
736 | * @brief |
737 | * Create a kernel submap, in an atomic entry guarded with KMEM_GUARD_SUBMAP. |
738 | * |
739 | * @param parent map to allocate into, must be a kernel map. |
740 | * @param addr (in/out) the address for the map (see vm_map_enter) |
741 | * @param size the size of the entry to allocate, must not be 0. |
742 | * @param vmc_options the map creation options |
743 | * @param vm_flags a set of @c VM_FLAGS_* flags |
744 | * @param flags a set of @c KMS_* flags, (@see @c kmem_flags_t) |
745 | * @param tag the tag for this submap's entry. |
746 | */ |
747 | extern kmem_return_t kmem_suballoc( |
748 | vm_map_t parent, |
749 | mach_vm_offset_t *addr, |
750 | vm_size_t size, |
751 | vm_map_create_options_t vmc_options, |
752 | int vm_flags, |
753 | kms_flags_t flags, |
754 | vm_tag_t tag); |
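/*
 * Typical pattern (a sketch; @c my_range, @c my_size, @c my_submap and the
 * tag are hypothetical): replacing the temporary entry created for a
 * registered claim with an actual submap, as described above for
 * KMEM_RANGE_REGISTER_*.
 *
 *     mach_vm_offset_t addr = my_range.min_address;  // start of the claim
 *     kmem_return_t kmr;
 *
 *     kmr = kmem_suballoc(kernel_map, &addr, my_size,
 *         VM_MAP_CREATE_DEFAULT, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
 *         KMS_PERMANENT | KMS_NOFAIL, VM_KERN_MEMORY_KEXT);
 *     my_submap = kmr.kmr_submap;
 */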
755 | |
756 | |
757 | #pragma mark kmem reallocation |
758 | |
759 | /*! |
760 | * @typedef kmr_flags_t |
761 | * |
762 | * @brief |
 * Flags used by the @c kmem_realloc* family of functions.
764 | */ |
765 | __options_decl(kmr_flags_t, uint32_t, { |
766 | KMR_NONE = KMEM_NONE, |
767 | |
768 | /* Call behavior */ |
769 | KMR_NOPAGEWAIT = KMEM_NOPAGEWAIT, |
770 | KMR_FREEOLD = KMEM_FREEOLD, |
771 | KMR_REALLOCF = KMEM_REALLOCF, |
772 | |
773 | /* How the entry is populated */ |
774 | KMR_ZERO = KMEM_ZERO, |
775 | |
776 | /* VM object to use for the entry */ |
777 | KMR_KOBJECT = KMEM_KOBJECT, |
778 | |
779 | /* How to look for addresses */ |
780 | KMR_LOMEM = KMEM_LOMEM, |
781 | KMR_LAST_FREE = KMEM_LAST_FREE, |
782 | KMR_DATA = KMEM_DATA, |
783 | KMR_SPRAYQTN = KMEM_SPRAYQTN, |
784 | |
785 | /* Entry properties */ |
786 | KMR_GUARD_FIRST = KMEM_GUARD_FIRST, |
787 | KMR_GUARD_LAST = KMEM_GUARD_LAST, |
788 | KMR_KASAN_GUARD = KMEM_KASAN_GUARD, |
789 | KMR_TAG = KMEM_TAG, |
790 | }); |
791 | |
792 | #define KMEM_REALLOC_FLAGS_VALID(flags) \ |
793 | (((flags) & (KMR_KOBJECT | KMEM_GUARD_LAST | KMEM_KASAN_GUARD | KMR_DATA)) == KMR_DATA || ((flags) & KMR_FREEOLD)) |
794 | |
795 | /*! |
796 | * @function kmem_realloc_guard() |
797 | * |
798 | * @brief |
799 | * Reallocates memory allocated with kmem_alloc_guard() |
800 | * |
801 | * @discussion |
 * @c kmem_realloc_guard() either mandates a guard with atomicity set,
 * or must use KMR_DATA (this is not an implementation limitation,
 * but a security policy).
805 | * |
806 | * If kmem_realloc_guard() is called for the kernel object |
807 | * (with @c KMR_KOBJECT) or with any trailing guard page, |
808 | * then the use of @c KMR_FREEOLD is mandatory. |
809 | * |
 * When @c KMR_FREEOLD isn't used, if the allocation was relocated
 * as opposed to being extended or truncated in place, the caller
 * must free its old mapping manually by calling @c kmem_free_guard().
813 | * |
 * Note that if the entry is truncated, the truncation is always done in place.
815 | * |
816 | * |
817 | * @param map map to allocate into, must be a kernel map. |
818 | * @param oldaddr the address to reallocate, |
819 | * passing 0 means @c kmem_alloc_guard() will be called. |
820 | * @param oldsize the current size of the entry |
821 | * @param newsize the new size of the entry, |
822 | * 0 means kmem_free_guard() will be called. |
823 | * @param flags a set of @c KMR_* flags, (@see @c kmem_flags_t) |
824 | * the exact same set of @c KMR_GUARD_* flags must |
825 | * be passed for all calls (@see kmem_flags_t). |
826 | * @param guard the allocation guard. |
827 | * |
828 | * @returns |
829 | * - the newly allocated address on success in @c kmr_address |
830 | * (note that if newsize is 0, then address will be 0 too). |
831 | * - @c KERN_NO_SPACE if the target map is out of address space. |
832 | * - @c KERN_RESOURCE_SHORTAGE if the kernel is out of pages. |
833 | */ |
834 | extern kmem_return_t kmem_realloc_guard( |
835 | vm_map_t map, |
836 | vm_offset_t oldaddr, |
837 | vm_size_t oldsize, |
838 | vm_size_t newsize, |
839 | kmr_flags_t flags, |
840 | kmem_guard_t guard) __result_use_check |
841 | __attribute__((diagnose_if(!KMEM_REALLOC_FLAGS_VALID(flags), |
842 | "invalid realloc flags passed" , "error" ))); |
843 | |
844 | /*! |
845 | * @function kmem_realloc_should_free() |
846 | * |
847 | * @brief |
848 | * Returns whether the old address passed to a @c kmem_realloc_guard() |
849 | * call without @c KMR_FREEOLD must be freed. |
850 | * |
851 | * @param oldaddr the "oldaddr" passed to @c kmem_realloc_guard(). |
 * @param kmr the result of that @c kmem_realloc_guard() call.
853 | */ |
854 | static inline bool |
855 | kmem_realloc_should_free( |
856 | vm_offset_t oldaddr, |
857 | kmem_return_t kmr) |
858 | { |
859 | return oldaddr && oldaddr != kmr.kmr_address; |
860 | } |
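/*
 * Usage sketch for a reallocation without @c KMR_FREEOLD (names are
 * hypothetical):
 *
 *     kmem_return_t kmr;
 *
 *     kmr = kmem_realloc_guard(map, oldaddr, oldsize, newsize,
 *         KMR_DATA, KMEM_GUARD_NONE);
 *     if (kmr.kmr_return == KERN_SUCCESS &&
 *         kmem_realloc_should_free(oldaddr, kmr)) {
 *         kmem_free_guard(map, oldaddr, oldsize, KMF_NONE, KMEM_GUARD_NONE);
 *     }
 */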
861 | |
862 | |
863 | #pragma mark kmem free |
864 | |
865 | /*! |
866 | * @typedef kmf_flags_t |
867 | * |
868 | * @brief |
 * Flags used by the @c kmem_free* family of functions.
870 | */ |
871 | __options_decl(kmf_flags_t, uint32_t, { |
872 | KMF_NONE = KMEM_NONE, |
873 | |
874 | /* Call behavior */ |
875 | |
876 | /* How the entry is populated */ |
877 | |
878 | /* How to look for addresses */ |
879 | KMF_GUESS_SIZE = KMEM_GUESS_SIZE, |
880 | KMF_KASAN_GUARD = KMEM_KASAN_GUARD, |
881 | KMF_TAG = KMEM_TAG, |
882 | }); |
883 | |
884 | |
885 | /*! |
886 | * @function kmem_free_guard() |
887 | * |
888 | * @brief |
889 | * Frees memory allocated with @c kmem_alloc or @c kmem_realloc. |
890 | * |
891 | * @param map map to free from, must be a kernel map. |
892 | * @param addr the address to free |
893 | * @param size the size of the memory to free |
894 | * @param flags a set of @c KMF_* flags, (@see @c kmem_flags_t) |
895 | * @param guard the allocation guard. |
896 | * |
897 | * @returns the size of the entry that was deleted. |
898 | * (useful when @c KMF_GUESS_SIZE was used) |
899 | */ |
900 | extern vm_size_t kmem_free_guard( |
901 | vm_map_t map, |
902 | vm_offset_t addr, |
903 | vm_size_t size, |
904 | kmf_flags_t flags, |
905 | kmem_guard_t guard); |
906 | |
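/*
 * Untagged, unguarded free of a whole entry; thin wrapper around
 * kmem_free_guard().
 */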
907 | static inline void |
908 | kmem_free( |
909 | vm_map_t map, |
910 | vm_offset_t addr, |
911 | vm_size_t size) |
912 | { |
	kmem_free_guard(map, addr, size, KMF_NONE, KMEM_GUARD_NONE);
914 | } |
915 | |
916 | #pragma mark kmem population |
917 | |
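/*
 * Populate / depopulate the physical pages backing a kernel VA range,
 * typically one that was allocated with @c KMA_VAONLY.
 */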
918 | extern kern_return_t kernel_memory_populate( |
919 | vm_offset_t addr, |
920 | vm_size_t size, |
921 | kma_flags_t flags, |
922 | vm_tag_t tag); |
923 | |
924 | extern void kernel_memory_depopulate( |
925 | vm_offset_t addr, |
926 | vm_size_t size, |
927 | kma_flags_t flags, |
928 | vm_tag_t tag); |
929 | |
930 | #pragma GCC visibility pop |
931 | #elif KERNEL_PRIVATE /* XNU_KERNEL_PRIVATE */ |
932 | |
933 | extern kern_return_t kmem_alloc( |
934 | vm_map_t map, |
935 | vm_offset_t *addrp, |
936 | vm_size_t size); |
937 | |
938 | extern kern_return_t kmem_alloc_pageable( |
939 | vm_map_t map, |
940 | vm_offset_t *addrp, |
941 | vm_size_t size); |
942 | |
943 | extern kern_return_t kmem_alloc_kobject( |
944 | vm_map_t map, |
945 | vm_offset_t *addrp, |
946 | vm_size_t size); |
947 | |
948 | extern void kmem_free( |
949 | vm_map_t map, |
950 | vm_offset_t addr, |
951 | vm_size_t size); |
952 | |
953 | #endif /* KERNEL_PRIVATE */ |
954 | |
#pragma mark - kernel address obfuscation / hashing for logging
956 | |
957 | extern vm_offset_t vm_kernel_addrperm_ext; |
958 | |
959 | extern void vm_kernel_addrhide( |
960 | vm_offset_t addr, |
961 | vm_offset_t *hide_addr); |
962 | |
963 | extern void vm_kernel_addrperm_external( |
964 | vm_offset_t addr, |
965 | vm_offset_t *perm_addr); |
966 | |
967 | extern void vm_kernel_unslide_or_perm_external( |
968 | vm_offset_t addr, |
969 | vm_offset_t *up_addr); |
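/*
 * Logging sketch: obfuscate a kernel pointer before it leaves the kernel
 * (@c obj is hypothetical):
 *
 *     vm_offset_t perm_addr;
 *
 *     vm_kernel_addrperm_external((vm_offset_t)obj, &perm_addr);
 *     printf("object at %p\n", (void *)perm_addr);
 */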
970 | |
971 | #if !XNU_KERNEL_PRIVATE |
972 | |
973 | extern vm_offset_t vm_kernel_addrhash( |
974 | vm_offset_t addr); |
975 | |
976 | #else /* XNU_KERNEL_PRIVATE */ |
977 | #pragma GCC visibility push(hidden) |
978 | |
979 | extern uint64_t vm_kernel_addrhash_salt; |
980 | extern uint64_t vm_kernel_addrhash_salt_ext; |
981 | |
982 | extern vm_offset_t vm_kernel_addrhash_internal( |
983 | vm_offset_t addr, |
984 | uint64_t salt); |
985 | |
986 | static inline vm_offset_t |
987 | vm_kernel_addrhash(vm_offset_t addr) |
988 | { |
	return vm_kernel_addrhash_internal(addr, vm_kernel_addrhash_salt);
990 | } |
991 | |
992 | #pragma mark - kernel variants of the Mach VM interfaces |
993 | |
994 | /*! |
995 | * @function vm_map_kernel_flags_vmflags() |
996 | * |
997 | * @brief |
998 | * Return the vmflags set in the specified @c vmk_flags. |
999 | */ |
1000 | extern int vm_map_kernel_flags_vmflags( |
1001 | vm_map_kernel_flags_t vmk_flags); |
1002 | |
1003 | /*! |
1004 | * @function vm_map_kernel_flags_set_vmflags() |
1005 | * |
1006 | * @brief |
1007 | * Populates the @c vmf_* and @c vm_tag fields of the vmk flags, |
 * with the specified vm flags (@c VM_FLAGS_* from <mach/vm_statistics.h>).
1009 | */ |
1010 | __attribute__((overloadable)) |
1011 | extern void vm_map_kernel_flags_set_vmflags( |
1012 | vm_map_kernel_flags_t *vmk_flags, |
1013 | int vm_flags, |
1014 | vm_tag_t vm_tag); |
1015 | |
1016 | /*! |
1017 | * @function vm_map_kernel_flags_set_vmflags() |
1018 | * |
1019 | * @brief |
1020 | * Populates the @c vmf_* and @c vm_tag fields of the vmk flags, |
 * with the specified vm flags (@c VM_FLAGS_* from <mach/vm_statistics.h>).
1022 | * |
1023 | * @discussion |
1024 | * This variant takes the tag from the top byte of the flags. |
1025 | */ |
1026 | __attribute__((overloadable)) |
1027 | extern void vm_map_kernel_flags_set_vmflags( |
1028 | vm_map_kernel_flags_t *vmk_flags, |
1029 | int vm_flags_and_tag); |
1030 | |
1031 | /*! |
1032 | * @function vm_map_kernel_flags_and_vmflags() |
1033 | * |
1034 | * @brief |
1035 | * Apply a mask to the vmflags. |
1036 | */ |
1037 | extern void vm_map_kernel_flags_and_vmflags( |
1038 | vm_map_kernel_flags_t *vmk_flags, |
1039 | int vm_flags_mask); |
1040 | |
1041 | /*! |
1042 | * @function vm_map_kernel_flags_check_vmflags() |
1043 | * |
1044 | * @brief |
1045 | * Returns whether the @c vmk_flags @c vmf_* fields |
1046 | * are limited to the specified mask. |
1047 | */ |
1048 | extern bool vm_map_kernel_flags_check_vmflags( |
1049 | vm_map_kernel_flags_t vmk_flags, |
1050 | int vm_flags_mask); |
1051 | |
1052 | |
1053 | extern kern_return_t mach_vm_allocate_kernel( |
1054 | vm_map_t map, |
1055 | mach_vm_offset_t *addr, |
1056 | mach_vm_size_t size, |
1057 | int flags, |
1058 | vm_tag_t tag); |
1059 | |
1060 | extern kern_return_t mach_vm_map_kernel( |
1061 | vm_map_t target_map, |
1062 | mach_vm_offset_t *address, |
1063 | mach_vm_size_t initial_size, |
1064 | mach_vm_offset_t mask, |
1065 | vm_map_kernel_flags_t vmk_flags, |
1066 | ipc_port_t port, |
1067 | vm_object_offset_t offset, |
1068 | boolean_t copy, |
1069 | vm_prot_t cur_protection, |
1070 | vm_prot_t max_protection, |
1071 | vm_inherit_t inheritance); |
1072 | |
1073 | |
1074 | extern kern_return_t mach_vm_remap_kernel( |
1075 | vm_map_t target_map, |
1076 | mach_vm_offset_t *address, |
1077 | mach_vm_size_t size, |
1078 | mach_vm_offset_t mask, |
1079 | int flags, |
1080 | vm_tag_t tag, |
1081 | vm_map_t src_map, |
1082 | mach_vm_offset_t memory_address, |
1083 | boolean_t copy, |
1084 | vm_prot_t *cur_protection, |
1085 | vm_prot_t *max_protection, |
1086 | vm_inherit_t inheritance); |
1087 | |
1088 | extern kern_return_t mach_vm_remap_new_kernel( |
1089 | vm_map_t target_map, |
1090 | mach_vm_offset_t *address, |
1091 | mach_vm_size_t size, |
1092 | mach_vm_offset_t mask, |
1093 | int flags, |
1094 | vm_tag_t tag, |
1095 | vm_map_t src_map, |
1096 | mach_vm_offset_t memory_address, |
1097 | boolean_t copy, |
1098 | vm_prot_t *cur_protection, |
1099 | vm_prot_t *max_protection, |
1100 | vm_inherit_t inheritance); |
1101 | |
1102 | extern kern_return_t mach_vm_wire_kernel( |
1103 | vm_map_t map, |
1104 | mach_vm_offset_t start, |
1105 | mach_vm_size_t size, |
1106 | vm_prot_t access, |
1107 | vm_tag_t tag); |
1108 | |
1109 | extern kern_return_t vm_map_wire_kernel( |
1110 | vm_map_t map, |
1111 | vm_map_offset_t start, |
1112 | vm_map_offset_t end, |
1113 | vm_prot_t caller_prot, |
1114 | vm_tag_t tag, |
1115 | boolean_t user_wire); |
1116 | |
1117 | extern kern_return_t memory_object_iopl_request( |
1118 | ipc_port_t port, |
1119 | memory_object_offset_t offset, |
1120 | upl_size_t *upl_size, |
1121 | upl_t *upl_ptr, |
1122 | upl_page_info_array_t user_page_list, |
1123 | unsigned int *page_list_count, |
1124 | upl_control_flags_t *flags, |
1125 | vm_tag_t tag); |
1126 | |
1127 | #ifdef MACH_KERNEL_PRIVATE |
1128 | |
1129 | extern kern_return_t copyinmap( |
1130 | vm_map_t map, |
1131 | vm_map_offset_t fromaddr, |
1132 | void *todata, |
1133 | vm_size_t length); |
1134 | |
1135 | extern kern_return_t copyoutmap( |
1136 | vm_map_t map, |
1137 | void *fromdata, |
1138 | vm_map_offset_t toaddr, |
1139 | vm_size_t length); |
1140 | |
1141 | extern kern_return_t copyoutmap_atomic32( |
1142 | vm_map_t map, |
1143 | uint32_t value, |
1144 | vm_map_offset_t toaddr); |
1145 | |
1146 | extern kern_return_t copyoutmap_atomic64( |
1147 | vm_map_t map, |
1148 | uint64_t value, |
1149 | vm_map_offset_t toaddr); |
1150 | |
1151 | #endif /* MACH_KERNEL_PRIVATE */ |
1152 | #pragma GCC visibility pop |
1153 | #endif /* XNU_KERNEL_PRIVATE */ |
1154 | #ifdef KERNEL_PRIVATE |
1155 | #pragma mark - unsorted interfaces |
1156 | |
1157 | #ifdef XNU_KERNEL_PRIVATE |
1158 | typedef struct vm_allocation_site kern_allocation_name; |
1159 | typedef kern_allocation_name * kern_allocation_name_t; |
1160 | #else /* XNU_KERNEL_PRIVATE */ |
1161 | struct kern_allocation_name; |
1162 | typedef struct kern_allocation_name * kern_allocation_name_t; |
1163 | #endif /* !XNU_KERNEL_PRIVATE */ |
1164 | |
1165 | extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint16_t suballocs); |
1166 | extern void kern_allocation_name_release(kern_allocation_name_t allocation); |
1167 | extern const char * kern_allocation_get_name(kern_allocation_name_t allocation); |
1168 | |
1169 | #endif /* KERNEL_PRIVATE */ |
1170 | #ifdef XNU_KERNEL_PRIVATE |
1171 | #pragma GCC visibility push(hidden) |
1172 | |
1173 | extern void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta, vm_object_t object); |
1174 | extern void kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta); |
1175 | extern vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation); |
1176 | |
1177 | struct mach_memory_info; |
1178 | extern kern_return_t vm_page_diagnose( |
1179 | struct mach_memory_info *info, |
1180 | unsigned int num_info, |
1181 | uint64_t zones_collectable_bytes, |
1182 | bool redact_info); |
1183 | |
1184 | extern uint32_t vm_page_diagnose_estimate(void); |
1185 | |
1186 | extern void vm_init_before_launchd(void); |
1187 | |
1188 | typedef enum { |
1189 | PMAP_FEAT_UEXEC = 1 |
1190 | } pmap_feature_flags_t; |
1191 | |
1192 | #if defined(__x86_64__) |
1193 | extern bool pmap_supported_feature(pmap_t pmap, pmap_feature_flags_t feat); |
1194 | #endif |
1195 | |
1196 | #if DEBUG || DEVELOPMENT |
1197 | typedef struct { |
1198 | vm_map_size_t meta_sz; |
1199 | vm_map_size_t pte_sz; |
1200 | vm_map_size_t total_va; |
1201 | vm_map_size_t total_used; |
1202 | } kmem_gobj_stats; |
1203 | |
1204 | extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size); |
1205 | extern kmem_gobj_stats kmem_get_gobj_stats(void); |
1206 | |
1207 | #endif /* DEBUG || DEVELOPMENT */ |
1208 | |
1209 | #if HIBERNATION |
1210 | extern void hibernate_rebuild_vm_structs(void); |
1211 | #endif /* HIBERNATION */ |
1212 | |
1213 | extern vm_tag_t vm_tag_bt(void); |
1214 | |
1215 | extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site); |
1216 | |
1217 | extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP); |
1218 | |
1219 | extern void vm_tag_update_size(vm_tag_t tag, int64_t size, vm_object_t object); |
1220 | |
1221 | extern uint64_t vm_tag_get_size(vm_tag_t tag); |
1222 | |
1223 | #if VM_TAG_SIZECLASSES |
1224 | |
1225 | extern void vm_allocation_zones_init(void); |
1226 | extern vm_tag_t vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags); |
1227 | extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta); |
1228 | |
1229 | #endif /* VM_TAG_SIZECLASSES */ |
1230 | |
1231 | extern vm_tag_t vm_tag_bt_debug(void); |
1232 | |
1233 | extern uint32_t vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen); |
1234 | |
1235 | extern boolean_t vm_kernel_map_is_kernel(vm_map_t map); |
1236 | |
1237 | extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr); |
1238 | |
1239 | #pragma GCC visibility pop |
1240 | #endif /* XNU_KERNEL_PRIVATE */ |
1241 | |
1242 | __END_DECLS |
1243 | |
1244 | #endif /* _VM_VM_KERN_H_ */ |
1245 | |