/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *  File:   mach/vm_param.h
 *  Author: Avadis Tevanian, Jr., Michael Wayne Young
 *  Date:   1985
 *
 *  Machine independent virtual memory parameters.
 *
 */

#ifndef _MACH_VM_PARAM_H_
#define _MACH_VM_PARAM_H_

#include <mach/machine/vm_param.h>

#ifdef KERNEL

#ifndef ASSEMBLER
#include <mach/vm_types.h>
#endif  /* ASSEMBLER */

#include <os/base.h>
#include <os/overflow.h>

/*
 * The machine-independent pages are referred to as PAGES.  A page
 * is some number of hardware pages, depending on the target machine.
 */

#ifndef ASSEMBLER

#define PAGE_SIZE_64    (unsigned long long)PAGE_SIZE   /* pagesize in addr units */
#define PAGE_MASK_64    (unsigned long long)PAGE_MASK   /* mask for off in page */

/*
 * Convert addresses to pages and vice versa.  No rounding is used.
 * The atop_32 and ptoa_32 macros should not be used on 64-bit types.
 * The atop_64 and ptoa_64 macros should be used instead.
 */

#define atop_32(x) ((uint32_t)(x) >> PAGE_SHIFT)
#define ptoa_32(x) ((uint32_t)(x) << PAGE_SHIFT)
#define atop_64(x) ((uint64_t)(x) >> PAGE_SHIFT)
#define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

#define atop_kernel(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa_kernel(x) ((vm_address_t)(x) << PAGE_SHIFT)

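/*
 * Illustrative sketch (not part of this interface): converting a byte count
 * to pages and back with the width-specific macros.  The variable names are
 * hypothetical.
 *
 *      uint64_t len   = 5 * PAGE_SIZE_64 + 123;
 *      uint64_t pages = atop_64(len);      // 5: truncating conversion, no rounding
 *      uint64_t bytes = ptoa_64(pages);    // 5 * PAGE_SIZE_64
 *      // Never feed a 64-bit length to atop_32()/ptoa_32(): the cast to
 *      // uint32_t silently drops the high bits.
 */
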
/*
 * While the following block is enabled, the legacy atop and ptoa
 * macros behave correctly.  When it is disabled, any use of them
 * generates an invalid-lvalue error, which makes stray callers easy
 * to find in the compiler output.
 */

#if 1
#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT)
#else
#define atop(x) (0UL = 0)
#define ptoa(x) (0UL = 0)
#endif

/*
 * Page-size rounding macros for the Public fixed-width VM types.
 */
#define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))

#define round_page_overflow(in, out) __os_warn_unused(({ \
	bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
	*out &= ~((__typeof__(*out))PAGE_MASK); \
	__ovr; \
}))

static inline int OS_WARN_RESULT
mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)
{
	return round_page_overflow(in, out);
}
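
/*
 * Illustrative sketch: overflow-checked rounding.  A hypothetical caller that
 * must not panic should prefer the *_overflow form and handle the error,
 * rather than using the plain rounding macros.
 *
 *      mach_vm_offset_t end;
 *      if (mach_vm_round_page_overflow(req_end, &end)) {
 *              return KERN_INVALID_ARGUMENT;   // req_end was within PAGE_MASK of the top
 *      }
 *      // 'end' is now page-aligned and is guaranteed not to have wrapped.
 */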

#define memory_object_round_page(x) (((memory_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define memory_object_trunc_page(x) ((memory_object_offset_t)(x) & ~((signed)PAGE_MASK))

/*
 * Rounding macros for the legacy VM types (those that scale with the
 * current task's address space size).
 */

#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(x) & ~((vm_offset_t)PAGE_MASK))

/*
 * Round off or truncate to the nearest page.  These work for either
 * addresses or counts (e.g. a count of 1 byte rounds up to one page's
 * worth of bytes).  The round_page_32 and trunc_page_32 macros should
 * not be used on 64-bit types.  The round_page_64 and trunc_page_64
 * macros should be used instead.
 *
 * These should only be used in the rare case where the size of the
 * address or length is hard-coded as 32 or 64 bit.  Otherwise, the
 * macros associated with the specific VM type should be used.
 */

#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
#define trunc_page_32(x) ((uint32_t)(x) & ~((uint32_t)PAGE_MASK))
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64))
#define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64))

#define round_page_mask_32(x, mask) (((uint32_t)(x) + (mask)) & ~((uint32_t)(mask)))
#define trunc_page_mask_32(x, mask) ((uint32_t)(x) & ~((uint32_t)(mask)))
#define round_page_mask_64(x, mask) (((uint64_t)(x) + (mask)) & ~((uint64_t)(mask)))
#define trunc_page_mask_64(x, mask) ((uint64_t)(x) & ~((uint64_t)(mask)))
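
/*
 * Illustrative sketch: rounding a length whose width is hard-coded to 64 bits.
 * The variable names are hypothetical; most code should use the type-specific
 * macros above (round_page(), mach_vm_round_page(), ...) instead.
 *
 *      uint64_t nbytes  = 3 * PAGE_SIZE_64 + 1;
 *      uint64_t rounded = round_page_64(nbytes);   // 4 pages' worth of bytes
 *      uint64_t truncd  = trunc_page_64(nbytes);   // 3 pages' worth of bytes
 */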

/*
 * Enable the following block to find uses of xxx_32 macros that should
 * be xxx_64.  These macros only work in C code, not C++.  The resulting
 * binaries are not functional.  Look for "invalid lvalue" errors in the
 * compiler output.
 *
 * Enabling the following block will also find uses of the xxx_64 macros
 * that have been passed pointers.  Such parameters should be cast to an
 * unsigned long type first.  Look for "invalid operands to binary +"
 * errors in the compiler output.
 */

#if 0
#undef atop_32
#undef ptoa_32
#undef round_page_32
#undef trunc_page_32
#undef atop_64
#undef ptoa_64
#undef round_page_64
#undef trunc_page_64

#ifndef __cplusplus

#define atop_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define ptoa_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define round_page_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define trunc_page_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)
#else

#define atop_32(x) (0)
#define ptoa_32(x) (0)
#define round_page_32(x) (0)
#define trunc_page_32(x) (0)

#endif /* ! __cplusplus */

#define atop_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define ptoa_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define round_page_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define trunc_page_64(x) ((uint64_t)((x) + (uint8_t *)0))

#endif

/*
 * Determine whether an address is page-aligned, or a count is
 * an exact page multiple.
 */

#define page_aligned(x) (((x) & PAGE_MASK) == 0)

extern vm_size_t mem_size;      /* 32-bit size of memory - limited by maxmem - deprecated */
extern uint64_t  max_mem;       /* 64-bit size of memory - limited by maxmem */

/*
 * The VM compressor pager uses 32-bit page numbers, so this limits the size
 * of anonymous memory objects to 0xffffffff pages.
 * When we need to allocate a chunk of anonymous memory over that size,
 * we have to allocate more than one chunk.
 */
#define ANON_MAX_PAGES  0xFFFFFFFFULL
#define ANON_MAX_SIZE   (ANON_MAX_PAGES << PAGE_SHIFT)
/*
 * Work-around for <rdar://problem/6626493>
 * Break large anonymous memory areas into 128MB chunks to alleviate
 * the cost of copying when copy-on-write is not possible because a small
 * portion of the area is wired.
 */
#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024)  /* 128MB */
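
/*
 * Illustrative sketch of the chunking policy described above (hypothetical
 * loop, not the actual VM map code): a large anonymous mapping is carved
 * into ANON_CHUNK_SIZE pieces, each backed by its own VM object.
 *
 *      for (mach_vm_offset_t off = 0; off < size; off += ANON_CHUNK_SIZE) {
 *              mach_vm_size_t chunk = MIN(ANON_CHUNK_SIZE, size - off);
 *              // ... back [off, off + chunk) with its own anonymous object ...
 *      }
 */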

/*
 * The 'medium' malloc allocator would like its regions
 * to be chunked up into MALLOC_MEDIUM_CHUNK_SIZE chunks
 * and backed by different objects.  This avoids contention
 * on a single large object and showed solid improvements on
 * high-core-count machines with workloads involving video and
 * graphics processing.
 */
#define MALLOC_MEDIUM_CHUNK_SIZE (8ULL * 1024 * 1024)   /* 8 MB */

#ifdef KERNEL_PRIVATE
extern uint64_t sane_size;      /* Memory size to use for defaults calculations */
#endif /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

#include <kern/debug.h>
#include <vm/vm_memtag.h>

extern uint64_t mem_actual;             /* 64-bit size of memory - not limited by maxmem */
extern uint64_t max_mem_actual;         /* Size of physical memory adjusted by maxmem */
extern addr64_t vm_last_addr;           /* Highest kernel virtual address known to the VM system */
extern addr64_t first_avail_phys;       /* First available physical address */

extern const vm_offset_t vm_min_kernel_address;
extern const vm_offset_t vm_max_kernel_address;

extern vm_offset_t vm_kernel_stext;
extern vm_offset_t vm_kernel_etext;
extern vm_offset_t vm_kernel_slid_base;
extern vm_offset_t vm_kernel_slid_top;
extern vm_offset_t vm_kernel_slide;

#if CONFIG_SPTM
typedef struct {
	vm_offset_t unslid_base;
	vm_offset_t unslid_top;
	vm_offset_t slid_base;
	vm_offset_t slid_top;
	vm_offset_t slide;
} vm_image_offsets;

extern vm_image_offsets vm_sptm_offsets;
extern vm_image_offsets vm_txm_offsets;
#endif /* CONFIG_SPTM */

extern vm_offset_t vm_kernel_addrperm;
extern vm_offset_t vm_kext_base;
extern vm_offset_t vm_kext_top;
extern vm_offset_t vm_kernel_base;
extern vm_offset_t vm_kernel_top;
extern vm_offset_t vm_hib_base;

extern vm_offset_t vm_kernel_builtinkmod_text;
extern vm_offset_t vm_kernel_builtinkmod_text_end;

/**
 * These functions' implementations are machine-specific; they are declared
 * here (rather than in a machine-dependent header) to avoid circular header
 * dependencies, because the sliding/unsliding macros below need them.
 */
__BEGIN_DECLS
vm_offset_t ml_static_slide(vm_offset_t vaddr);
vm_offset_t ml_static_unslide(vm_offset_t vaddr);
__END_DECLS

/**
 * Determine whether a given address is an address within a static region (i.e.,
 * coming from TEXT or DATA) that was slid during boot. Addresses of this type
 * should have the slide removed before exposing them to userspace so as to not
 * leak the slide itself to userspace.
 *
 * @param addr The virtual address to check.
 *
 * @return True if the address is a static/slid kernel address, false otherwise.
 */
static inline bool
vm_is_addr_slid(vm_offset_t addr)
{
	const vm_offset_t stripped_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(addr);
	const bool is_slid_kern_addr =
	    (stripped_addr >= vm_kernel_slid_base) && (stripped_addr < vm_kernel_slid_top);

#if CONFIG_SPTM
	const bool is_slid_sptm_addr =
	    (stripped_addr >= vm_sptm_offsets.slid_base) && (stripped_addr < vm_sptm_offsets.slid_top);

	const bool is_slid_txm_addr =
	    (stripped_addr >= vm_txm_offsets.slid_base) && (stripped_addr < vm_txm_offsets.slid_top);

	return is_slid_kern_addr || is_slid_sptm_addr || is_slid_txm_addr;
#else
	return is_slid_kern_addr;
#endif /* CONFIG_SPTM */
}

#define VM_KERNEL_IS_SLID(_o) (vm_is_addr_slid((vm_offset_t)(_o)))

#define VM_KERNEL_SLIDE(_u) (ml_static_slide((vm_offset_t)(_u)))

/*
 * The following macros are to be used when exposing kernel addresses to
 * userspace via any of the various debug or info facilities that might exist
 * (e.g. stackshot, proc_info syscall, etc.). It is important to understand
 * the goal of each macro and choose the right one depending on what you are
 * trying to do. Misuse of these macros can result in critical data leaks
 * which in turn lead to all sorts of system vulnerabilities. It is invalid to
 * call these macros on a non-kernel address (NULL is allowed).
 *
 * VM_KERNEL_UNSLIDE:
 *     Use this macro when you are exposing an address to userspace which is
 *     *guaranteed* to be a "static" kernel or kext address (i.e. coming from
 *     text or data sections). These are the addresses which get "slid" via
 *     ASLR on kernel or kext load, and it's precisely the slide value we are
 *     trying to protect from userspace.
 *
 * VM_KERNEL_ADDRHIDE:
 *     Use when exposing an address for internal purposes: debugging, tracing,
 *     etc. The address will be unslid if necessary. Other addresses will be
 *     hidden on customer builds, and unmodified on internal builds.
 *
 * VM_KERNEL_ADDRHASH:
 *     Use this macro when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or
 *     the kernel heap. The address will be unslid or hashed as appropriate.
 *
 *
 * ** SECURITY WARNING: The following macros can leak kernel secrets.
 *    Use *only* in performance *critical* code.
 *
 * VM_KERNEL_ADDRPERM:
 * VM_KERNEL_UNSLIDE_OR_PERM:
 *     Use these macros when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or
 *     the kernel heap. The address will be unslid or permuted as appropriate.
 *
 * Nesting of these macros should be considered invalid.
 */

#define __DO_UNSLIDE(_v) (ml_static_unslide((vm_offset_t)VM_KERNEL_STRIP_PTR(_v)))

#if DEBUG || DEVELOPMENT
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)VM_KERNEL_STRIP_PTR(_v))
#else
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)0)
#endif /* DEBUG || DEVELOPMENT */

#define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v))

#define VM_KERNEL_UNSLIDE_OR_PERM(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \
	VM_KERNEL_ADDRESS(_v) ? ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) + vm_kernel_addrperm) : \
	(vm_offset_t)VM_KERNEL_STRIP_PTR(_v); \
})

#define VM_KERNEL_UNSLIDE(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \
})

#define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v)
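
/*
 * Illustrative sketch (hypothetical callers): picking the exposure macro that
 * matches the guidance above.
 *
 *      // A function pointer into kernel text: guaranteed static, so unslide it.
 *      info->handler = VM_KERNEL_UNSLIDE(handler_fn);
 *
 *      // A heap object recorded by an internal debugging/tracing facility.
 *      trace_record->obj = VM_KERNEL_ADDRHIDE(obj);
 *
 *      // A static or heap address exposed to userspace on customer builds.
 *      out->token = VM_KERNEL_ADDRHASH(obj);
 */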

#undef mach_vm_round_page
#undef round_page
#undef round_page_32
#undef round_page_64

static inline int
mach_vm_size_unit(mach_vm_size_t size)
{
	uint32_t bits = 64u - (uint32_t)__builtin_clzll((size / 10) | 1);

	return "BKMGTPE"[bits / 10];
}

static inline uint32_t
mach_vm_size_pretty(mach_vm_size_t size)
{
	uint32_t bits = 64u - (uint32_t)__builtin_clzll((size / 10) | 1);

	return (uint32_t)(size >> (bits - bits % 10));
}
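
/*
 * Illustrative sketch: the two helpers above are intended to be used together
 * when formatting a byte count for logs (hypothetical example).
 *
 *      mach_vm_size_t size = 64ull << 20;      // 64 MB
 *      printf("region is %u%c\n",
 *          mach_vm_size_pretty(size), mach_vm_size_unit(size));    // "region is 64M"
 */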

static inline mach_vm_offset_t
mach_vm_round_page(mach_vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline vm_offset_t
round_page(vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline mach_vm_offset_t
round_page_64(mach_vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline uint32_t
round_page_32(uint32_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

/*!
 * @typedef vm_packing_params_t
 *
 * @brief
 * Data structure representing the packing parameters for a given packed pointer
 * encoding.
 *
 * @discussion
 * Several data structures want to pack their pointers into fewer than 64 bits
 * on LP64 in order to save memory.
 *
 * Adopters are supposed to define 3 macros:
 * - @c *_BITS: number of storage bits used for the packing,
 * - @c *_SHIFT: number of low bits that carry no information (expected to be
 *   0 in the pointers being packed),
 * - @c *_BASE: the base against which to encode.
 *
 * The encoding is a no-op when @c *_BITS is equal to @c __WORDSIZE and
 * @c *_SHIFT is 0.
 *
 * The convenience macro @c VM_PACKING_PARAMS can be used to create
 * a @c vm_packing_params_t structure out of those definitions.
 *
 * It is customary to declare one constant global per scheme, so that debuggers
 * can decide dynamically how to unpack the various schemes.
 *
 *
 * Two schemes are possible (both preserve @c NULL):
 *
 * 1. When the storage bits and shift are sufficiently large (strictly more than
 *    VM_KERNEL_POINTER_SIGNIFICANT_BITS), a sign-extension scheme can be used.
 *
 *    This scheme can represent any kernel pointer.
 *
 * 2. Otherwise, a base-relative scheme can be used; typical bases are:
 *
 *    - @c KERNEL_PMAP_HEAP_RANGE_START when only pointers to heap (zone)
 *      allocated objects need to be packed,
 *
 *    - @c VM_MIN_KERNEL_AND_KEXT_ADDRESS when pointers to kernel globals also
 *      need this.
 *
 *    When such an encoding is used, @c zone_restricted_va_max() must be taught
 *    about it.
 */
typedef struct vm_packing_params {
	vm_offset_t vmpp_base;
	uint8_t     vmpp_bits;
	uint8_t     vmpp_shift;
	bool        vmpp_base_relative;
} vm_packing_params_t;


/*!
 * @macro VM_PACKING_IS_BASE_RELATIVE
 *
 * @brief
 * Whether the packing scheme with those parameters will be base-relative.
 */
#define VM_PACKING_IS_BASE_RELATIVE(ns) \
	(ns##_BITS + ns##_SHIFT <= VM_KERNEL_POINTER_SIGNIFICANT_BITS)


/*!
 * @macro VM_PACKING_PARAMS
 *
 * @brief
 * Constructs a @c vm_packing_params_t structure based on the convention that
 * macros with the @c _BASE, @c _BITS and @c _SHIFT suffixes have been defined
 * to the proper values.
 */
#define VM_PACKING_PARAMS(ns) \
	(vm_packing_params_t){ \
	        .vmpp_base  = ns##_BASE, \
	        .vmpp_bits  = ns##_BITS, \
	        .vmpp_shift = ns##_SHIFT, \
	        .vmpp_base_relative = VM_PACKING_IS_BASE_RELATIVE(ns), \
	}
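
/*
 * Illustrative sketch of the adopter convention (all names hypothetical):
 *
 *      #define FOO_PTR_BITS    32
 *      #define FOO_PTR_SHIFT   4       // packed pointers are 16-byte aligned
 *      #define FOO_PTR_BASE    KERNEL_PMAP_HEAP_RANGE_START
 *
 *      // Customary constant global, so debuggers can decode the scheme.
 *      const vm_packing_params_t foo_ptr_packing_params =
 *          VM_PACKING_PARAMS(FOO_PTR);
 *
 * Whether this ends up sign-extending or base-relative depends on how
 * FOO_PTR_BITS + FOO_PTR_SHIFT compares with
 * VM_KERNEL_POINTER_SIGNIFICANT_BITS on the target platform.
 */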

/**
 * @function vm_pack_pointer
 *
 * @brief
 * Packs a pointer according to the specified parameters.
 *
 * @discussion
 * The convenience @c VM_PACK_POINTER macro synthesizes the @c params
 * argument from the adopter's macro definitions.
 *
 * @param ptr           The pointer to pack.
 * @param params        The encoding parameters.
 * @returns             The packed pointer.
 */
static inline vm_offset_t
vm_pack_pointer(vm_offset_t ptr, vm_packing_params_t params)
{
	if (ptr != 0) {
		ptr = vm_memtag_canonicalize_address(ptr);
	}

	if (!params.vmpp_base_relative) {
		return ptr >> params.vmpp_shift;
	}
	if (ptr) {
		return (ptr - params.vmpp_base) >> params.vmpp_shift;
	}
	return (vm_offset_t)0;
}
#define VM_PACK_POINTER(ptr, ns) \
	vm_pack_pointer(ptr, VM_PACKING_PARAMS(ns))

/**
 * @function vm_unpack_pointer
 *
 * @brief
 * Unpacks a pointer packed with @c vm_pack_pointer().
 *
 * @discussion
 * The convenience @c VM_UNPACK_POINTER macro synthesizes the @c params
 * argument from the adopter's macro definitions.
 *
 * @param packed        The packed value to decode.
 * @param params        The encoding parameters.
 * @returns             The unpacked pointer.
 */
static inline vm_offset_t
vm_unpack_pointer(vm_offset_t packed, vm_packing_params_t params)
{
	if (!params.vmpp_base_relative) {
		intptr_t addr = (intptr_t)packed;
		addr <<= __WORDSIZE - params.vmpp_bits;
		addr >>= __WORDSIZE - params.vmpp_bits - params.vmpp_shift;
		return vm_memtag_fixup_ptr((vm_offset_t)addr);
	}
	if (packed) {
		return vm_memtag_fixup_ptr((packed << params.vmpp_shift) + params.vmpp_base);
	}
	return (vm_offset_t)0;
}
#define VM_UNPACK_POINTER(packed, ns) \
	vm_unpack_pointer(packed, VM_PACKING_PARAMS(ns))
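
/*
 * Illustrative round trip using the hypothetical FOO_PTR parameters sketched
 * above; the packed value fits in a 32-bit field.
 *
 *      struct foo *p = ...;            // 16-byte-aligned heap object
 *      VM_ASSERT_POINTER_PACKABLE((vm_offset_t)p, FOO_PTR);   // defined below
 *      uint32_t packed = (uint32_t)VM_PACK_POINTER((vm_offset_t)p, FOO_PTR);
 *
 *      struct foo *q = (struct foo *)VM_UNPACK_POINTER(packed, FOO_PTR);
 *      // q == p (modulo memory-tagging fixups); NULL round-trips to NULL.
 */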

/**
 * @function vm_packing_max_packable
 *
 * @brief
 * Returns the largest packable address for the given parameters.
 *
 * @discussion
 * The convenience @c VM_PACKING_MAX_PACKABLE macro synthesizes the @c params
 * argument from the adopter's macro definitions.
 *
 * @param params        The encoding parameters.
 * @returns             The largest packable pointer.
 */
static inline vm_offset_t
vm_packing_max_packable(vm_packing_params_t params)
{
	if (!params.vmpp_base_relative) {
		return VM_MAX_KERNEL_ADDRESS;
	}

	vm_offset_t ptr = params.vmpp_base +
	    (((1ul << params.vmpp_bits) - 1) << params.vmpp_shift);

	return ptr >= params.vmpp_base ? ptr : VM_MAX_KERNEL_ADDRESS;
}
#define VM_PACKING_MAX_PACKABLE(ns) \
	vm_packing_max_packable(VM_PACKING_PARAMS(ns))


__abortlike
extern void
vm_packing_pointer_invalid(vm_offset_t ptr, vm_packing_params_t params);

/**
 * @function vm_verify_pointer_packable
 *
 * @brief
 * Panics if the specified pointer cannot be packed with the specified
 * parameters.
 *
 * @discussion
 * The convenience @c VM_VERIFY_POINTER_PACKABLE macro synthesizes the
 * @c params argument from the adopter's macro definitions.
 *
 * The convenience @c VM_ASSERT_POINTER_PACKABLE macro does the same,
 * and is compiled out on non-DEBUG/DEVELOPMENT builds.
 *
 * @param ptr           The pointer to validate.
 * @param params        The encoding parameters.
 */
static inline void
vm_verify_pointer_packable(vm_offset_t ptr, vm_packing_params_t params)
{
	if (ptr != 0) {
		ptr = vm_memtag_canonicalize_address(ptr);
	}

	if (ptr & ((1ul << params.vmpp_shift) - 1)) {
		vm_packing_pointer_invalid(ptr, params);
	}
	if (!params.vmpp_base_relative || ptr == 0) {
		return;
	}
	if (ptr <= params.vmpp_base || ptr > vm_packing_max_packable(params)) {
		vm_packing_pointer_invalid(ptr, params);
	}
}
#define VM_VERIFY_POINTER_PACKABLE(ptr, ns) \
	vm_verify_pointer_packable(ptr, VM_PACKING_PARAMS(ns))

#if DEBUG || DEVELOPMENT
#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) \
	VM_VERIFY_POINTER_PACKABLE(ptr, ns)
#else
#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) ((void)(ptr))
#endif

/**
 * @function vm_packing_verify_range
 *
 * @brief
 * Panics if some pointers in the specified range can't be packed with the
 * specified parameters.
 *
 * @param subsystem     The subsystem requiring the packing.
 * @param min_address   The smallest address of the range.
 * @param max_address   The largest address of the range.
 * @param params        The encoding parameters.
 */
extern void
vm_packing_verify_range(
	const char             *subsystem,
	vm_offset_t             min_address,
	vm_offset_t             max_address,
	vm_packing_params_t     params);

#endif  /* XNU_KERNEL_PRIVATE */

extern vm_size_t page_size;
extern vm_size_t page_mask;
extern int       page_shift;

/*
 * We need a way to get rid of compiler warnings when we cast from a 64-bit
 * value to an address (which may be 32 or 64 bits wide).  An intptr_t is
 * used to convert the value to the right precision, and then to an address.
 * This macro is also used to convert addresses to 32-bit integers, which is
 * a hard (compile-time) failure on a 64-bit kernel.
 */
#include <stdint.h>
#ifndef __CAST_DOWN_CHECK
#define __CAST_DOWN_CHECK

#define CAST_DOWN( type, addr ) \
	( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )

#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) )

#endif /* __CAST_DOWN_CHECK */
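
/*
 * Illustrative sketch (hypothetical values): CAST_DOWN() narrows a wider
 * integer into an address-sized one; using it to narrow an address into a
 * smaller integer type trips the division-by-zero above at compile time,
 * which is the intended hard failure on a 64-bit kernel.
 *
 *      uint64_t     value = some_64bit_value;                  // hypothetical
 *      vm_offset_t  addr  = CAST_DOWN(vm_offset_t, value);     // OK
 *      // uint32_t  bad   = CAST_DOWN(uint32_t, addr);         // build error on LP64
 */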

#endif  /* ASSEMBLER */

#endif  /* KERNEL */

#endif  /* _MACH_VM_PARAM_H_ */