1 | /* |
2 | * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | /* |
57 | */ |
58 | /* |
59 | * File: mach/vm_param.h |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
61 | * Date: 1985 |
62 | * |
63 | * Machine independent virtual memory parameters. |
64 | * |
65 | */ |
66 | |
67 | #ifndef _MACH_VM_PARAM_H_ |
68 | #define _MACH_VM_PARAM_H_ |
69 | |
70 | #include <mach/machine/vm_param.h> |
71 | |
72 | #ifdef KERNEL |
73 | |
74 | #ifndef ASSEMBLER |
75 | #include <mach/vm_types.h> |
76 | #endif /* ASSEMBLER */ |
77 | |
78 | #include <os/base.h> |
79 | #include <os/overflow.h> |
80 | |
/*
 * The machine independent pages are referred to as PAGES.  A page
 * is some number of hardware pages, depending on the target machine.
 */

#ifndef ASSEMBLER

#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE /* pagesize in addr units */
#define PAGE_MASK_64 (unsigned long long)PAGE_MASK /* mask for off in page */

/*
 * Convert addresses to pages and vice versa.  No rounding is used.
 * The atop_32 and ptoa_32 macros should not be used on 64 bit types.
 * The atop_64 and ptoa_64 macros should be used instead.
 */

#define atop_32(x) ((uint32_t)(x) >> PAGE_SHIFT)
#define ptoa_32(x) ((uint32_t)(x) << PAGE_SHIFT)
#define atop_64(x) ((uint64_t)(x) >> PAGE_SHIFT)
#define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

/* Address <-> page conversions at the kernel's native pointer width. */
#define atop_kernel(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa_kernel(x) ((vm_address_t)(x) << PAGE_SHIFT)

/*
 * While the following block is enabled, the legacy atop and ptoa
 * macros will behave correctly.  If not, they will generate
 * invalid lvalue errors.
 */

#if 1
#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT)
#else
/* (0UL = 0) is deliberately ill-formed: any remaining use of the
 * legacy macros then fails to compile with an invalid-lvalue error. */
#define atop(x) (0UL = 0)
#define ptoa(x) (0UL = 0)
#endif
118 | |
/*
 * Page-size rounding macros for the Public fixed-width VM types.
 * The (signed) cast lets ~PAGE_MASK sign-extend when it is widened to
 * the 64-bit offset type, so the high bits of the address survive.
 */
#define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))

/*
 * Round 'in' up to a page boundary, storing the result through 'out';
 * evaluates to true when the addition overflowed (in which case *out
 * holds the wrapped, page-truncated value).
 * NOTE: 'out' is expanded more than once, so the argument must be free
 * of side effects.
 */
#define round_page_overflow(in, out) __os_warn_unused(({ \
	bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
	*out &= ~((__typeof__(*out))PAGE_MASK); \
	__ovr; \
	}))
130 | |
131 | static inline int OS_WARN_RESULT |
132 | mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out) |
133 | { |
134 | return round_page_overflow(in, out); |
135 | } |
136 | |
/* Page rounding for memory-object offsets; same (signed) sign-extension
 * trick as mach_vm_round_page() above. */
#define memory_object_round_page(x) (((memory_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define memory_object_trunc_page(x) ((memory_object_offset_t)(x) & ~((signed)PAGE_MASK))

/*
 * Rounding macros for the legacy (scalable with the current task's
 * address space size) VM types.
 */

#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(x) & ~((vm_offset_t)PAGE_MASK))

/*
 * Round off or truncate to the nearest page.  These will work
 * for either addresses or counts.  (i.e. 1 byte rounds to 1 page
 * bytes.  The round_page_32 and trunc_page_32 macros should not be
 * used on 64 bit types.  The round_page_64 and trunc_page_64 macros
 * should be used instead.
 *
 * These should only be used in the rare case the size of the address
 * or length is hard-coded as 32 or 64 bit.  Otherwise, the macros
 * associated with the specific VM type should be used.
 */

#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
#define trunc_page_32(x) ((uint32_t)(x) & ~((uint32_t)PAGE_MASK))
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64))
#define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64))
164 | |
/*
 * Enable the following block to find uses of xxx_32 macros that should
 * be xxx_64.  These macros only work in C code, not C++.  The resulting
 * binaries are not functional.  Look for invalid lvalue errors in
 * the compiler output.
 *
 * Enabling the following block will also find use of the xxx_64 macros
 * that have been passed pointers.  The parameters should be cast to an
 * unsigned long type first.  Look for invalid operands to binary + error
 * in the compiler output.
 */

#if 0
#undef atop_32
#undef ptoa_32
#undef round_page_32
#undef trunc_page_32
#undef atop_64
#undef ptoa_64
#undef round_page_64
#undef trunc_page_64

#ifndef __cplusplus

/* Each _32 macro expands to an assignment whose left-hand side is a
 * non-lvalue whenever the argument is 64 bits wide, so misuse is
 * reported as an "invalid lvalue" compile error at the call site. */
#define atop_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define ptoa_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define round_page_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define trunc_page_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)
#else

/* C++ cannot use __builtin_choose_expr; stub the macros out instead. */
#define atop_32(x) (0)
#define ptoa_32(x) (0)
#define round_page_32(x) (0)
#define trunc_page_32(x) (0)

#endif /* ! __cplusplus */

/* Adding a uint8_t* to a pointer argument is an invalid "pointer +
 * pointer" expression, flagging _64 macros that were passed pointers. */
#define atop_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define ptoa_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define round_page_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define trunc_page_64(x) ((uint64_t)((x) + (uint8_t *)0))

#endif
223 | |
/*
 * Determine whether an address is page-aligned, or a count is
 * an exact page multiple.
 */

#define page_aligned(x) (((x) & PAGE_MASK) == 0)

extern vm_size_t mem_size; /* 32-bit size of memory - limited by maxmem - deprecated */
extern uint64_t max_mem; /* 64-bit size of memory - limited by maxmem */

/*
 * The default pager does not handle 64-bit offsets inside its objects,
 * so this limits the size of anonymous memory objects to 4GB minus 1 page.
 * When we need to allocate a chunk of anonymous memory over that size,
 * we have to allocate more than one chunk.
 */
#define ANON_MAX_SIZE 0xFFFFF000ULL
/*
 * Work-around for <rdar://problem/6626493>
 * Break large anonymous memory areas into 128MB chunks to alleviate
 * the cost of copying when copy-on-write is not possible because a
 * small portion of it is being wired.
 */
#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */
248 | |
#ifdef XNU_KERNEL_PRIVATE

#include <kern/debug.h>

extern uint64_t mem_actual; /* 64-bit size of memory - not limited by maxmem */
extern uint64_t sane_size; /* Memory size to use for defaults calculations */
extern addr64_t vm_last_addr; /* Highest kernel virtual address known to the VM system */

/* Fixed bounds of the kernel's virtual address range. */
extern const vm_offset_t vm_min_kernel_address;
extern const vm_offset_t vm_max_kernel_address;

/* Layout markers of the running kernel image and kext range
 * (presumably initialized at boot — values come from elsewhere),
 * plus the ASLR slide and the address-permutation cookie used by the
 * VM_KERNEL_* exposure macros below. */
extern vm_offset_t vm_kernel_stext;
extern vm_offset_t vm_kernel_etext;
extern vm_offset_t vm_kernel_slid_base;
extern vm_offset_t vm_kernel_slid_top;
extern vm_offset_t vm_kernel_slide;
extern vm_offset_t vm_kernel_addrperm;
extern vm_offset_t vm_kext_base;
extern vm_offset_t vm_kext_top;
extern vm_offset_t vm_kernel_base;
extern vm_offset_t vm_kernel_top;
extern vm_offset_t vm_hib_base;

extern vm_offset_t vm_kernel_builtinkmod_text;
extern vm_offset_t vm_kernel_builtinkmod_text_end;

/* True when '_o' lies inside the slid (ASLR-relocated) region
 * [vm_kernel_slid_base, vm_kernel_slid_top). */
#define VM_KERNEL_IS_SLID(_o) \
	(((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) >= vm_kernel_slid_base) && \
	((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) < vm_kernel_slid_top))

/* Convert an unslid (static) kernel address to its runtime address. */
#define VM_KERNEL_SLIDE(_u) ((vm_offset_t)(_u) + vm_kernel_slide)
280 | |
281 | /* |
282 | * The following macros are to be used when exposing kernel addresses to |
283 | * userspace via any of the various debug or info facilities that might exist |
284 | * (e.g. stackshot, proc_info syscall, etc.). It is important to understand |
285 | * the goal of each macro and choose the right one depending on what you are |
286 | * trying to do. Misuse of these macros can result in critical data leaks |
287 | * which in turn lead to all sorts of system vulnerabilities. It is invalid to |
288 | * call these macros on a non-kernel address (NULL is allowed). |
289 | * |
290 | * VM_KERNEL_UNSLIDE: |
291 | * Use this macro when you are exposing an address to userspace which is |
292 | * *guaranteed* to be a "static" kernel or kext address (i.e. coming from text |
293 | * or data sections). These are the addresses which get "slid" via ASLR on |
294 | * kernel or kext load, and it's precisely the slide value we are trying to |
295 | * protect from userspace. |
296 | * |
297 | * VM_KERNEL_ADDRHIDE: |
298 | * Use when exposing an address for internal purposes: debugging, tracing, |
299 | * etc. The address will be unslid if necessary. Other addresses will be |
300 | * hidden on customer builds, and unmodified on internal builds. |
301 | * |
302 | * VM_KERNEL_ADDRHASH: |
303 | * Use this macro when exposing a kernel address to userspace on customer |
304 | * builds. The address can be from the static kernel or kext regions, or the |
305 | * kernel heap. The address will be unslid or hashed as appropriate. |
306 | * |
307 | * |
308 | * ** SECURITY WARNING: The following macros can leak kernel secrets. |
309 | * Use *only* in performance *critical* code. |
310 | * |
311 | * VM_KERNEL_ADDRPERM: |
312 | * VM_KERNEL_UNSLIDE_OR_PERM: |
313 | * Use these macros when exposing a kernel address to userspace on customer |
314 | * builds. The address can be from the static kernel or kext regions, or the |
315 | * kernel heap. The address will be unslid or permuted as appropriate. |
316 | * |
317 | * Nesting of these macros should be considered invalid. |
318 | */ |
319 | |
__BEGIN_DECLS
/* Hash a kernel address for safe exposure to userspace (ADDRHASH). */
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
__END_DECLS

/* Remove the ASLR slide from a slid kernel address.  Only meaningful
 * when VM_KERNEL_IS_SLID(_v) is true.  VM_KERNEL_STRIP_PTR() is
 * defined elsewhere (presumably strips pointer-signature/tag bits). */
#define __DO_UNSLIDE(_v) ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) - vm_kernel_slide)

#if DEBUG || DEVELOPMENT
/* Internal builds: non-slid addresses are exposed unmodified. */
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)VM_KERNEL_STRIP_PTR(_v))
#else
/* Customer builds: non-slid addresses are hidden (reported as 0). */
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)0)
#endif /* DEBUG || DEVELOPMENT */

#define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v))

/* Unslide static addresses; permute other kernel addresses with the
 * vm_kernel_addrperm cookie; pass non-kernel values (e.g. NULL)
 * through unmodified. */
#define VM_KERNEL_UNSLIDE_OR_PERM(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \
	VM_KERNEL_ADDRESS(_v) ? ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) + vm_kernel_addrperm) : \
	(vm_offset_t)VM_KERNEL_STRIP_PTR(_v); \
	})

/* Unslide static addresses; anything else is reported as 0. */
#define VM_KERNEL_UNSLIDE(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \
	})

#define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v)

/*
 * For XNU-private code, the rounding macros defined earlier are
 * replaced by the overflow-checking inline functions that follow.
 */
#undef mach_vm_round_page
#undef round_page
#undef round_page_32
#undef round_page_64
350 | |
351 | static inline mach_vm_offset_t |
352 | mach_vm_round_page(mach_vm_offset_t x) |
353 | { |
354 | if (round_page_overflow(x, &x)) { |
355 | panic("overflow detected" ); |
356 | } |
357 | return x; |
358 | } |
359 | |
360 | static inline vm_offset_t |
361 | round_page(vm_offset_t x) |
362 | { |
363 | if (round_page_overflow(x, &x)) { |
364 | panic("overflow detected" ); |
365 | } |
366 | return x; |
367 | } |
368 | |
369 | static inline mach_vm_offset_t |
370 | round_page_64(mach_vm_offset_t x) |
371 | { |
372 | if (round_page_overflow(x, &x)) { |
373 | panic("overflow detected" ); |
374 | } |
375 | return x; |
376 | } |
377 | |
378 | static inline uint32_t |
379 | round_page_32(uint32_t x) |
380 | { |
381 | if (round_page_overflow(x, &x)) { |
382 | panic("overflow detected" ); |
383 | } |
384 | return x; |
385 | } |
386 | |
387 | #endif /* XNU_KERNEL_PRIVATE */ |
388 | |
/* Page geometry of the machine this kernel is running on. */
extern vm_size_t page_size;
extern vm_size_t page_mask;
extern int page_shift;

/* We need a way to get rid of compiler warnings when we cast from */
/* a 64 bit value to an address (which may be 32 bits or 64-bits). */
/* An intptr_t is used to convert the value to the right precision, and */
/* then to an address. This macro is also used to convert addresses */
/* to 32-bit integers, which is a hard failure for a 64-bit kernel */
#include <stdint.h>
#ifndef __CAST_DOWN_CHECK
#define __CAST_DOWN_CHECK

/* The divide-by-zero arm forces a compiler diagnostic whenever 'type'
 * is narrower than a pointer — i.e. whenever the cast would truncate
 * on this architecture. */
#define CAST_DOWN( type, addr ) \
	( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )

/* Unchecked variant: the caller asserts the truncation is intended. */
#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) )

#endif /* __CAST_DOWN_CHECK */
408 | |
409 | #endif /* ASSEMBLER */ |
410 | |
411 | #endif /* KERNEL */ |
412 | |
413 | #endif /* _MACH_VM_PARAM_H_ */ |
414 | |