1 | /* |
2 | * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | /* |
57 | */ |
58 | /* |
59 | * File: memory_object.h |
60 | * Author: Michael Wayne Young |
61 | * |
62 | * External memory management interface definition. |
63 | */ |
64 | |
65 | #ifndef _MACH_MEMORY_OBJECT_TYPES_H_ |
66 | #define _MACH_MEMORY_OBJECT_TYPES_H_ |
67 | |
68 | /* |
69 | * User-visible types used in the external memory |
70 | * management interface: |
71 | */ |
72 | |
73 | #include <mach/port.h> |
74 | #include <mach/message.h> |
75 | #include <mach/vm_prot.h> |
76 | #include <mach/vm_sync.h> |
77 | #include <mach/vm_types.h> |
78 | #include <mach/machine/vm_types.h> |
79 | |
80 | #include <sys/cdefs.h> |
81 | |
82 | #define VM_64_BIT_DATA_OBJECTS |
83 | |
/* 64-bit byte offset within a memory object */
typedef unsigned long long memory_object_offset_t;
/* 64-bit byte size of a memory object (or a range of one) */
typedef unsigned long long memory_object_size_t;
/* size of a cluster of pages for paging I/O — presumably bytes; natural_t-sized */
typedef natural_t memory_object_cluster_size_t;
/* opaque fault context handed to pagers; contents private to the VM system */
typedef natural_t * memory_object_fault_info_t;

/* unique identifier for a VM object */
typedef unsigned long long vm_object_id_t;
90 | |
91 | |
92 | /* |
93 | * Temporary until real EMMI version gets re-implemented |
94 | */ |
95 | |
96 | #ifdef KERNEL_PRIVATE |
97 | |
98 | /* IMPORTANT: this type must match "ipc_object_bits_t" from ipc/ipc_port.h */ |
99 | typedef natural_t mo_ipc_object_bits_t; |
100 | |
101 | struct memory_object_pager_ops; /* forward declaration */ |
102 | |
103 | /* |
104 | * "memory_object" and "memory_object_control" types used to be Mach ports |
105 | * in user space and can be passed as such to some kernel APIs. |
106 | * Their first field must match the "io_bits" field of a |
107 | * "struct ipc_object" to identify them as a "IKOT_MEMORY_OBJECT" and |
108 | * "IKOT_MEM_OBJ_CONTROL" respectively. |
109 | */ |
110 | typedef struct memory_object { |
111 | mo_ipc_object_bits_t mo_ikot; /* DO NOT CHANGE */ |
112 | const struct memory_object_pager_ops *; |
113 | struct memory_object_control *mo_control; |
114 | } *memory_object_t; |
115 | |
/*
 * In-kernel control handle for a memory object.  The leading IPC bits
 * identify it as IKOT_MEM_OBJ_CONTROL (see the block comment above).
 */
typedef struct memory_object_control {
	mo_ipc_object_bits_t moc_ikot; /* DO NOT CHANGE */
	struct vm_object *moc_object; /* the VM object under control */
} *memory_object_control_t;
120 | |
121 | typedef const struct { |
122 | void (*)( |
123 | memory_object_t mem_obj); |
124 | void (*)( |
125 | memory_object_t mem_obj); |
126 | kern_return_t (*)( |
127 | memory_object_t mem_obj, |
128 | memory_object_control_t mem_control, |
129 | memory_object_cluster_size_t size); |
130 | kern_return_t (*)( |
131 | memory_object_t mem_obj); |
132 | kern_return_t (*)( |
133 | memory_object_t mem_obj, |
134 | memory_object_offset_t offset, |
135 | memory_object_cluster_size_t length, |
136 | vm_prot_t desired_access, |
137 | memory_object_fault_info_t fault_info); |
138 | kern_return_t (*)( |
139 | memory_object_t mem_obj, |
140 | memory_object_offset_t offset, |
141 | memory_object_cluster_size_t size, |
142 | memory_object_offset_t *resid_offset, |
143 | int *io_error, |
144 | boolean_t dirty, |
145 | boolean_t kernel_copy, |
146 | int upl_flags); |
147 | kern_return_t (*)( |
148 | memory_object_t mem_obj, |
149 | memory_object_offset_t offset, |
150 | memory_object_cluster_size_t size); |
151 | kern_return_t (*)( |
152 | memory_object_t mem_obj, |
153 | memory_object_offset_t offset, |
154 | memory_object_size_t size, |
155 | vm_prot_t desired_access); |
156 | kern_return_t (*)( |
157 | memory_object_t mem_obj, |
158 | memory_object_offset_t offset, |
159 | memory_object_size_t size, |
160 | vm_sync_t sync_flags); |
161 | kern_return_t (*)( |
162 | memory_object_t mem_obj, |
163 | vm_prot_t prot); |
164 | kern_return_t (*)( |
165 | memory_object_t mem_obj); |
166 | kern_return_t (*)( |
167 | memory_object_t mem_obj, |
168 | boolean_t reclaim_backing_store); |
169 | const char *; |
170 | } * ; |
171 | |
172 | #else /* KERNEL_PRIVATE */ |
173 | |
174 | typedef mach_port_t memory_object_t; |
175 | typedef mach_port_t memory_object_control_t; |
176 | |
177 | #endif /* KERNEL_PRIVATE */ |
178 | |
179 | typedef memory_object_t *memory_object_array_t; |
180 | /* A memory object ... */ |
181 | /* Used by the kernel to retrieve */ |
182 | /* or store data */ |
183 | |
184 | typedef mach_port_t memory_object_name_t; |
185 | /* Used to describe the memory ... */ |
186 | /* object in vm_regions() calls */ |
187 | |
188 | typedef mach_port_t memory_object_default_t; |
189 | /* Registered with the host ... */ |
190 | /* for creating new internal objects */ |
191 | |
192 | #define MEMORY_OBJECT_NULL ((memory_object_t) 0) |
193 | #define MEMORY_OBJECT_CONTROL_NULL ((memory_object_control_t) 0) |
194 | #define MEMORY_OBJECT_NAME_NULL ((memory_object_name_t) 0) |
195 | #define MEMORY_OBJECT_DEFAULT_NULL ((memory_object_default_t) 0) |
196 | |
197 | |
198 | typedef int memory_object_copy_strategy_t; |
199 | /* How memory manager handles copy: */ |
200 | #define MEMORY_OBJECT_COPY_NONE 0 |
201 | /* ... No special support */ |
202 | #define MEMORY_OBJECT_COPY_CALL 1 |
203 | /* ... Make call on memory manager */ |
204 | #define MEMORY_OBJECT_COPY_DELAY 2 |
205 | /* ... Memory manager doesn't |
206 | * change data externally. |
207 | */ |
208 | #define MEMORY_OBJECT_COPY_TEMPORARY 3 |
209 | /* ... Memory manager doesn't |
210 | * change data externally, and |
211 | * doesn't need to see changes. |
212 | */ |
213 | #define MEMORY_OBJECT_COPY_SYMMETRIC 4 |
214 | /* ... Memory manager doesn't |
215 | * change data externally, |
216 | * doesn't need to see changes, |
217 | * and object will not be |
218 | * multiply mapped. |
219 | * |
220 | * XXX |
221 | * Not yet safe for non-kernel use. |
222 | */ |
223 | |
224 | #define MEMORY_OBJECT_COPY_INVALID 5 |
225 | /* ... An invalid copy strategy, |
226 | * for external objects which |
227 | * have not been initialized. |
228 | * Allows copy_strategy to be |
229 | * examined without also |
230 | * examining pager_ready and |
231 | * internal. |
232 | */ |
233 | |
234 | typedef int memory_object_return_t; |
235 | /* Which pages to return to manager |
236 | this time (lock_request) */ |
237 | #define MEMORY_OBJECT_RETURN_NONE 0 |
238 | /* ... don't return any. */ |
239 | #define MEMORY_OBJECT_RETURN_DIRTY 1 |
240 | /* ... only dirty pages. */ |
241 | #define MEMORY_OBJECT_RETURN_ALL 2 |
242 | /* ... dirty and precious pages. */ |
243 | #define MEMORY_OBJECT_RETURN_ANYTHING 3 |
244 | /* ... any resident page. */ |
245 | |
246 | /* |
247 | * Data lock request flags |
248 | */ |
249 | |
250 | #define MEMORY_OBJECT_DATA_FLUSH 0x1 |
251 | #define MEMORY_OBJECT_DATA_NO_CHANGE 0x2 |
252 | #define MEMORY_OBJECT_DATA_PURGE 0x4 |
253 | #define MEMORY_OBJECT_COPY_SYNC 0x8 |
254 | #define MEMORY_OBJECT_DATA_SYNC 0x10 |
255 | #define MEMORY_OBJECT_IO_SYNC 0x20 |
256 | #define MEMORY_OBJECT_DATA_FLUSH_ALL 0x40 |
257 | |
258 | /* |
259 | * Types for the memory object flavor interfaces |
260 | */ |
261 | |
262 | #define MEMORY_OBJECT_INFO_MAX (1024) |
263 | typedef int *memory_object_info_t; |
264 | typedef int memory_object_flavor_t; |
265 | typedef int memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX]; |
266 | |
267 | |
268 | #define MEMORY_OBJECT_PERFORMANCE_INFO 11 |
269 | #define MEMORY_OBJECT_ATTRIBUTE_INFO 14 |
270 | #define MEMORY_OBJECT_BEHAVIOR_INFO 15 |
271 | |
272 | #ifdef PRIVATE |
273 | |
274 | #define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10 |
275 | #define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12 |
276 | |
/* Legacy behavior attributes, served via OLD_MEMORY_OBJECT_BEHAVIOR_INFO. */
struct old_memory_object_behave_info {
	memory_object_copy_strategy_t copy_strategy; /* MEMORY_OBJECT_COPY_* */
	boolean_t temporary;
	boolean_t invalidate;
};
282 | |
/* Legacy attribute list, served via OLD_MEMORY_OBJECT_ATTRIBUTE_INFO. */
struct old_memory_object_attr_info { /* old attr list */
	boolean_t object_ready;
	boolean_t may_cache;
	memory_object_copy_strategy_t copy_strategy; /* MEMORY_OBJECT_COPY_* */
};
288 | |
289 | typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t; |
290 | typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t; |
291 | typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t; |
292 | typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t; |
293 | |
294 | #define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ |
295 | (sizeof(old_memory_object_behave_info_data_t)/sizeof(int))) |
296 | #define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ |
297 | (sizeof(old_memory_object_attr_info_data_t)/sizeof(int))) |
298 | |
299 | #ifdef KERNEL |
300 | |
301 | __BEGIN_DECLS |
302 | extern void memory_object_reference(memory_object_t object); |
303 | extern void memory_object_deallocate(memory_object_t object); |
304 | |
305 | extern void memory_object_default_reference(memory_object_default_t); |
306 | extern void memory_object_default_deallocate(memory_object_default_t); |
307 | |
308 | extern void memory_object_control_reference(memory_object_control_t control); |
309 | extern void memory_object_control_deallocate(memory_object_control_t control); |
310 | extern int memory_object_control_uiomove(memory_object_control_t, memory_object_offset_t, void *, int, int, int, int); |
311 | __END_DECLS |
312 | |
313 | #endif /* KERNEL */ |
314 | |
315 | #endif /* PRIVATE */ |
316 | |
/* Performance attributes, served via MEMORY_OBJECT_PERFORMANCE_INFO. */
struct memory_object_perf_info {
	memory_object_cluster_size_t cluster_size; /* paging cluster size — presumably bytes */
	boolean_t may_cache;
};
321 | |
/* Object attributes, served via MEMORY_OBJECT_ATTRIBUTE_INFO. */
struct memory_object_attr_info {
	memory_object_copy_strategy_t copy_strategy; /* MEMORY_OBJECT_COPY_* */
	memory_object_cluster_size_t cluster_size; /* paging cluster size — presumably bytes */
	boolean_t may_cache_object;
	boolean_t temporary;
};
328 | |
/* Behavior attributes, served via MEMORY_OBJECT_BEHAVIOR_INFO. */
struct memory_object_behave_info {
	memory_object_copy_strategy_t copy_strategy; /* MEMORY_OBJECT_COPY_* */
	boolean_t temporary;
	boolean_t invalidate;
	boolean_t silent_overwrite;
	boolean_t advisory_pageout;
};
336 | |
337 | |
338 | typedef struct memory_object_behave_info *memory_object_behave_info_t; |
339 | typedef struct memory_object_behave_info memory_object_behave_info_data_t; |
340 | |
341 | typedef struct memory_object_perf_info *memory_object_perf_info_t; |
342 | typedef struct memory_object_perf_info memory_object_perf_info_data_t; |
343 | |
344 | typedef struct memory_object_attr_info *memory_object_attr_info_t; |
345 | typedef struct memory_object_attr_info memory_object_attr_info_data_t; |
346 | |
347 | #define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ |
348 | (sizeof(memory_object_behave_info_data_t)/sizeof(int))) |
349 | #define MEMORY_OBJECT_PERF_INFO_COUNT ((mach_msg_type_number_t) \ |
350 | (sizeof(memory_object_perf_info_data_t)/sizeof(int))) |
351 | #define MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ |
352 | (sizeof(memory_object_attr_info_data_t)/sizeof(int))) |
353 | |
354 | #define invalid_memory_object_flavor(f) \ |
355 | (f != MEMORY_OBJECT_ATTRIBUTE_INFO && \ |
356 | f != MEMORY_OBJECT_PERFORMANCE_INFO && \ |
357 | f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO && \ |
358 | f != MEMORY_OBJECT_BEHAVIOR_INFO && \ |
359 | f != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO) |
360 | |
361 | |
362 | /* |
363 | * Used to support options on memory_object_release_name call |
364 | */ |
365 | #define MEMORY_OBJECT_TERMINATE_IDLE 0x1 |
366 | #define MEMORY_OBJECT_RESPECT_CACHE 0x2 |
367 | #define MEMORY_OBJECT_RELEASE_NO_OP 0x4 |
368 | |
369 | |
370 | /* named entry processor mapping options */ |
371 | /* enumerated */ |
372 | #define MAP_MEM_NOOP 0 |
373 | #define MAP_MEM_COPYBACK 1 |
374 | #define MAP_MEM_IO 2 |
375 | #define MAP_MEM_WTHRU 3 |
376 | #define MAP_MEM_WCOMB 4 /* Write combining mode */ |
377 | /* aka store gather */ |
378 | #define MAP_MEM_INNERWBACK 5 |
379 | #define MAP_MEM_POSTED 6 |
380 | |
/* Extract the MAP_MEM_* caching mode from bits 24..31 of flags. */
#define GET_MAP_MEM(flags) \
	((((unsigned int)(flags)) >> 24) & 0xFF)

/*
 * Store a MAP_MEM_* caching mode into bits 24..31 of flags, preserving
 * the low 24 bits.  NOTE(review): the expansion ends with a semicolon,
 * so this macro can only be used as a statement, not in an expression.
 */
#define SET_MAP_MEM(caching, flags) \
	((flags) = ((((unsigned int)(caching)) << 24) \
	& 0xFF000000) | ((flags) & 0xFFFFFF));
387 | |
388 | /* leave room for vm_prot bits (0xFF ?) */ |
389 | #define MAP_MEM_LEDGER_TAG_NETWORK 0x002000 /* charge to "network" ledger */ |
390 | #define MAP_MEM_PURGABLE_KERNEL_ONLY 0x004000 /* volatility controlled by kernel */ |
391 | #define MAP_MEM_GRAB_SECLUDED 0x008000 /* can grab secluded pages */ |
392 | #define MAP_MEM_ONLY 0x010000 /* change processor caching */ |
393 | #define MAP_MEM_NAMED_CREATE 0x020000 /* create extant object */ |
394 | #define MAP_MEM_PURGABLE 0x040000 /* create a purgable VM object */ |
395 | #define MAP_MEM_NAMED_REUSE 0x080000 /* reuse provided entry if identical */ |
396 | #define MAP_MEM_USE_DATA_ADDR 0x100000 /* preserve address of data, rather than base of page */ |
397 | #define MAP_MEM_VM_COPY 0x200000 /* make a copy of a VM range */ |
398 | #define MAP_MEM_VM_SHARE 0x400000 /* extract a VM range for remap */ |
399 | #define MAP_MEM_4K_DATA_ADDR 0x800000 /* preserve 4K aligned address of data */ |
400 | |
401 | #define MAP_MEM_FLAGS_MASK 0x00FFFF00 |
402 | #define MAP_MEM_FLAGS_USER ( \ |
403 | MAP_MEM_PURGABLE_KERNEL_ONLY | \ |
404 | MAP_MEM_GRAB_SECLUDED | \ |
405 | MAP_MEM_ONLY | \ |
406 | MAP_MEM_NAMED_CREATE | \ |
407 | MAP_MEM_PURGABLE | \ |
408 | MAP_MEM_NAMED_REUSE | \ |
409 | MAP_MEM_USE_DATA_ADDR | \ |
410 | MAP_MEM_VM_COPY | \ |
411 | MAP_MEM_VM_SHARE | \ |
412 | MAP_MEM_4K_DATA_ADDR) |
413 | #define MAP_MEM_FLAGS_ALL ( \ |
414 | MAP_MEM_LEDGER_TAG_NETWORK | \ |
415 | MAP_MEM_FLAGS_USER) |
416 | |
417 | #ifdef KERNEL |
418 | |
419 | /* |
420 | * Universal Page List data structures |
421 | * |
422 | * A UPL describes a bounded set of physical pages |
423 | * associated with some range of an object or map |
424 | * and a snapshot of the attributes associated with |
425 | * each of those pages. |
426 | */ |
427 | #ifdef PRIVATE |
428 | #define MAX_UPL_TRANSFER_BYTES (1024 * 1024) |
429 | #define MAX_UPL_SIZE_BYTES (1024 * 1024 * 64) |
430 | |
431 | #ifndef CONFIG_EMBEDDED |
432 | #define MAX_UPL_SIZE (MAX_UPL_SIZE_BYTES / PAGE_SIZE) |
433 | #define MAX_UPL_TRANSFER (MAX_UPL_TRANSFER_BYTES / PAGE_SIZE) |
434 | #endif |
435 | |
436 | |
/*
 * Per-page snapshot within a UPL (see the block comment above).  The
 * bitfield layout is visible only to the kernel proper; all other
 * clients see an opaque word and must use the upl_page_xxx() accessors.
 */
struct upl_page_info {
	ppnum_t phys_addr; /* physical page index number */
	unsigned int
#ifdef XNU_KERNEL_PRIVATE
	free_when_done:1,/* page is to be freed on commit */
	absent:1, /* No valid data in this page */
	dirty:1, /* Page must be cleaned (O) */
	precious:1, /* must be cleaned, we have only copy */
	device:1, /* no page data, mapped dev memory */
	speculative:1, /* page is valid, but not yet accessed */
	cs_validated:1, /* CODE SIGNING: page was validated */
	cs_tainted:1, /* CODE SIGNING: page is tainted */
	cs_nx:1, /* CODE SIGNING: page is NX */
	needed:1, /* page should be left in cache on abort */
	mark:1, /* a mark flag for the creator to use as they wish */
	:0; /* force to long boundary */
#else
	opaque; /* use upl_page_xxx() accessor funcs */
#endif /* XNU_KERNEL_PRIVATE */
};
457 | |
458 | #else |
459 | |
/* Non-kernel view of a UPL page entry: opaque storage only. */
struct upl_page_info {
	unsigned int opaque[2]; /* use upl_page_xxx() accessor funcs */
};
463 | |
464 | #endif /* PRIVATE */ |
465 | |
466 | typedef struct upl_page_info upl_page_info_t; |
467 | typedef upl_page_info_t *upl_page_info_array_t; |
468 | typedef upl_page_info_array_t upl_page_list_ptr_t; |
469 | |
470 | typedef uint32_t upl_offset_t; /* page-aligned byte offset */ |
471 | typedef uint32_t upl_size_t; /* page-aligned byte size */ |
472 | |
473 | /* upl invocation flags */ |
474 | /* top nibble is used by super upl */ |
475 | |
476 | typedef uint64_t upl_control_flags_t; |
477 | |
478 | #define UPL_FLAGS_NONE 0x00000000ULL |
479 | #define UPL_COPYOUT_FROM 0x00000001ULL |
480 | #define UPL_PRECIOUS 0x00000002ULL |
481 | #define UPL_NO_SYNC 0x00000004ULL |
482 | #define UPL_CLEAN_IN_PLACE 0x00000008ULL |
483 | #define UPL_NOBLOCK 0x00000010ULL |
484 | #define UPL_RET_ONLY_DIRTY 0x00000020ULL |
485 | #define UPL_SET_INTERNAL 0x00000040ULL |
486 | #define UPL_QUERY_OBJECT_TYPE 0x00000080ULL |
487 | #define UPL_RET_ONLY_ABSENT 0x00000100ULL /* used only for COPY_FROM = FALSE */ |
488 | #define UPL_FILE_IO 0x00000200ULL |
489 | #define UPL_SET_LITE 0x00000400ULL |
490 | #define UPL_SET_INTERRUPTIBLE 0x00000800ULL |
491 | #define UPL_SET_IO_WIRE 0x00001000ULL |
492 | #define UPL_FOR_PAGEOUT 0x00002000ULL |
493 | #define UPL_WILL_BE_DUMPED 0x00004000ULL |
494 | #define UPL_FORCE_DATA_SYNC 0x00008000ULL |
495 | /* continued after the ticket bits... */ |
496 | |
497 | #define UPL_PAGE_TICKET_MASK 0x000F0000ULL |
498 | #define UPL_PAGE_TICKET_SHIFT 16 |
499 | |
500 | /* ... flags resume here */ |
501 | #define UPL_BLOCK_ACCESS 0x00100000ULL |
502 | #define UPL_ENCRYPT 0x00200000ULL |
503 | #define UPL_NOZEROFILL 0x00400000ULL |
504 | #define UPL_WILL_MODIFY 0x00800000ULL /* caller will modify the pages */ |
505 | |
506 | #define UPL_NEED_32BIT_ADDR 0x01000000ULL |
507 | #define UPL_UBC_MSYNC 0x02000000ULL |
508 | #define UPL_UBC_PAGEOUT 0x04000000ULL |
509 | #define UPL_UBC_PAGEIN 0x08000000ULL |
510 | #define UPL_REQUEST_SET_DIRTY 0x10000000ULL |
511 | #define UPL_REQUEST_NO_FAULT 0x20000000ULL /* fail if pages not all resident */ |
512 | #define UPL_NOZEROFILLIO 0x40000000ULL /* allow non zerofill pages present */ |
513 | #define UPL_REQUEST_FORCE_COHERENCY 0x80000000ULL |
514 | |
515 | /* UPL flags known by this kernel */ |
516 | #define UPL_VALID_FLAGS 0xFFFFFFFFFFULL |
517 | |
518 | |
519 | /* upl abort error flags */ |
520 | #define UPL_ABORT_RESTART 0x1 |
521 | #define UPL_ABORT_UNAVAILABLE 0x2 |
522 | #define UPL_ABORT_ERROR 0x4 |
523 | #define UPL_ABORT_FREE_ON_EMPTY 0x8 /* only implemented in wrappers */ |
524 | #define UPL_ABORT_DUMP_PAGES 0x10 |
525 | #define UPL_ABORT_NOTIFY_EMPTY 0x20 |
526 | /* deprecated: #define UPL_ABORT_ALLOW_ACCESS 0x40 */ |
527 | #define UPL_ABORT_REFERENCE 0x80 |
528 | |
529 | /* upl pages check flags */ |
530 | #define UPL_CHECK_DIRTY 0x1 |
531 | |
532 | |
533 | /* |
534 | * upl pagein/pageout flags |
535 | * |
536 | * |
537 | * when I/O is issued from this UPL it should be done synchronously |
538 | */ |
539 | #define UPL_IOSYNC 0x1 |
540 | |
541 | /* |
542 | * the passed in UPL should not have either a commit or abort |
543 | * applied to it by the underlying layers... the site that |
544 | * created the UPL is responsible for cleaning it up. |
545 | */ |
546 | #define UPL_NOCOMMIT 0x2 |
547 | |
548 | /* |
549 | * turn off any speculative read-ahead applied at the I/O layer |
550 | */ |
551 | #define UPL_NORDAHEAD 0x4 |
552 | |
553 | /* |
554 | * pageout request is targeting a real file |
555 | * as opposed to a swap file. |
556 | */ |
557 | |
558 | #define 0x8 |
559 | /* |
560 | * this pageout is being originated as part of an explicit |
561 | * memory synchronization operation... no speculative clustering |
562 | * should be applied, only the range specified should be pushed. |
563 | */ |
564 | #define UPL_MSYNC 0x10 |
565 | |
566 | /* |
567 | * |
568 | */ |
569 | #define UPL_PAGING_ENCRYPTED 0x20 |
570 | |
571 | /* |
572 | * this pageout is being originated as part of an explicit |
573 | * memory synchronization operation that is checking for I/O |
574 | * errors and taking it's own action... if an error occurs, |
575 | * just abort the pages back into the cache unchanged |
576 | */ |
577 | #define UPL_KEEPCACHED 0x40 |
578 | |
579 | /* |
580 | * this pageout originated from within cluster_io to deal |
581 | * with a dirty page that hasn't yet been seen by the FS |
582 | * that backs it... tag it so that the FS can take the |
583 | * appropriate action w/r to its locking model since the |
584 | * pageout will reenter the FS for the same file currently |
585 | * being handled in this context. |
586 | */ |
587 | #define UPL_NESTED_PAGEOUT 0x80 |
588 | |
589 | /* |
590 | * we've detected a sequential access pattern and |
591 | * we are speculatively and aggressively pulling |
592 | * pages in... do not count these as real PAGEINs |
593 | * w/r to our hard throttle maintenance |
594 | */ |
595 | #define UPL_IOSTREAMING 0x100 |
596 | |
597 | /* |
598 | * Currently, it's only used for the swap pagein path. |
599 | * Since the swap + compressed pager layer manage their |
600 | * pages, these pages are not marked "absent" i.e. these |
601 | * are "valid" pages. The pagein path will _not_ issue an |
602 | * I/O (correctly) for valid pages. So, this flag is used |
603 | * to override that logic in the vnode I/O path. |
604 | */ |
605 | #define UPL_IGNORE_VALID_PAGE_CHECK 0x200 |
606 | |
607 | |
608 | |
609 | /* upl commit flags */ |
610 | #define UPL_COMMIT_FREE_ON_EMPTY 0x1 /* only implemented in wrappers */ |
611 | #define UPL_COMMIT_CLEAR_DIRTY 0x2 |
612 | #define UPL_COMMIT_SET_DIRTY 0x4 |
613 | #define UPL_COMMIT_INACTIVATE 0x8 |
614 | #define UPL_COMMIT_NOTIFY_EMPTY 0x10 |
615 | /* deprecated: #define UPL_COMMIT_ALLOW_ACCESS 0x20 */ |
616 | #define UPL_COMMIT_CS_VALIDATED 0x40 |
617 | #define UPL_COMMIT_CLEAR_PRECIOUS 0x80 |
618 | #define UPL_COMMIT_SPECULATE 0x100 |
619 | #define UPL_COMMIT_FREE_ABSENT 0x200 |
620 | #define UPL_COMMIT_WRITTEN_BY_KERNEL 0x400 |
621 | |
622 | #define UPL_COMMIT_KERNEL_ONLY_FLAGS (UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT) |
623 | |
624 | /* flags for return of state from vm_map_get_upl, vm_upl address space */ |
625 | /* based call */ |
626 | #define UPL_DEV_MEMORY 0x1 |
627 | #define UPL_PHYS_CONTIG 0x2 |
628 | |
629 | |
630 | /* |
631 | * Flags for the UPL page ops routine. This routine is not exported |
632 | * out of the kernel at the moment and so the defs live here. |
633 | */ |
634 | #define UPL_POP_DIRTY 0x1 |
635 | #define UPL_POP_PAGEOUT 0x2 |
636 | #define UPL_POP_PRECIOUS 0x4 |
637 | #define UPL_POP_ABSENT 0x8 |
638 | #define UPL_POP_BUSY 0x10 |
639 | |
640 | #define UPL_POP_PHYSICAL 0x10000000 |
641 | #define UPL_POP_DUMP 0x20000000 |
642 | #define UPL_POP_SET 0x40000000 |
643 | #define UPL_POP_CLR 0x80000000 |
644 | |
645 | /* |
646 | * Flags for the UPL range op routine. This routine is not exported |
647 | * out of the kernel at the moemet and so the defs live here. |
648 | */ |
649 | /* |
650 | * UPL_ROP_ABSENT: Returns the extent of the range presented which |
651 | * is absent, starting with the start address presented |
652 | */ |
653 | #define UPL_ROP_ABSENT 0x01 |
654 | /* |
655 | * UPL_ROP_PRESENT: Returns the extent of the range presented which |
656 | * is present (i.e. resident), starting with the start address presented |
657 | */ |
658 | #define UPL_ROP_PRESENT 0x02 |
659 | /* |
660 | * UPL_ROP_DUMP: Dump the pages which are found in the target object |
661 | * for the target range. |
662 | */ |
663 | #define UPL_ROP_DUMP 0x04 |
664 | |
665 | #ifdef PRIVATE |
666 | |
667 | #define UPL_REPRIO_INFO_MASK (0xFFFFFFFF) |
668 | #define UPL_REPRIO_INFO_SHIFT 32 |
669 | |
670 | /* access macros for upl_t */ |
671 | |
672 | #define UPL_DEVICE_PAGE(upl) \ |
673 | (((upl)[0].phys_addr != 0) ? ((upl)[0].device) : FALSE) |
674 | |
675 | #define UPL_PAGE_PRESENT(upl, index) \ |
676 | ((upl)[(index)].phys_addr != 0) |
677 | |
678 | #define UPL_PHYS_PAGE(upl, index) \ |
679 | ((upl)[(index)].phys_addr) |
680 | |
681 | #define UPL_SPECULATIVE_PAGE(upl, index) \ |
682 | (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].speculative) : FALSE) |
683 | |
684 | #define UPL_DIRTY_PAGE(upl, index) \ |
685 | (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE) |
686 | |
687 | #define UPL_PRECIOUS_PAGE(upl, index) \ |
688 | (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].precious) : FALSE) |
689 | |
690 | #define UPL_VALID_PAGE(upl, index) \ |
691 | (((upl)[(index)].phys_addr != 0) ? (!((upl)[(index)].absent)) : FALSE) |
692 | |
693 | #define UPL_PAGEOUT_PAGE(upl, index) \ |
694 | (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].free_when_done) : FALSE) |
695 | |
696 | #define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \ |
697 | (((upl)[(index)].phys_addr != 0) ? \ |
698 | ((upl)[(index)].free_when_done = TRUE) : FALSE) |
699 | |
700 | #define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \ |
701 | (((upl)[(index)].phys_addr != 0) ? \ |
702 | ((upl)[(index)].free_when_done = FALSE) : FALSE) |
703 | |
704 | #define UPL_REPRIO_INFO_BLKNO(upl, index) \ |
705 | (((upl)->upl_reprio_info[(index)]) & UPL_REPRIO_INFO_MASK) |
706 | |
707 | #define UPL_REPRIO_INFO_LEN(upl, index) \ |
708 | ((((upl)->upl_reprio_info[(index)]) >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK) |
709 | |
710 | /* modifier macros for upl_t */ |
711 | |
712 | #define UPL_SET_CS_VALIDATED(upl, index, value) \ |
713 | ((upl)[(index)].cs_validated = ((value) ? TRUE : FALSE)) |
714 | |
715 | #define UPL_SET_CS_TAINTED(upl, index, value) \ |
716 | ((upl)[(index)].cs_tainted = ((value) ? TRUE : FALSE)) |
717 | |
718 | #define UPL_SET_CS_NX(upl, index, value) \ |
719 | ((upl)[(index)].cs_nx = ((value) ? TRUE : FALSE)) |
720 | |
721 | #define UPL_SET_REPRIO_INFO(upl, index, blkno, len) \ |
722 | ((upl)->upl_reprio_info[(index)]) = (((uint64_t)(blkno) & UPL_REPRIO_INFO_MASK) | \ |
723 | (((uint64_t)(len) & UPL_REPRIO_INFO_MASK) << UPL_REPRIO_INFO_SHIFT)) |
724 | |
725 | /* The call prototyped below is used strictly by UPL_GET_INTERNAL_PAGE_LIST */ |
726 | |
727 | extern vm_size_t upl_offset_to_pagelist; |
728 | extern vm_size_t upl_get_internal_pagelist_offset(void); |
729 | extern void* upl_get_internal_vectorupl(upl_t); |
730 | extern upl_page_info_t* upl_get_internal_vectorupl_pagelist(upl_t); |
731 | |
732 | /*Use this variant to get the UPL's page list iff:*/ |
733 | /*- the upl being passed in is already part of a vector UPL*/ |
734 | /*- the page list you want is that of this "sub-upl" and not that of the entire vector-upl*/ |
735 | |
736 | #define UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl) \ |
737 | ((upl_page_info_t *)((upl_offset_to_pagelist == 0) ? \ |
738 | (uintptr_t)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \ |
739 | (uintptr_t)upl + (unsigned int)upl_offset_to_pagelist)) |
740 | |
741 | /* UPL_GET_INTERNAL_PAGE_LIST is only valid on internal objects where the */ |
742 | /* list request was made with the UPL_INTERNAL flag */ |
743 | |
744 | |
745 | #define UPL_GET_INTERNAL_PAGE_LIST(upl) \ |
746 | ((upl_get_internal_vectorupl(upl) != NULL ) ? (upl_get_internal_vectorupl_pagelist(upl)) : \ |
747 | ((upl_page_info_t *)((upl_offset_to_pagelist == 0) ? \ |
748 | (uintptr_t)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \ |
749 | (uintptr_t)upl + (unsigned int)upl_offset_to_pagelist))) |
750 | |
751 | __BEGIN_DECLS |
752 | |
753 | extern ppnum_t upl_phys_page(upl_page_info_t *upl, int index); |
754 | extern boolean_t upl_device_page(upl_page_info_t *upl); |
755 | extern boolean_t upl_speculative_page(upl_page_info_t *upl, int index); |
756 | extern void upl_clear_dirty(upl_t upl, boolean_t value); |
757 | extern void upl_set_referenced(upl_t upl, boolean_t value); |
758 | extern void upl_range_needed(upl_t upl, int index, int count); |
759 | #if CONFIG_IOSCHED |
760 | extern int64_t upl_blkno(upl_page_info_t *upl, int index); |
761 | extern void upl_set_blkno(upl_t upl, vm_offset_t upl_offset, int size, int64_t blkno); |
762 | #endif |
763 | |
764 | __END_DECLS |
765 | |
766 | #endif /* PRIVATE */ |
767 | |
768 | __BEGIN_DECLS |
769 | |
770 | extern boolean_t upl_page_present(upl_page_info_t *upl, int index); |
771 | extern boolean_t upl_dirty_page(upl_page_info_t *upl, int index); |
772 | extern boolean_t upl_valid_page(upl_page_info_t *upl, int index); |
773 | extern void upl_deallocate(upl_t upl); |
774 | extern void upl_mark_decmp(upl_t upl); |
775 | extern void upl_unmark_decmp(upl_t upl); |
776 | |
777 | #ifdef KERNEL_PRIVATE |
778 | |
779 | void upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v); |
780 | boolean_t upl_page_get_mark(upl_page_info_t *upl, int index); |
781 | |
782 | #endif // KERNEL_PRIVATE |
783 | |
784 | __END_DECLS |
785 | |
786 | #endif /* KERNEL */ |
787 | |
788 | #endif /* _MACH_MEMORY_OBJECT_TYPES_H_ */ |
789 | |