1/*
2 * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: memory_object.h
60 * Author: Michael Wayne Young
61 *
62 * External memory management interface definition.
63 */
64
65#ifndef _MACH_MEMORY_OBJECT_TYPES_H_
66#define _MACH_MEMORY_OBJECT_TYPES_H_
67
68/*
69 * User-visible types used in the external memory
70 * management interface:
71 */
72
73#include <mach/port.h>
74#include <mach/message.h>
75#include <mach/vm_prot.h>
76#include <mach/vm_sync.h>
77#include <mach/vm_types.h>
78#include <mach/machine/vm_types.h>
79
80#include <sys/cdefs.h>
81
#if XNU_KERNEL_PRIVATE
#include <os/refcnt.h>
/*
 * On LP64, "struct memory_object" (below) has a 4-byte alignment hole
 * after "mo_ikot" that is repurposed as an embedded refcount ("mo_ref").
 * MEMORY_OBJECT_HAS_REFCOUNT tells pager implementations whether that
 * embedded refcount exists in this configuration.
 */
#if __LP64__
#define MEMORY_OBJECT_HAS_REFCOUNT 1
#else
#define MEMORY_OBJECT_HAS_REFCOUNT 0
#endif
#endif /* XNU_KERNEL_PRIVATE */
90
/* Advertises that memory object offsets and sizes are 64 bits wide. */
#define VM_64_BIT_DATA_OBJECTS

typedef unsigned long long      memory_object_offset_t;         /* byte offset within a memory object */
typedef unsigned long long      memory_object_size_t;           /* byte size of a memory object range */
typedef natural_t               memory_object_cluster_size_t;   /* paging cluster size -- presumably bytes, TODO confirm */
typedef natural_t               *memory_object_fault_info_t;    /* opaque fault-info handle handed to pagers */

typedef unsigned long long      vm_object_id_t;                 /* identifier for a VM object */
99
100
101/*
102 * Temporary until real EMMI version gets re-implemented
103 */
104
#ifdef KERNEL_PRIVATE

/* IMPORTANT: this type must match "ipc_object_bits_t" from ipc/ipc_port.h */
typedef natural_t mo_ipc_object_bits_t;

struct memory_object_pager_ops; /* forward declaration */

/*
 * Inside the kernel, a memory object "control" handle is simply a
 * pointer to the underlying VM object.
 */
typedef struct vm_object *memory_object_control_t;
113/*
114 * "memory_object" used to be a Mach port in user space and could be passed
115 * as such to some kernel APIs.
116 *
117 * Its first field must match the "io_bits" field of a
118 * "struct ipc_object" to identify them as a "IKOT_MEMORY_OBJECT".
119 */
typedef struct memory_object {
	mo_ipc_object_bits_t    mo_ikot;        /* DO NOT CHANGE: must overlay ipc_object's "io_bits" */
#if __LP64__
#if XNU_KERNEL_PRIVATE
	/*
	 * On LP64 there's a 4 byte hole that is perfect for a refcount.
	 * Expose it so that all pagers can take advantage of it.
	 */
	os_ref_atomic_t         mo_ref;
#else
	unsigned int            __mo_padding;   /* keeps layout identical to the kernel-private version */
#endif /* XNU_KERNEL_PRIVATE */
#endif /* __LP64__ */
	const struct memory_object_pager_ops *mo_pager_ops;     /* callback table for this object's pager */
	memory_object_control_t mo_control;     /* associated VM object (see memory_object_control_t) */
} *memory_object_t;
136
/*
 * Table of callbacks implemented by a pager (external memory manager).
 * The kernel calls through this table to initialize a memory object,
 * move data in and out of it, and tear it down.  The typedef is
 * const-qualified: ops tables are immutable once published.
 *
 * NOTE(review): member order is ABI -- obsolete slots below are kept as
 * padding so the offsets of later members do not change.
 */
typedef const struct memory_object_pager_ops {
	/* Take an additional reference on the memory object. */
	void (*memory_object_reference)(
		memory_object_t mem_obj);
	/* Release a reference on the memory object. */
	void (*memory_object_deallocate)(
		memory_object_t mem_obj);
	/* Associate the object with its control handle after creation. */
	kern_return_t (*memory_object_init)(
		memory_object_t mem_obj,
		memory_object_control_t mem_control,
		memory_object_cluster_size_t size);
	/* Detach the object prior to destruction. */
	kern_return_t (*memory_object_terminate)(
		memory_object_t mem_obj);
	/* Ask the pager to supply data for the given range (fault path). */
	kern_return_t (*memory_object_data_request)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t length,
		vm_prot_t desired_access,
		memory_object_fault_info_t fault_info);
	/* Hand data back to the pager (pageout/return path). */
	kern_return_t (*memory_object_data_return)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size,
		memory_object_offset_t *resid_offset,
		int *io_error,
		boolean_t dirty,
		boolean_t kernel_copy,
		int upl_flags);
	/* Provide initial data for a range -- TODO confirm exact semantics. */
	kern_return_t (*memory_object_data_initialize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size);
#if XNU_KERNEL_PRIVATE
	/* Obsolete entry points, retained only as layout padding. */
	void *__obsolete_memory_object_data_unlock;
	void *__obsolete_memory_object_synchronize;
#else
	kern_return_t (*memory_object_data_unlock)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_prot_t desired_access);      /* obsolete */
	kern_return_t (*memory_object_synchronize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_sync_t sync_flags);          /* obsolete */
#endif /* !XNU_KERNEL_PRIVATE */
	/* Notification that the object is being mapped with "prot" access. */
	kern_return_t (*memory_object_map)(
		memory_object_t mem_obj,
		vm_prot_t prot);
	/* Notification that the last mapping of the object went away. */
	kern_return_t (*memory_object_last_unmap)(
		memory_object_t mem_obj);
#if XNU_KERNEL_PRIVATE
	void *__obsolete_memory_object_data_reclaim;    /* obsolete; layout padding */
#else
	kern_return_t (*memory_object_data_reclaim)(
		memory_object_t mem_obj,
		boolean_t reclaim_backing_store);       /* obsolete */
#endif /* !XNU_KERNEL_PRIVATE */
	/* Report the VM object (and offset) backing this one, if any. */
	boolean_t (*memory_object_backing_object)(
		memory_object_t mem_obj,
		memory_object_offset_t mem_obj_offset,
		vm_object_t *backing_object,
		vm_object_offset_t *backing_offset);
	const char *memory_object_pager_name;   /* human-readable pager name */
} * memory_object_pager_ops_t;
201
202#else /* KERNEL_PRIVATE */
203
204typedef mach_port_t memory_object_t;
205/*
206 * vestigial, maintained for source compatibility,
207 * no MIG interface will accept or return non NULL
208 * objects for those.
209 */
210typedef mach_port_t memory_object_control_t;
211
212#endif /* KERNEL_PRIVATE */
213
214typedef memory_object_t *memory_object_array_t;
215/* A memory object ... */
216/* Used by the kernel to retrieve */
217/* or store data */
218
219typedef mach_port_t memory_object_name_t;
220/* Used to describe the memory ... */
221/* object in vm_regions() calls */
222
223typedef mach_port_t memory_object_default_t;
224/* Registered with the host ... */
225/* for creating new internal objects */
226
227#define MEMORY_OBJECT_NULL ((memory_object_t) 0)
228#define MEMORY_OBJECT_CONTROL_NULL ((memory_object_control_t) 0)
229#define MEMORY_OBJECT_NAME_NULL ((memory_object_name_t) 0)
230#define MEMORY_OBJECT_DEFAULT_NULL ((memory_object_default_t) 0)
231
232
233typedef int memory_object_copy_strategy_t;
234/* How memory manager handles copy: */
235#define MEMORY_OBJECT_COPY_NONE 0
236/* ... No special support */
237#define MEMORY_OBJECT_COPY_CALL 1
238/* ... Make call on memory manager */
239#define MEMORY_OBJECT_COPY_DELAY 2
240/* ... Memory manager doesn't
241 * change data externally.
242 */
243#define MEMORY_OBJECT_COPY_TEMPORARY 3
244/* ... Memory manager doesn't
245 * change data externally, and
246 * doesn't need to see changes.
247 */
248#define MEMORY_OBJECT_COPY_SYMMETRIC 4
249/* ... Memory manager doesn't
250 * change data externally,
251 * doesn't need to see changes,
252 * and object will not be
253 * multiply mapped.
254 *
255 * XXX
256 * Not yet safe for non-kernel use.
257 */
258
259#define MEMORY_OBJECT_COPY_INVALID 5
260/* ... An invalid copy strategy,
261 * for external objects which
262 * have not been initialized.
263 * Allows copy_strategy to be
264 * examined without also
265 * examining pager_ready and
266 * internal.
267 */
268
269#define MEMORY_OBJECT_COPY_DELAY_FORK 6
270/*
271 * ... Like MEMORY_OBJECT_COPY_DELAY for vm_map_fork() but like
272 * MEMORY_OBJECT_COPY_NONE otherwise.
273 */
274
275typedef int memory_object_return_t;
276/* Which pages to return to manager
277 * this time (lock_request) */
278#define MEMORY_OBJECT_RETURN_NONE 0
279/* ... don't return any. */
280#define MEMORY_OBJECT_RETURN_DIRTY 1
281/* ... only dirty pages. */
282#define MEMORY_OBJECT_RETURN_ALL 2
283/* ... dirty and precious pages. */
284#define MEMORY_OBJECT_RETURN_ANYTHING 3
285/* ... any resident page. */
286
287/*
288 * Data lock request flags
289 */
290
291#define MEMORY_OBJECT_DATA_FLUSH 0x1
292#define MEMORY_OBJECT_DATA_NO_CHANGE 0x2
293#define MEMORY_OBJECT_DATA_PURGE 0x4
294#define MEMORY_OBJECT_COPY_SYNC 0x8
295#define MEMORY_OBJECT_DATA_SYNC 0x10
296#define MEMORY_OBJECT_IO_SYNC 0x20
297#define MEMORY_OBJECT_DATA_FLUSH_ALL 0x40
298
299/*
300 * Types for the memory object flavor interfaces
301 */
302
303#define MEMORY_OBJECT_INFO_MAX (1024)
304typedef int *memory_object_info_t;
305typedef int memory_object_flavor_t;
306typedef int memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX];
307
308
309#define MEMORY_OBJECT_PERFORMANCE_INFO 11
310#define MEMORY_OBJECT_ATTRIBUTE_INFO 14
311#define MEMORY_OBJECT_BEHAVIOR_INFO 15
312
313#ifdef PRIVATE
314
315#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10
316#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12
317
/* Legacy behavior-info layout (flavor OLD_MEMORY_OBJECT_BEHAVIOR_INFO). */
struct old_memory_object_behave_info {
	memory_object_copy_strategy_t copy_strategy;    /* one of MEMORY_OBJECT_COPY_* */
	boolean_t       temporary;
	boolean_t       invalidate;
};
323
/* Legacy attribute-info layout (flavor OLD_MEMORY_OBJECT_ATTRIBUTE_INFO). */
struct old_memory_object_attr_info {            /* old attr list */
	boolean_t       object_ready;
	boolean_t       may_cache;
	memory_object_copy_strategy_t copy_strategy;    /* one of MEMORY_OBJECT_COPY_* */
};
329
typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t;
typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t;
typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t;
typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t;

/* Sizes of the legacy info structures above, in 32-bit int units. */
#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT     ((mach_msg_type_number_t) \
	(sizeof(old_memory_object_behave_info_data_t)/sizeof(int)))
#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT       ((mach_msg_type_number_t) \
	(sizeof(old_memory_object_attr_info_data_t)/sizeof(int)))
339
340#ifdef KERNEL
341
__BEGIN_DECLS
/* Take / drop a reference on a memory object. */
extern void memory_object_reference(memory_object_t object);
extern void memory_object_deallocate(memory_object_t object);
/* Report the VM object (and offset within it) backing "mem_obj", if any. */
extern boolean_t memory_object_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);

/* Take / drop a reference on a memory object control handle. */
extern void memory_object_control_reference(memory_object_control_t control);
extern void memory_object_control_deallocate(memory_object_control_t control);
/*
 * NOTE(review): parameter meanings are not visible in this header;
 * see the implementation before relying on argument order.
 */
extern int memory_object_control_uiomove(memory_object_control_t, memory_object_offset_t, void *, int, int, int, int);
__END_DECLS
355
356#endif /* KERNEL */
357
358#endif /* PRIVATE */
359
/* Performance attributes (flavor MEMORY_OBJECT_PERFORMANCE_INFO). */
struct memory_object_perf_info {
	memory_object_cluster_size_t    cluster_size;
	boolean_t                       may_cache;
};
364
/* Object attributes (flavor MEMORY_OBJECT_ATTRIBUTE_INFO). */
struct memory_object_attr_info {
	memory_object_copy_strategy_t   copy_strategy;  /* one of MEMORY_OBJECT_COPY_* */
	memory_object_cluster_size_t    cluster_size;
	boolean_t                       may_cache_object;
	boolean_t                       temporary;
};
371
/* Behavior attributes (flavor MEMORY_OBJECT_BEHAVIOR_INFO). */
struct memory_object_behave_info {
	memory_object_copy_strategy_t   copy_strategy;  /* one of MEMORY_OBJECT_COPY_* */
	boolean_t                       temporary;
	boolean_t                       invalidate;
	boolean_t                       silent_overwrite;
	boolean_t                       advisory_pageout;
};
379
380
typedef struct memory_object_behave_info *memory_object_behave_info_t;
typedef struct memory_object_behave_info memory_object_behave_info_data_t;

typedef struct memory_object_perf_info *memory_object_perf_info_t;
typedef struct memory_object_perf_info memory_object_perf_info_data_t;

typedef struct memory_object_attr_info *memory_object_attr_info_t;
typedef struct memory_object_attr_info memory_object_attr_info_data_t;

/* Sizes of the info structures above, in 32-bit int units. */
#define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \
	(sizeof(memory_object_behave_info_data_t)/sizeof(int)))
#define MEMORY_OBJECT_PERF_INFO_COUNT   ((mach_msg_type_number_t) \
	(sizeof(memory_object_perf_info_data_t)/sizeof(int)))
#define MEMORY_OBJECT_ATTR_INFO_COUNT   ((mach_msg_type_number_t) \
	(sizeof(memory_object_attr_info_data_t)/sizeof(int)))
396
/*
 * Evaluates to true (non-zero) when "f" is not one of the recognized
 * memory object info flavors.  Every use of the argument is
 * parenthesized so that expression arguments containing operators of
 * lower precedence than "!=" expand correctly (CERT PRE01-C); note the
 * argument is still evaluated up to five times, so avoid side effects.
 */
#define invalid_memory_object_flavor(f)                                 \
	((f) != MEMORY_OBJECT_ATTRIBUTE_INFO &&                         \
	(f) != MEMORY_OBJECT_PERFORMANCE_INFO &&                        \
	(f) != OLD_MEMORY_OBJECT_BEHAVIOR_INFO &&                       \
	(f) != MEMORY_OBJECT_BEHAVIOR_INFO &&                           \
	(f) != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO)
403
404
405/*
406 * Used to support options on memory_object_release_name call
407 */
408#define MEMORY_OBJECT_TERMINATE_IDLE 0x1
409#define MEMORY_OBJECT_RESPECT_CACHE 0x2
410#define MEMORY_OBJECT_RELEASE_NO_OP 0x4
411
412
413/* named entry processor mapping options */
414/* enumerated */
415#define MAP_MEM_NOOP 0
416#define MAP_MEM_COPYBACK 1
417#define MAP_MEM_IO 2
418#define MAP_MEM_WTHRU 3
419#define MAP_MEM_WCOMB 4 /* Write combining mode */
420 /* aka store gather */
421#define MAP_MEM_INNERWBACK 5
422#define MAP_MEM_POSTED 6
423#define MAP_MEM_RT 7
424#define MAP_MEM_POSTED_REORDERED 8
425#define MAP_MEM_POSTED_COMBINED_REORDERED 9
426
/* Extract the caching mode (MAP_MEM_* value above) from bits 24-31 of "flags". */
#define GET_MAP_MEM(flags)      \
	((((unsigned int)(flags)) >> 24) & 0xFF)

/*
 * Store "caching" into bits 24-31 of "flags", preserving the low 24 bits.
 * NOTE(review): the expansion ends in a semicolon, so this macro is only
 * safe as a standalone statement (e.g. not as the un-braced body of an
 * if/else) -- left unchanged for source compatibility with callers.
 */
#define SET_MAP_MEM(caching, flags)     \
	((flags) = ((((unsigned int)(caching)) << 24) \
	    & 0xFF000000) | ((flags) & 0xFFFFFF));
433
434/* leave room for vm_prot bits (0xFF ?) */
435#define MAP_MEM_LEDGER_TAGGED 0x002000 /* object owned by a specific task and ledger */
436#define MAP_MEM_PURGABLE_KERNEL_ONLY 0x004000 /* volatility controlled by kernel */
437#define MAP_MEM_GRAB_SECLUDED 0x008000 /* can grab secluded pages */
438#define MAP_MEM_ONLY 0x010000 /* change processor caching */
439#define MAP_MEM_NAMED_CREATE 0x020000 /* create extant object */
440#define MAP_MEM_PURGABLE 0x040000 /* create a purgable VM object */
441#define MAP_MEM_NAMED_REUSE 0x080000 /* reuse provided entry if identical */
442#define MAP_MEM_USE_DATA_ADDR 0x100000 /* preserve address of data, rather than base of page */
443#define MAP_MEM_VM_COPY 0x200000 /* make a copy of a VM range */
444#define MAP_MEM_VM_SHARE 0x400000 /* extract a VM range for remap */
445#define MAP_MEM_4K_DATA_ADDR 0x800000 /* preserve 4K aligned address of data */
446
447#define MAP_MEM_FLAGS_MASK 0x00FFFF00
448#define MAP_MEM_FLAGS_USER ( \
449 MAP_MEM_PURGABLE_KERNEL_ONLY | \
450 MAP_MEM_GRAB_SECLUDED | \
451 MAP_MEM_ONLY | \
452 MAP_MEM_NAMED_CREATE | \
453 MAP_MEM_PURGABLE | \
454 MAP_MEM_NAMED_REUSE | \
455 MAP_MEM_USE_DATA_ADDR | \
456 MAP_MEM_VM_COPY | \
457 MAP_MEM_VM_SHARE | \
458 MAP_MEM_LEDGER_TAGGED | \
459 MAP_MEM_4K_DATA_ADDR)
460#define MAP_MEM_FLAGS_ALL ( \
461 MAP_MEM_FLAGS_USER)
462
463#ifdef KERNEL
464
465/*
466 * Universal Page List data structures
467 *
468 * A UPL describes a bounded set of physical pages
469 * associated with some range of an object or map
470 * and a snapshot of the attributes associated with
471 * each of those pages.
472 */
473#ifdef PRIVATE
474#define MAX_UPL_TRANSFER_BYTES (1024 * 1024)
475#define MAX_UPL_SIZE_BYTES (1024 * 1024 * 64)
476
477#define MAX_UPL_SIZE (MAX_UPL_SIZE_BYTES / PAGE_SIZE)
478#define MAX_UPL_TRANSFER (MAX_UPL_TRANSFER_BYTES / PAGE_SIZE)
479
/*
 * Per-page state snapshot taken when a UPL is constructed.
 * NOTE(review): this kernel-private layout must occupy the same space as
 * the opaque public version below -- the trailing zero-width bitfield
 * forces the flag word to a long boundary; confirm sizes stay in sync.
 */
struct upl_page_info {
	ppnum_t         phys_addr;      /* physical page index number */
	unsigned int
#ifdef XNU_KERNEL_PRIVATE
	    free_when_done:1,           /* page is to be freed on commit */
	    absent:1,                   /* No valid data in this page */
	    dirty:1,                    /* Page must be cleaned (O) */
	    precious:1,                 /* must be cleaned, we have only copy */
	    device:1,                   /* no page data, mapped dev memory */
	    speculative:1,              /* page is valid, but not yet accessed */
#define VMP_CS_BITS 4
#define VMP_CS_ALL_FALSE 0x0
#define VMP_CS_ALL_TRUE 0xF
	    cs_validated:VMP_CS_BITS,   /* CODE SIGNING: page was validated */
	    cs_tainted:VMP_CS_BITS,     /* CODE SIGNING: page is tainted */
	    cs_nx:VMP_CS_BITS,          /* CODE SIGNING: page is NX */

	    needed:1,                   /* page should be left in cache on abort */
	    mark:1,                     /* a mark flag for the creator to use as they wish */
	    :0;                         /* force to long boundary */
#else
	    opaque;                     /* use upl_page_xxx() accessor funcs */
#endif /* XNU_KERNEL_PRIVATE */
};
504
505#else
506
/*
 * Opaque public view of the per-page UPL state; contents are accessed
 * only through the upl_page_xxx() functions declared below.
 */
struct upl_page_info {
	unsigned int    opaque[2];      /* use upl_page_xxx() accessor funcs */
};
510
511#endif /* PRIVATE */
512
513typedef struct upl_page_info upl_page_info_t;
514typedef upl_page_info_t *upl_page_info_array_t;
515typedef upl_page_info_array_t upl_page_list_ptr_t;
516
517typedef uint32_t upl_offset_t; /* page-aligned byte offset */
518typedef uint32_t upl_size_t; /* page-aligned byte size */
519
520/* upl invocation flags */
521/* top nibble is used by super upl */
522
523typedef uint64_t upl_control_flags_t;
524
525#define UPL_FLAGS_NONE 0x00000000ULL
526#define UPL_COPYOUT_FROM 0x00000001ULL
527#define UPL_PRECIOUS 0x00000002ULL
528#define UPL_NO_SYNC 0x00000004ULL
529#define UPL_CLEAN_IN_PLACE 0x00000008ULL
530#define UPL_NOBLOCK 0x00000010ULL
531#define UPL_RET_ONLY_DIRTY 0x00000020ULL
532#define UPL_SET_INTERNAL 0x00000040ULL
533#define UPL_QUERY_OBJECT_TYPE 0x00000080ULL
534#define UPL_RET_ONLY_ABSENT 0x00000100ULL /* used only for COPY_FROM = FALSE */
535#define UPL_FILE_IO 0x00000200ULL
536#define UPL_SET_LITE 0x00000400ULL
537#define UPL_SET_INTERRUPTIBLE 0x00000800ULL
538#define UPL_SET_IO_WIRE 0x00001000ULL
539#define UPL_FOR_PAGEOUT 0x00002000ULL
540#define UPL_WILL_BE_DUMPED 0x00004000ULL
541#define UPL_FORCE_DATA_SYNC 0x00008000ULL
542/* continued after the ticket bits... */
543
544#define UPL_PAGE_TICKET_MASK 0x000F0000ULL
545#define UPL_PAGE_TICKET_SHIFT 16
546
547/* ... flags resume here */
548#define UPL_BLOCK_ACCESS 0x00100000ULL
549#define UPL_ENCRYPT 0x00200000ULL
550#define UPL_NOZEROFILL 0x00400000ULL
551#define UPL_WILL_MODIFY 0x00800000ULL /* caller will modify the pages */
552
553#define UPL_NEED_32BIT_ADDR 0x01000000ULL
554#define UPL_UBC_MSYNC 0x02000000ULL
555#define UPL_UBC_PAGEOUT 0x04000000ULL
556#define UPL_UBC_PAGEIN 0x08000000ULL
557#define UPL_REQUEST_SET_DIRTY 0x10000000ULL
558#define UPL_REQUEST_NO_FAULT 0x20000000ULL /* fail if pages not all resident */
559#define UPL_NOZEROFILLIO 0x40000000ULL /* allow non zerofill pages present */
560#define UPL_REQUEST_FORCE_COHERENCY 0x80000000ULL
561
562/* UPL flags known by this kernel */
563#define UPL_VALID_FLAGS 0xFFFFFFFFFFULL
564
565
566/* upl abort error flags */
567#define UPL_ABORT_RESTART 0x1
568#define UPL_ABORT_UNAVAILABLE 0x2
569#define UPL_ABORT_ERROR 0x4
570#define UPL_ABORT_FREE_ON_EMPTY 0x8 /* only implemented in wrappers */
571#define UPL_ABORT_DUMP_PAGES 0x10
572#define UPL_ABORT_NOTIFY_EMPTY 0x20
573/* deprecated: #define UPL_ABORT_ALLOW_ACCESS 0x40 */
574#define UPL_ABORT_REFERENCE 0x80
575
576/* upl pages check flags */
577#define UPL_CHECK_DIRTY 0x1
578
579
580/*
581 * upl pagein/pageout flags
582 *
583 *
584 * when I/O is issued from this UPL it should be done synchronously
585 */
586#define UPL_IOSYNC 0x1
587
588/*
589 * the passed in UPL should not have either a commit or abort
590 * applied to it by the underlying layers... the site that
591 * created the UPL is responsible for cleaning it up.
592 */
593#define UPL_NOCOMMIT 0x2
594
595/*
596 * turn off any speculative read-ahead applied at the I/O layer
597 */
598#define UPL_NORDAHEAD 0x4
599
600/*
601 * pageout request is targeting a real file
602 * as opposed to a swap file.
603 */
604
605#define UPL_VNODE_PAGER 0x8
606/*
607 * this pageout is being originated as part of an explicit
608 * memory synchronization operation... no speculative clustering
609 * should be applied, only the range specified should be pushed.
610 */
611#define UPL_MSYNC 0x10
612
613/*
614 *
615 */
616#define UPL_PAGING_ENCRYPTED 0x20
617
618/*
619 * this pageout is being originated as part of an explicit
620 * memory synchronization operation that is checking for I/O
 * errors and taking its own action... if an error occurs,
622 * just abort the pages back into the cache unchanged
623 */
624#define UPL_KEEPCACHED 0x40
625
626/*
627 * this pageout originated from within cluster_io to deal
628 * with a dirty page that hasn't yet been seen by the FS
629 * that backs it... tag it so that the FS can take the
630 * appropriate action w/r to its locking model since the
631 * pageout will reenter the FS for the same file currently
632 * being handled in this context.
633 */
634#define UPL_NESTED_PAGEOUT 0x80
635
636/*
637 * we've detected a sequential access pattern and
638 * we are speculatively and aggressively pulling
639 * pages in... do not count these as real PAGEINs
640 * w/r to our hard throttle maintenance
641 */
642#define UPL_IOSTREAMING 0x100
643
644/*
645 * Currently, it's only used for the swap pagein path.
646 * Since the swap + compressed pager layer manage their
647 * pages, these pages are not marked "absent" i.e. these
648 * are "valid" pages. The pagein path will _not_ issue an
649 * I/O (correctly) for valid pages. So, this flag is used
650 * to override that logic in the vnode I/O path.
651 */
652#define UPL_IGNORE_VALID_PAGE_CHECK 0x200
653
654
655
656/* upl commit flags */
657#define UPL_COMMIT_FREE_ON_EMPTY 0x1 /* only implemented in wrappers */
658#define UPL_COMMIT_CLEAR_DIRTY 0x2
659#define UPL_COMMIT_SET_DIRTY 0x4
660#define UPL_COMMIT_INACTIVATE 0x8
661#define UPL_COMMIT_NOTIFY_EMPTY 0x10
662/* deprecated: #define UPL_COMMIT_ALLOW_ACCESS 0x20 */
663#define UPL_COMMIT_CS_VALIDATED 0x40
664#define UPL_COMMIT_CLEAR_PRECIOUS 0x80
665#define UPL_COMMIT_SPECULATE 0x100
666#define UPL_COMMIT_FREE_ABSENT 0x200
667#define UPL_COMMIT_WRITTEN_BY_KERNEL 0x400
668
669#define UPL_COMMIT_KERNEL_ONLY_FLAGS (UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT)
670
671/* flags for return of state from vm_map_get_upl, vm_upl address space */
672/* based call */
673#define UPL_DEV_MEMORY 0x1
674#define UPL_PHYS_CONTIG 0x2
675
676
677/*
678 * Flags for the UPL page ops routine. This routine is not exported
679 * out of the kernel at the moment and so the defs live here.
680 */
681#define UPL_POP_DIRTY 0x1
682#define UPL_POP_PAGEOUT 0x2
683#define UPL_POP_PRECIOUS 0x4
684#define UPL_POP_ABSENT 0x8
685#define UPL_POP_BUSY 0x10
686
687#define UPL_POP_PHYSICAL 0x10000000
688#define UPL_POP_DUMP 0x20000000
689#define UPL_POP_SET 0x40000000
690#define UPL_POP_CLR 0x80000000
691
692/*
693 * Flags for the UPL range op routine. This routine is not exported
 * out of the kernel at the moment and so the defs live here.
695 */
696/*
697 * UPL_ROP_ABSENT: Returns the extent of the range presented which
698 * is absent, starting with the start address presented
699 */
700#define UPL_ROP_ABSENT 0x01
701/*
702 * UPL_ROP_PRESENT: Returns the extent of the range presented which
703 * is present (i.e. resident), starting with the start address presented
704 */
705#define UPL_ROP_PRESENT 0x02
706/*
707 * UPL_ROP_DUMP: Dump the pages which are found in the target object
708 * for the target range.
709 */
710#define UPL_ROP_DUMP 0x04
711
712#ifdef PRIVATE
713
/* Reprio info packs a 32-bit block number (low) and length (high) per page. */
#define UPL_REPRIO_INFO_MASK (0xFFFFFFFF)
#define UPL_REPRIO_INFO_SHIFT 32

/*
 * Access macros for upl_t.  Each takes a pointer to the (private)
 * upl_page_info array; a page with phys_addr == 0 is treated as
 * not present and yields FALSE.
 */

#define UPL_DEVICE_PAGE(upl) \
	(((upl)[0].phys_addr != 0) ? ((upl)[0].device) : FALSE)

#define UPL_PAGE_PRESENT(upl, index) \
	((upl)[(index)].phys_addr != 0)

#define UPL_PHYS_PAGE(upl, index) \
	((upl)[(index)].phys_addr)

#define UPL_SPECULATIVE_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].speculative) : FALSE)

#define UPL_DIRTY_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE)

#define UPL_PRECIOUS_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].precious) : FALSE)

#define UPL_VALID_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? (!((upl)[(index)].absent)) : FALSE)

#define UPL_PAGEOUT_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].free_when_done) : FALSE)

/* The two macros below MODIFY the page entry (set/clear free-on-commit). */
#define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?         \
	((upl)[(index)].free_when_done = TRUE) : FALSE)

#define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?         \
	((upl)[(index)].free_when_done = FALSE) : FALSE)

/* Unpack block number / length from the packed reprio info word. */
#define UPL_REPRIO_INFO_BLKNO(upl, index) \
	(((upl)->upl_reprio_info[(index)]) & UPL_REPRIO_INFO_MASK)

#define UPL_REPRIO_INFO_LEN(upl, index) \
	((((upl)->upl_reprio_info[(index)]) >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK)
756
/* Modifier macros for upl_t -- write code-signing state / reprio info. */

#define UPL_SET_CS_VALIDATED(upl, index, value) \
	((upl)[(index)].cs_validated = (value))

#define UPL_SET_CS_TAINTED(upl, index, value) \
	((upl)[(index)].cs_tainted = (value))

#define UPL_SET_CS_NX(upl, index, value) \
	((upl)[(index)].cs_nx = (value))

/*
 * Pack block number (low 32 bits) and length (high 32 bits) into the
 * per-page reprio info word.  NOTE(review): the expansion is an
 * assignment not wrapped in outer parentheses, so use it only as a
 * standalone statement -- left unchanged for source compatibility.
 */
#define UPL_SET_REPRIO_INFO(upl, index, blkno, len) \
	((upl)->upl_reprio_info[(index)]) = (((uint64_t)(blkno) & UPL_REPRIO_INFO_MASK) | \
	(((uint64_t)(len) & UPL_REPRIO_INFO_MASK) << UPL_REPRIO_INFO_SHIFT))
771
772/* UPL_GET_INTERNAL_PAGE_LIST is only valid on internal objects where the */
773/* list request was made with the UPL_INTERNAL flag */
774
775#define UPL_GET_INTERNAL_PAGE_LIST(upl) upl_get_internal_page_list(upl)
776
777__BEGIN_DECLS
778
779extern void *upl_get_internal_vectorupl(upl_t);
780extern upl_page_info_t *upl_get_internal_vectorupl_pagelist(upl_t);
781extern upl_page_info_t *upl_get_internal_page_list(upl_t upl);
782extern ppnum_t upl_phys_page(upl_page_info_t *upl, int index);
783extern boolean_t upl_device_page(upl_page_info_t *upl);
784extern boolean_t upl_speculative_page(upl_page_info_t *upl, int index);
785extern void upl_clear_dirty(upl_t upl, boolean_t value);
786extern void upl_set_referenced(upl_t upl, boolean_t value);
787extern void upl_range_needed(upl_t upl, int index, int count);
788#if CONFIG_IOSCHED
789extern int64_t upl_blkno(upl_page_info_t *upl, int index);
790extern void upl_set_blkno(upl_t upl, vm_offset_t upl_offset, int size, int64_t blkno);
791#endif
792
793__END_DECLS
794
795#endif /* PRIVATE */
796
797__BEGIN_DECLS
798
799extern boolean_t upl_page_present(upl_page_info_t *upl, int index);
800extern boolean_t upl_dirty_page(upl_page_info_t *upl, int index);
801extern boolean_t upl_valid_page(upl_page_info_t *upl, int index);
802extern void upl_deallocate(upl_t upl);
803extern void upl_mark_decmp(upl_t upl);
804extern void upl_unmark_decmp(upl_t upl);
805
806#ifdef KERNEL_PRIVATE
807
808void upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v);
809boolean_t upl_page_get_mark(upl_page_info_t *upl, int index);
810
811#endif // KERNEL_PRIVATE
812
813__END_DECLS
814
815#endif /* KERNEL */
816
817#endif /* _MACH_MEMORY_OBJECT_TYPES_H_ */
818