/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File: vm_object.h
 * Author: Avadis Tevanian, Jr., Michael Wayne Young
 * Date: 1985
 *
 * Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS 50000
#define VM_OBJECT_TRACKING_OP_CREATED 1
#define VM_OBJECT_TRACKING_OP_MODIFIED 2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 * Types defined:
 *
 * vm_object_t Virtual memory object.
 * vm_object_fault_info_t Used to determine cluster size.
 */

struct vm_object_fault_info {
 int interruptible;
 uint32_t user_tag;
 vm_size_t cluster_size;
 vm_behavior_t behavior;
 vm_object_offset_t lo_offset;
 vm_object_offset_t hi_offset;
 unsigned int
 /* boolean_t */ no_cache:1,
 /* boolean_t */ stealth:1,
 /* boolean_t */ io_sync:1,
 /* boolean_t */ cs_bypass:1,
 /* boolean_t */ csm_associated:1,
 /* boolean_t */ mark_zf_absent:1,
 /* boolean_t */ batch_pmap_op:1,
 /* boolean_t */ resilient_media:1,
 /* boolean_t */ no_copy_on_read:1,
 /* boolean_t */ fi_xnu_user_debug:1,
 /* boolean_t */ fi_used_for_tpro:1,
 __vm_object_fault_info_unused_bits:21;
 int pmap_options;
};


#define vo_size vo_un1.vou_size
#define vo_cache_pages_to_scan vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset vo_un2.vou_shadow_offset
#define vo_cache_ts vo_un2.vou_cache_ts
#define vo_owner vo_un2.vou_owner
struct vm_object {
 /*
 * On 64-bit systems we pack the pointers hung off the memq.
 * Those pointers have to be able to point back to the memq.
 * The packed pointers are required to be on a 64-byte boundary,
 * which means two things for the vm_object: (1) the memq
 * struct has to be the first element of the structure so that
 * we can control its alignment, and (2) the vm_object must be
 * aligned on a 64-byte boundary. For static vm_objects
 * this is accomplished via the 'aligned' attribute; for
 * vm_objects in the zone pool, this is accomplished by
 * rounding the size of the vm_object element up to the nearest
 * 64-byte multiple before creating the zone.
 */
 vm_page_queue_head_t memq; /* Resident memory - must be first */
 lck_rw_t Lock; /* Synchronization */

 union {
 vm_object_size_t vou_size; /* Object size (only valid if internal) */
 int vou_cache_pages_to_scan; /* pages yet to be visited in an
 * external object in cache
 */
 } vo_un1;

 struct vm_page *memq_hint;
 int ref_count; /* Number of references */
 unsigned int resident_page_count;
 /* number of resident pages */
 unsigned int wired_page_count; /* number of wired pages
 * use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
 unsigned int reusable_page_count;

 struct vm_object *vo_copy; /* Object that should receive
 * a copy of my changed pages,
 * for copy_delay, or just the
 * temporary object that
 * shadows this object, for
 * copy_call.
 */
 uint32_t vo_copy_version;
 uint32_t __vo_unused_padding;
 struct vm_object *shadow; /* My shadow */
 memory_object_t pager; /* Where to get data */

 union {
 vm_object_offset_t vou_shadow_offset; /* Offset into shadow */
 clock_sec_t vou_cache_ts; /* age of an external object
 * present in cache
 */
 task_t vou_owner; /* If the object is purgeable
 * or has a "ledger_tag", this
 * is the task that owns it.
 */
 } vo_un2;

 vm_object_offset_t paging_offset; /* Offset into memory object */
 memory_object_control_t pager_control; /* Where data comes back */

 memory_object_copy_strategy_t
 copy_strategy; /* How to handle data copy */

 /*
 * Some user processes (mostly VirtualMachine software) take a large
 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
 * VM objects and overflow the 16-bit "activity_in_progress" counter.
 * Since we never enforced any limit there, let's give them 32 bits
 * for backwards compatibility's sake.
 */
 unsigned short paging_in_progress:16;
 unsigned short vo_size_delta;
 unsigned int activity_in_progress;

 /* The memory object ports are
 * being used (e.g., for pagein
 * or pageout) -- don't change
 * any of these fields (i.e.,
 * don't collapse, destroy or
 * terminate)
 */

 unsigned int
 /* boolean_t array */ all_wanted:6, /* Bit array of "want to be
 * awakened" notations. See
 * VM_OBJECT_EVENT_* items
 * below */
 /* boolean_t */ pager_created:1, /* Has pager been created? */
 /* boolean_t */ pager_initialized:1, /* Are fields ready to use? */
 /* boolean_t */ pager_ready:1, /* Will pager take requests? */

 /* boolean_t */ pager_trusted:1, /* The pager for this object
 * is trusted. This is true for
 * all internal objects (backed
 * by the default pager)
 */
 /* boolean_t */ can_persist:1, /* The kernel may keep the data
 * for this object (and rights
 * to the memory object) after
 * all address map references
 * are deallocated?
 */
 /* boolean_t */ internal:1, /* Created by the kernel (and
 * therefore, managed by the
 * default memory manager)
 */
 /* boolean_t */ private:1, /* magic device_pager object,
 * holds private pages only */
 /* boolean_t */ pageout:1, /* pageout object. contains
 * private pages that refer to
 * a real memory object. */
 /* boolean_t */ alive:1, /* Not yet terminated */

 /* boolean_t */ purgable:2, /* Purgable state. See
 * VM_PURGABLE_*
 */
 /* boolean_t */ purgeable_only_by_kernel:1,
 /* boolean_t */ purgeable_when_ripe:1, /* Purgeable when a token
 * becomes ripe.
 */
 /* boolean_t */ shadowed:1, /* Shadow may exist */
 /* boolean_t */ true_share:1,
 /* This object is mapped
 * in more than one place
 * and hence cannot be
 * coalesced */
 /* boolean_t */ terminating:1,
 /* Allows vm_object_lookup
 * and vm_object_deallocate
 * to special case their
 * behavior when they are
 * called as a result of
 * page cleaning during
 * object termination
 */
 /* boolean_t */ named:1, /* Enforces an internal
 * naming convention: by
 * calling the right routines
 * for allocation and
 * destruction, UBC references
 * against the vm_object are
 * checked.
 */
 /* boolean_t */ shadow_severed:1,
 /* When a permanent object
 * backing a COW goes away
 * unexpectedly. This bit
 * allows vm_fault to return
 * an error rather than a
 * zero filled page.
 */
 /* boolean_t */ phys_contiguous:1,
 /* Memory is wired and
 * guaranteed physically
 * contiguous. However
 * it is not device memory
 * and obeys normal virtual
 * memory rules w.r.t pmap
 * access bits.
 */
 /* boolean_t */ nophyscache:1,
 /* When mapped at the
 * pmap level, don't allow
 * primary caching. (for
 * I/O)
 */
 /* boolean_t */ for_realtime:1,
 /* Might be needed for realtime code path */
 /* vm_object_destroy_reason_t */ no_pager_reason:2,
 /* differentiate known and unknown causes */
#if FBDP_DEBUG_OBJECT_NO_PAGER
 /* boolean_t */ fbdp_tracked:1,
 __object1_unused_bits:2;
#else /* FBDP_DEBUG_OBJECT_NO_PAGER */
 __object1_unused_bits:3;
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

 queue_chain_t cached_list; /* Attachment point for the
 * list of objects cached as a
 * result of their can_persist
 * value
 */
 /*
 * the following fields are not protected by any locks
 * they are updated via atomic compare and swap
 */
 vm_object_offset_t last_alloc; /* last allocation offset */
 vm_offset_t cow_hint; /* last page present in */
 /* shadow but not in object */
 int sequential; /* sequential access size */

 uint32_t pages_created;
 uint32_t pages_used;
 /* hold object lock when altering */
 unsigned int
 wimg_bits:8, /* cache WIMG bits */
 code_signed:1, /* pages are signed and should be
 * validated; the signatures are stored
 * with the pager */
 transposed:1, /* object was transposed with another */
 mapping_in_progress:1, /* pager being mapped/unmapped */
 phantom_isssd:1,
 volatile_empty:1,
 volatile_fault:1,
 all_reusable:1,
 blocked_access:1,
 set_cache_attr:1,
 object_is_shared_cache:1,
 purgeable_queue_type:2,
 purgeable_queue_group:3,
 io_tracking:1,
 no_tag_update:1, /* */
#if CONFIG_SECLUDED_MEMORY
 eligible_for_secluded:1,
 can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
 __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
 access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
 __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
 vo_ledger_tag:3,
 vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
 uint32_t access_tracking_reads;
 uint32_t access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

 uint8_t scan_collisions;
 uint8_t __object4_unused_bits[1];
 vm_tag_t wire_tag;

#if CONFIG_PHANTOM_CACHE
 uint32_t phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
 queue_head_t uplq; /* List of outstanding upls */
#endif

#ifdef VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES 25 /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS 10 /* track that many references */
 struct __pip_backtrace {
 void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
 } pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */

 queue_chain_t objq; /* object queue - currently used for purgable queues */
 queue_chain_t task_objq; /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
 queue_chain_t wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
 void *purgeable_owner_bt[16];
 task_t vo_purgeable_volatilizer; /* who made it volatile? */
 void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
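
/*
 * Illustrative sketch (not part of the original interface): the pointer
 * packing rules described at the top of struct vm_object could be expressed
 * as compile-time checks roughly like the following; the exact checks used
 * by the implementation may differ and the names below are only for
 * illustration.
 *
 *	_Static_assert(offsetof(struct vm_object, memq) == 0,
 *	    "memq must be first so its alignment can be controlled");
 *	// zone-backed vm_objects: element size rounded up to 64 bytes, e.g.
 *	//	zone_elem_size = (sizeof(struct vm_object) + 63) & ~63;
 */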

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object) \
 ((object)->volatile_fault && \
 ((object)->purgable == VM_PURGABLE_VOLATILE || \
 (object)->purgable == VM_PURGABLE_EMPTY))

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
 int *access_tracking,
 uint32_t *access_tracking_reads,
 uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern const vm_object_t kernel_object_default; /* the default kernel object */

extern const vm_object_t compressor_object; /* the single compressor object */

extern const vm_object_t retired_pages_object; /* pages retired due to ECC, should never be used */

#define is_kernel_object(object) ((object) == kernel_object_default)

extern const vm_object_t exclaves_object; /* holds VM pages owned by exclaves */

# define VM_MSYNC_INITIALIZED 0
# define VM_MSYNC_SYNCHRONIZING 1
# define VM_MSYNC_DONE 2


extern lck_grp_t vm_map_lck_grp;
extern lck_attr_t vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object) \
 MACRO_BEGIN \
 lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
 assert(!(object)->wired_objq.next); \
 assert(!(object)->wired_objq.prev); \
 queue_enter(&vm_objects_wired, (object), \
 vm_object_t, wired_objq); \
 lck_spin_unlock(&vm_objects_wired_lock); \
 MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object) \
 MACRO_BEGIN \
 if ((object)->wired_objq.next) { \
 lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
 queue_remove(&vm_objects_wired, (object), \
 vm_object_t, wired_objq); \
 lck_spin_unlock(&vm_objects_wired_lock); \
 } \
 MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag) \
 MACRO_BEGIN \
 assert(VM_KERN_MEMORY_NONE != (tag)); \
 assert(VM_KERN_MEMORY_NONE == (object)->wire_tag); \
 (object)->wire_tag = (tag); \
 if (!VM_TAG_ACTIVE_UPDATE) { \
 VM_OBJECT_WIRED_ENQUEUE((object)); \
 } \
 MACRO_END

#define VM_OBJECT_UNWIRED(object) \
 MACRO_BEGIN \
 if (!VM_TAG_ACTIVE_UPDATE) { \
 VM_OBJECT_WIRED_DEQUEUE((object)); \
 } \
 if (VM_KERN_MEMORY_NONE != (object)->wire_tag) { \
 vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count), (object)); \
 (object)->wire_tag = VM_KERN_MEMORY_NONE; \
 } \
 MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object) \
 MACRO_BEGIN \
 { \
 int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag) \
 if (__wireddelta) { \
 boolean_t __overflow __assert_only = \
 os_add_overflow((object)->wired_page_count, __wireddelta, \
 &(object)->wired_page_count); \
 assert(!__overflow); \
 if (!(object)->pageout && !(object)->no_tag_update) { \
 if (__wireddelta > 0) { \
 assert (VM_KERN_MEMORY_NONE != (tag)); \
 if (VM_KERN_MEMORY_NONE == __waswired) { \
 VM_OBJECT_WIRED((object), (tag)); \
 } \
 vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
 } else if (VM_KERN_MEMORY_NONE != __waswired) { \
 assert (VM_KERN_MEMORY_NONE != (object)->wire_tag); \
 vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
 if (!(object)->wired_page_count) { \
 VM_OBJECT_UNWIRED((object)); \
 } \
 } \
 } \
 } \
 } \
 MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta) \
 __wireddelta += delta; \

#define VM_OBJECT_WIRED_PAGE_ADD(object, m) \
 if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m) \
 if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
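
/*
 * Typical usage of the wired-page update macros (sketch only; the object
 * must be locked exclusively, "mem" is a hypothetical vm_page_t being
 * wired, and "tag" is the caller's vm_tag_t):
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, mem);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 *
 * The START/END pair opens and closes a C block so that __wireddelta and
 * __waswired stay local to the update.
 */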



#define OBJECT_LOCK_SHARED 0
#define OBJECT_LOCK_EXCLUSIVE 1

extern lck_grp_t vm_object_lck_grp;
extern lck_attr_t vm_object_lck_attr;
extern lck_attr_t kernel_object_lck_attr;
extern lck_attr_t compressor_object_lck_attr;

extern vm_object_t vm_pageout_scan_wants_object;

extern void vm_object_lock(vm_object_t);
extern bool vm_object_lock_check_contended(vm_object_t);
extern boolean_t vm_object_lock_try(vm_object_t);
extern boolean_t _vm_object_lock_try(vm_object_t);
extern boolean_t vm_object_lock_avoid(vm_object_t);
extern void vm_object_lock_shared(vm_object_t);
extern boolean_t vm_object_lock_yield_shared(vm_object_t);
extern boolean_t vm_object_lock_try_shared(vm_object_t);
extern void vm_object_unlock(vm_object_t);
extern boolean_t vm_object_lock_upgrade(vm_object_t);

/*
 * Object locking macros
 */

#define vm_object_lock_init(object) \
 lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \
 (is_kernel_object(object) ? \
 &kernel_object_lck_attr : \
 (((object) == compressor_object) ? \
 &compressor_object_lck_attr : \
 &vm_object_lck_attr)))
#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#define vm_object_lock_assert_held(object) \
 LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
 LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
 LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
 LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
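
/*
 * Illustrative locking pattern (sketch only, not a verbatim example from
 * the implementation): readers take the lock shared, writers take it
 * exclusive; vm_object_lock_upgrade() can be tried to promote a shared
 * hold, falling back to a fresh exclusive acquisition if the upgrade
 * fails (the sketch assumes a failed upgrade drops the shared hold).
 *
 *	vm_object_lock_shared(object);
 *	...read-only inspection of the object...
 *	if (!vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);		// upgrade failed; reacquire
 *	}
 *	...modify fields guarded by the exclusive lock...
 *	vm_object_unlock(object);
 */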


static inline void
VM_OBJECT_SET_PAGER_CREATED(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->pager_created = value;
}
static inline void
VM_OBJECT_SET_PAGER_INITIALIZED(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->pager_initialized = value;
}
static inline void
VM_OBJECT_SET_PAGER_READY(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->pager_ready = value;
}
static inline void
VM_OBJECT_SET_PAGER_TRUSTED(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->pager_trusted = value;
}
static inline void
VM_OBJECT_SET_CAN_PERSIST(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->can_persist = value;
}
static inline void
VM_OBJECT_SET_INTERNAL(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->internal = value;
}
static inline void
VM_OBJECT_SET_PRIVATE(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->private = value;
}
static inline void
VM_OBJECT_SET_PAGEOUT(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->pageout = value;
}
static inline void
VM_OBJECT_SET_ALIVE(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->alive = value;
}
static inline void
VM_OBJECT_SET_PURGABLE(
 vm_object_t object,
 int value)
{
 vm_object_lock_assert_exclusive(object);
 object->purgable = value;
 assertf(object->purgable == value, "0x%x != 0x%x", value, object->purgable);
}
static inline void
VM_OBJECT_SET_PURGEABLE_ONLY_BY_KERNEL(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->purgeable_only_by_kernel = value;
}
static inline void
VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->purgeable_when_ripe = value;
}
static inline void
VM_OBJECT_SET_SHADOWED(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->shadowed = value;
}
static inline void
VM_OBJECT_SET_TRUE_SHARE(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->true_share = value;
}
static inline void
VM_OBJECT_SET_TERMINATING(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->terminating = value;
}
static inline void
VM_OBJECT_SET_NAMED(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->named = value;
}
static inline void
VM_OBJECT_SET_SHADOW_SEVERED(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->shadow_severed = value;
}
static inline void
VM_OBJECT_SET_PHYS_CONTIGUOUS(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->phys_contiguous = value;
}
static inline void
VM_OBJECT_SET_NOPHYSCACHE(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->nophyscache = value;
}
static inline void
VM_OBJECT_SET_FOR_REALTIME(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->for_realtime = value;
}
static inline void
VM_OBJECT_SET_NO_PAGER_REASON(
 vm_object_t object,
 int value)
{
 vm_object_lock_assert_exclusive(object);
 object->no_pager_reason = value;
 assertf(object->no_pager_reason == value, "0x%x != 0x%x", value, object->no_pager_reason);
}
#if FBDP_DEBUG_OBJECT_NO_PAGER
static inline void
VM_OBJECT_SET_FBDP_TRACKED(
 vm_object_t object,
 bool value)
{
 vm_object_lock_assert_exclusive(object);
 object->fbdp_tracked = value;
}
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

/*
 * Declare procedures that operate on VM objects.
 */

__private_extern__ void vm_object_bootstrap(void);

__private_extern__ void vm_object_reaper_init(void);

__private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size);

__private_extern__ void _vm_object_allocate(vm_object_size_t size,
 vm_object_t object);

__private_extern__ void vm_object_set_size(
 vm_object_t object,
 vm_object_size_t outer_size,
 vm_object_size_t inner_size);

#define vm_object_reference_locked(object) \
 MACRO_BEGIN \
 vm_object_t RLObject = (object); \
 vm_object_lock_assert_exclusive(object); \
 assert((RLObject)->ref_count > 0); \
 (RLObject)->ref_count++; \
 assert((RLObject)->ref_count > 1); \
 MACRO_END


#define vm_object_reference_shared(object) \
 MACRO_BEGIN \
 vm_object_t RLObject = (object); \
 vm_object_lock_assert_shared(object); \
 assert((RLObject)->ref_count > 0); \
 OSAddAtomic(1, &(RLObject)->ref_count); \
 assert((RLObject)->ref_count > 0); \
 MACRO_END
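
/*
 * Sketch of the intended distinction (not normative): use
 * vm_object_reference_locked() when the object lock is already held
 * exclusively (a plain increment is then safe), and
 * vm_object_reference_shared() when only the shared lock is held,
 * since the atomic increment keeps ref_count consistent with other
 * shared-lock holders taking references concurrently.
 */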


__private_extern__ void vm_object_reference(
 vm_object_t object);

#if !MACH_ASSERT

#define vm_object_reference(object) \
MACRO_BEGIN \
 vm_object_t RObject = (object); \
 if (RObject) { \
 vm_object_lock_shared(RObject); \
 vm_object_reference_shared(RObject); \
 vm_object_unlock(RObject); \
 } \
MACRO_END

#endif /* MACH_ASSERT */

__private_extern__ void vm_object_deallocate(
 vm_object_t object);

__private_extern__ void vm_object_pmap_protect(
 vm_object_t object,
 vm_object_offset_t offset,
 vm_object_size_t size,
 pmap_t pmap,
 vm_map_size_t pmap_page_size,
 vm_map_offset_t pmap_start,
 vm_prot_t prot);

__private_extern__ void vm_object_pmap_protect_options(
 vm_object_t object,
 vm_object_offset_t offset,
 vm_object_size_t size,
 pmap_t pmap,
 vm_map_size_t pmap_page_size,
 vm_map_offset_t pmap_start,
 vm_prot_t prot,
 int options);

__private_extern__ void vm_object_page_remove(
 vm_object_t object,
 vm_object_offset_t start,
 vm_object_offset_t end);

__private_extern__ void vm_object_deactivate_pages(
 vm_object_t object,
 vm_object_offset_t offset,
 vm_object_size_t size,
 boolean_t kill_page,
 boolean_t reusable_page,
 boolean_t reusable_no_write,
 struct pmap *pmap,
/* XXX TODO4K: need pmap_page_size here too? */
 vm_map_offset_t pmap_offset);

__private_extern__ void vm_object_reuse_pages(
 vm_object_t object,
 vm_object_offset_t start_offset,
 vm_object_offset_t end_offset,
 boolean_t allow_partial_reuse);

__private_extern__ kern_return_t vm_object_zero(
 vm_object_t object,
 vm_object_offset_t cur_offset,
 vm_object_offset_t end_offset);

__private_extern__ uint64_t vm_object_purge(
 vm_object_t object,
 int flags);

__private_extern__ kern_return_t vm_object_purgable_control(
 vm_object_t object,
 vm_purgable_t control,
 int *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
 vm_object_t object,
 vm_object_offset_t offset,
 vm_object_size_t size,
 unsigned int *resident_page_count,
 unsigned int *dirty_page_count);

__private_extern__ boolean_t vm_object_coalesce(
 vm_object_t prev_object,
 vm_object_t next_object,
 vm_object_offset_t prev_offset,
 vm_object_offset_t next_offset,
 vm_object_size_t prev_size,
 vm_object_size_t next_size);

__private_extern__ boolean_t vm_object_shadow(
 vm_object_t *object,
 vm_object_offset_t *offset,
 vm_object_size_t length,
 boolean_t always_shadow);

__private_extern__ void vm_object_collapse(
 vm_object_t object,
 vm_object_offset_t offset,
 boolean_t can_bypass);

__private_extern__ boolean_t vm_object_copy_quickly(
 vm_object_t object,
 vm_object_offset_t src_offset,
 vm_object_size_t size,
 boolean_t *_src_needs_copy,
 boolean_t *_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
 vm_object_t src_object,
 vm_object_offset_t src_offset,
 vm_object_size_t size,
 bool forking,
 vm_object_t *dst_object,
 vm_object_offset_t *dst_offset,
 boolean_t *dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
 vm_object_t src_object,
 vm_object_offset_t src_offset,
 vm_object_size_t size,
 boolean_t interruptible,
 vm_object_t *_result_object);

__private_extern__ vm_object_t vm_object_copy_delayed(
 vm_object_t src_object,
 vm_object_offset_t src_offset,
 vm_object_size_t size,
 boolean_t src_object_shared);



__private_extern__ kern_return_t vm_object_destroy(
 vm_object_t object,
 vm_object_destroy_reason_t reason);

__private_extern__ void vm_object_pager_create(
 vm_object_t object);

__private_extern__ void vm_object_compressor_pager_create(
 vm_object_t object);

__private_extern__ void vm_object_page_map(
 vm_object_t object,
 vm_object_offset_t offset,
 vm_object_size_t size,
 vm_object_offset_t (*map_fn)
 (void *, vm_object_offset_t),
 void *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
 vm_object_t object,
 vm_object_offset_t offset,
 upl_size_t size,
 upl_t *upl,
 upl_page_info_t *page_info,
 unsigned int *count,
 upl_control_flags_t flags,
 vm_tag_t tag);

__private_extern__ kern_return_t vm_object_transpose(
 vm_object_t object1,
 vm_object_t object2,
 vm_object_size_t transpose_size);

__private_extern__ boolean_t vm_object_sync(
 vm_object_t object,
 vm_object_offset_t offset,
 vm_object_size_t size,
 boolean_t should_flush,
 boolean_t should_return,
 boolean_t should_iosync);

__private_extern__ kern_return_t vm_object_update(
 vm_object_t object,
 vm_object_offset_t offset,
 vm_object_size_t size,
 vm_object_offset_t *error_offset,
 int *io_errno,
 memory_object_return_t should_return,
 int flags,
 vm_prot_t prot);

__private_extern__ kern_return_t vm_object_lock_request(
 vm_object_t object,
 vm_object_offset_t offset,
 vm_object_size_t size,
 memory_object_return_t should_return,
 int flags,
 vm_prot_t prot);



__private_extern__ vm_object_t vm_object_memory_object_associate(
 memory_object_t pager,
 vm_object_t object,
 vm_object_size_t size,
 boolean_t check_named);


__private_extern__ void vm_object_cluster_size(
 vm_object_t object,
 vm_object_offset_t *start,
 vm_size_t *length,
 vm_object_fault_info_t fault_info,
 uint32_t *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
 vm_object_t object,
 vm_object_offset_t offset,
 ppnum_t phys_page,
 vm_size_t size);

__private_extern__ void vm_object_change_wimg_mode(
 vm_object_t object,
 unsigned int wimg_mode);

extern kern_return_t adjust_vm_object_cache(
 vm_size_t oval,
 vm_size_t nval);

extern kern_return_t vm_object_page_op(
 vm_object_t object,
 vm_object_offset_t offset,
 int ops,
 ppnum_t *phys_entry,
 int *flags);

extern kern_return_t vm_object_range_op(
 vm_object_t object,
 vm_object_offset_t offset_beg,
 vm_object_offset_t offset_end,
 int ops,
 uint32_t *range);


__private_extern__ void vm_object_reap_pages(
 vm_object_t object,
 int reap_type);
#define REAP_REAP 0
#define REAP_TERMINATE 1
#define REAP_PURGEABLE 2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
 vm_object_t object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
 void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
 vm_object_t object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
 uint64_t blkno;
 uint32_t len;
 int priority;
 struct vnode *devvp;
 queue_chain_t io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 * Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED 0
#define VM_OBJECT_EVENT_PAGER_READY 1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS 2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS 3
#define VM_OBJECT_EVENT_UNBLOCKED 4
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 5

#define VM_OBJECT_EVENT_MAX 5 /* 6 bits in "all_wanted", so 0->5 */

static __inline__ wait_result_t
vm_object_assert_wait(
 vm_object_t object,
 int event,
 wait_interrupt_t interruptible)
{
 wait_result_t wr;

 vm_object_lock_assert_exclusive(object);
 assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

 object->all_wanted |= 1 << event;
 wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
 interruptible);
 return wr;
}

static __inline__ wait_result_t
vm_object_wait(
 vm_object_t object,
 int event,
 wait_interrupt_t interruptible)
{
 wait_result_t wr;

 vm_object_assert_wait(object, event, interruptible);
 vm_object_unlock(object);
 wr = thread_block(THREAD_CONTINUE_NULL);
 return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
 vm_object_t object,
 event_t event,
 wait_interrupt_t interruptible)
{
 wait_result_t wr;

 wr = lck_rw_sleep(&object->Lock,
 LCK_SLEEP_PROMOTED_PRI,
 event,
 interruptible);
 return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
 vm_object_t object,
 int event,
 wait_interrupt_t interruptible)
{
 wait_result_t wr;

 vm_object_lock_assert_exclusive(object);
 assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

 object->all_wanted |= 1 << event;
 wr = thread_sleep_vm_object(object,
 (event_t)((vm_offset_t)object + (vm_offset_t)event),
 interruptible);
 return wr;
}

static __inline__ void
vm_object_wakeup(
 vm_object_t object,
 int event)
{
 vm_object_lock_assert_exclusive(object);
 assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

 if (object->all_wanted & (1 << event)) {
 thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
 }
 object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
 vm_object_t object,
 int event)
{
 vm_object_lock_assert_exclusive(object);
 assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

 object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
 vm_object_t object,
 int event)
{
 vm_object_lock_assert_held(object);
 assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

 return object->all_wanted & (1 << event);
}

/*
 * Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object) \
 MACRO_BEGIN \
 int pip = ((object)->paging_in_progress + \
 (object)->activity_in_progress); \
 if (pip < VM_PIP_DEBUG_MAX_REFS) { \
 (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
 VM_PIP_DEBUG_STACK_FRAMES); \
 } \
 MACRO_END
#else /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */

#define vm_object_activity_begin(object) \
 MACRO_BEGIN \
 vm_object_lock_assert_exclusive((object)); \
 VM_PIP_DEBUG_BEGIN((object)); \
 (object)->activity_in_progress++; \
 if ((object)->activity_in_progress == 0) { \
 panic("vm_object_activity_begin(%p): overflow\n", (object));\
 } \
 MACRO_END

#define vm_object_activity_end(object) \
 MACRO_BEGIN \
 vm_object_lock_assert_exclusive((object)); \
 if ((object)->activity_in_progress == 0) { \
 panic("vm_object_activity_end(%p): underflow\n", (object));\
 } \
 (object)->activity_in_progress--; \
 if ((object)->paging_in_progress == 0 && \
 (object)->activity_in_progress == 0) \
 vm_object_wakeup((object), \
 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
 MACRO_END

#define vm_object_paging_begin(object) \
 MACRO_BEGIN \
 vm_object_lock_assert_exclusive((object)); \
 VM_PIP_DEBUG_BEGIN((object)); \
 (object)->paging_in_progress++; \
 if ((object)->paging_in_progress == 0) { \
 panic("vm_object_paging_begin(%p): overflow\n", (object));\
 } \
 MACRO_END

#define vm_object_paging_end(object) \
 MACRO_BEGIN \
 vm_object_lock_assert_exclusive((object)); \
 if ((object)->paging_in_progress == 0) { \
 panic("vm_object_paging_end(%p): underflow\n", (object));\
 } \
 (object)->paging_in_progress--; \
 if ((object)->paging_in_progress == 0) { \
 vm_object_wakeup((object), \
 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
 if ((object)->activity_in_progress == 0) \
 vm_object_wakeup((object), \
 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
 } \
 MACRO_END
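
/*
 * Typical pairing (sketch only, not a verbatim example from the
 * implementation): hold the object lock exclusively, bracket the operation
 * with begin/end, and drop the lock while the actual I/O or pmap work is in
 * flight; waiters block in vm_object_paging_wait() until the count drains.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue the pagein/pageout ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */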

#define vm_object_paging_wait(object, interruptible) \
 MACRO_BEGIN \
 vm_object_lock_assert_exclusive((object)); \
 while ((object)->paging_in_progress != 0 || \
 (object)->activity_in_progress != 0) { \
 wait_result_t _wr; \
 \
 _wr = vm_object_sleep((object), \
 VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
 (interruptible)); \
 \
 /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
 /*XXX break; */ \
 } \
 MACRO_END

#define vm_object_paging_only_wait(object, interruptible) \
 MACRO_BEGIN \
 vm_object_lock_assert_exclusive((object)); \
 while ((object)->paging_in_progress != 0) { \
 wait_result_t _wr; \
 \
 _wr = vm_object_sleep((object), \
 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
 (interruptible)); \
 \
 /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
 /*XXX break; */ \
 } \
 MACRO_END


#define vm_object_mapping_begin(object) \
 MACRO_BEGIN \
 vm_object_lock_assert_exclusive((object)); \
 assert(! (object)->mapping_in_progress); \
 (object)->mapping_in_progress = TRUE; \
 MACRO_END

#define vm_object_mapping_end(object) \
 MACRO_BEGIN \
 vm_object_lock_assert_exclusive((object)); \
 assert((object)->mapping_in_progress); \
 (object)->mapping_in_progress = FALSE; \
 vm_object_wakeup((object), \
 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS); \
 MACRO_END

#define vm_object_mapping_wait(object, interruptible) \
 MACRO_BEGIN \
 vm_object_lock_assert_exclusive((object)); \
 while ((object)->mapping_in_progress) { \
 wait_result_t _wr; \
 \
 _wr = vm_object_sleep((object), \
 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
 (interruptible)); \
 /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
 /*XXX break; */ \
 } \
 assert(!(object)->mapping_in_progress); \
 MACRO_END



#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
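
/*
 * Example (assuming 4KB pages, i.e. PAGE_MASK == 0xFFF):
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 *	vm_object_round_page(0x2000) == 0x2000
 */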

extern void vm_object_cache_add(vm_object_t);
extern void vm_object_cache_remove(vm_object_t);
extern int vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
#define VM_OBJECT_OWNER(object) \
 ((object == VM_OBJECT_NULL || \
 ((object)->purgable == VM_PURGABLE_DENY && \
 (object)->vo_ledger_tag == 0) || \
 (object)->vo_owner == TASK_NULL) \
 ? TASK_NULL /* not owned */ \
 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED) \
 ? kernel_task /* disowned -> kernel */ \
 : (object)->vo_owner)) /* explicit owner */ \

extern void vm_object_ledger_tag_ledgers(
 vm_object_t object,
 int *ledger_idx_volatile,
 int *ledger_idx_nonvolatile,
 int *ledger_idx_volatile_compressed,
 int *ledger_idx_nonvolatile_compressed,
 boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
 vm_object_t object,
 int new_ledger_tag,
 task_t new_owner,
 int new_ledger_flags,
 boolean_t task_objq_locked);

// LP64todo: all the current tools are 32bit, obviously never worked for 64b
// so probably should be a real 32b ID vs. ptr.
// Current users just check for equality
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((o)))

static inline void
VM_OBJECT_COPY_SET(
 vm_object_t object,
 vm_object_t copy)
{
 vm_object_lock_assert_exclusive(object);
 object->vo_copy = copy;
 if (copy != VM_OBJECT_NULL) {
 object->vo_copy_version++;
 }
}

#endif /* _VM_VM_OBJECT_H_ */