1 | /* |
2 | * Copyright (c) 2009 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <kern/backtrace.h> |
30 | #include <mach/sdt.h> |
31 | #include <vm/vm_map_store.h> |
32 | #include <vm/vm_pageout.h> /* for vm_debug_events */ |
33 | |
#if MACH_ASSERT
/*
 * first_free_is_valid_store:
 *
 * Debug-only consistency check for the map's cached "first_free"
 * hint; simply delegates to the linked-list store's validator.
 */
boolean_t
first_free_is_valid_store( vm_map_t map )
{
	return first_free_is_valid_ll( map );
}
#endif
41 | |
42 | boolean_t |
43 | vm_map_store_has_RB_support( struct vm_map_header *hdr ) |
44 | { |
45 | if ((void*)hdr->rb_head_store.rbh_root == (void*)(int)SKIP_RB_TREE) { |
46 | return FALSE; |
47 | } |
48 | return TRUE; |
49 | } |
50 | |
/*
 * vm_map_store_init:
 *
 * Initialize every backing store for a map header: the linked list
 * is always present; the red-black tree only when the header has
 * not opted out of it.
 */
void
vm_map_store_init( struct vm_map_header *hdr )
{
	vm_map_store_init_ll(hdr);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support(hdr) == TRUE) {
		vm_map_store_init_rb(hdr);
	}
#endif
}
61 | |
/*
 * vm_map_store_lookup_entry:
 *
 * Find the map entry containing (or immediately preceding) "address",
 * returned through "entry".  Which backing store performs the lookup
 * is selected at compile time: the linked list when
 * VM_MAP_STORE_USE_LL is defined, otherwise the red-black tree when
 * VM_MAP_STORE_USE_RB is defined.  NOTE(review): if neither macro is
 * defined the function has no body and no return — presumably the
 * build always defines exactly one of them; confirm in the config
 * headers.
 */
boolean_t
vm_map_store_lookup_entry(
	vm_map_t map,
	vm_map_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
#ifdef VM_MAP_STORE_USE_LL
	return (vm_map_store_lookup_entry_ll( map, address, entry ));
#elif defined VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &map->hdr )) {
		return (vm_map_store_lookup_entry_rb( map, address, entry ));
	} else {
		/* RB builds require the tree; a header that skipped it is fatal here. */
		panic("VM map lookups need RB tree support.\n" );
		return FALSE; /* For compiler warning.*/
	}
#endif
}
79 | |
80 | void |
81 | vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type ) |
82 | { |
83 | switch (update_type) { |
84 | case VM_MAP_ENTRY_CREATE: |
85 | break; |
86 | case VM_MAP_ENTRY_DELETE: |
87 | if((map->holelistenabled == FALSE) && ((entry) == (map)->first_free)) { |
88 | (map)->first_free = vm_map_to_entry(map); |
89 | } |
90 | if((entry) == (map)->hint) { |
91 | (map)->hint = vm_map_to_entry(map); |
92 | } |
93 | break; |
94 | default: |
95 | break; |
96 | } |
97 | } |
98 | |
99 | /* |
100 | * vm_map_entry_{un,}link: |
101 | * |
102 | * Insert/remove entries from maps (or map copies). |
 * The _vm_map_store_entry_{un,}link variants are used in
 * some places where updating first_free is not needed and
 * where copy maps are being modified. Also note that the
 * first argument is the map header.
107 | * Modifying the vm_map_store_entry_{un,}link functions to |
108 | * deal with these call sites made the interface confusing |
109 | * and clunky. |
110 | */ |
111 | |
112 | void |
113 | _vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry) |
114 | { |
115 | assert(entry->vme_start < entry->vme_end); |
116 | if (__improbable(vm_debug_events)) |
117 | DTRACE_VM4(map_entry_link, vm_map_t, (char *)mapHdr - sizeof (lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end); |
118 | |
119 | vm_map_store_entry_link_ll(mapHdr, after_where, entry); |
120 | #ifdef VM_MAP_STORE_USE_RB |
121 | if (vm_map_store_has_RB_support( mapHdr )) { |
122 | vm_map_store_entry_link_rb(mapHdr, after_where, entry); |
123 | } |
124 | #endif |
125 | #if MAP_ENTRY_INSERTION_DEBUG |
126 | backtrace(&entry->vme_insertion_bt[0], |
127 | (sizeof (entry->vme_insertion_bt) / sizeof (uintptr_t))); |
128 | #endif |
129 | } |
130 | |
131 | void |
132 | vm_map_store_entry_link( |
133 | vm_map_t map, |
134 | vm_map_entry_t after_where, |
135 | vm_map_entry_t entry, |
136 | vm_map_kernel_flags_t vmk_flags) |
137 | { |
138 | vm_map_t VMEL_map; |
139 | vm_map_entry_t VMEL_entry; |
140 | VMEL_map = (map); |
141 | VMEL_entry = (entry); |
142 | |
143 | _vm_map_store_entry_link(&VMEL_map->hdr, after_where, VMEL_entry); |
144 | if( VMEL_map->disable_vmentry_reuse == TRUE ) { |
145 | UPDATE_HIGHEST_ENTRY_END( VMEL_map, VMEL_entry); |
146 | } else { |
147 | update_first_free_ll(VMEL_map, VMEL_map->first_free); |
148 | #ifdef VM_MAP_STORE_USE_RB |
149 | if (vm_map_store_has_RB_support( &VMEL_map->hdr )) { |
150 | update_first_free_rb(VMEL_map, entry, TRUE); |
151 | } |
152 | #endif |
153 | } |
154 | #if PMAP_CS |
155 | (void) vm_map_entry_cs_associate(map, entry, vmk_flags); |
156 | #else /* PMAP_CS */ |
157 | (void) vmk_flags; |
158 | #endif /* PMAP_CS */ |
159 | } |
160 | |
161 | void |
162 | _vm_map_store_entry_unlink( struct vm_map_header * mapHdr, vm_map_entry_t entry) |
163 | { |
164 | if (__improbable(vm_debug_events)) |
165 | DTRACE_VM4(map_entry_unlink, vm_map_t, (char *)mapHdr - sizeof (lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end); |
166 | |
167 | vm_map_store_entry_unlink_ll(mapHdr, entry); |
168 | #ifdef VM_MAP_STORE_USE_RB |
169 | if (vm_map_store_has_RB_support( mapHdr )) { |
170 | vm_map_store_entry_unlink_rb(mapHdr, entry); |
171 | } |
172 | #endif |
173 | } |
174 | |
175 | void |
176 | vm_map_store_entry_unlink( vm_map_t map, vm_map_entry_t entry) |
177 | { |
178 | vm_map_t VMEU_map; |
179 | vm_map_entry_t VMEU_entry = NULL; |
180 | vm_map_entry_t VMEU_first_free = NULL; |
181 | VMEU_map = (map); |
182 | VMEU_entry = (entry); |
183 | |
184 | if (map->holelistenabled == FALSE) { |
185 | if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start){ |
186 | VMEU_first_free = VMEU_entry->vme_prev; |
187 | } else { |
188 | VMEU_first_free = VMEU_map->first_free; |
189 | } |
190 | } |
191 | _vm_map_store_entry_unlink(&VMEU_map->hdr, VMEU_entry); |
192 | vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE); |
193 | update_first_free_ll(VMEU_map, VMEU_first_free); |
194 | #ifdef VM_MAP_STORE_USE_RB |
195 | if (vm_map_store_has_RB_support( &VMEU_map->hdr )) { |
196 | update_first_free_rb(VMEU_map, entry, FALSE); |
197 | } |
198 | #endif |
199 | } |
200 | |
201 | void |
202 | vm_map_store_copy_reset( vm_map_copy_t copy,vm_map_entry_t entry) |
203 | { |
204 | int nentries = copy->cpy_hdr.nentries; |
205 | vm_map_store_copy_reset_ll(copy, entry, nentries); |
206 | #ifdef VM_MAP_STORE_USE_RB |
207 | if (vm_map_store_has_RB_support( ©->c_u.hdr )) { |
208 | vm_map_store_copy_reset_rb(copy, entry, nentries); |
209 | } |
210 | #endif |
211 | } |
212 | |
213 | void |
214 | vm_map_store_update_first_free( vm_map_t map, vm_map_entry_t first_free_entry, boolean_t new_entry_creation) |
215 | { |
216 | update_first_free_ll(map, first_free_entry); |
217 | #ifdef VM_MAP_STORE_USE_RB |
218 | if (vm_map_store_has_RB_support( &map->hdr )) { |
219 | update_first_free_rb(map, first_free_entry, new_entry_creation); |
220 | } |
221 | #endif |
222 | } |
223 | |