/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <vm/vm_map.h>

RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

#define VME_FOR_STORE(ptr) __container_of(ptr, struct vm_map_entry, store)

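/*
 * vm_map_store_init_rb:
 * Initialize the red-black tree anchored in a map header to empty.
 */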
void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
	RB_INIT(&(hdr->rb_head_store));
}

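/*
 * rb_node_compare:
 * Ordering predicate for the RB tree. Entries are ordered by start
 * address; a node whose start falls within [parent->vme_start,
 * parent->vme_end) compares equal, so overlapping entries collide
 * on insert.
 */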
int
rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
	vm_map_entry_t vme_c;
	vm_map_entry_t vme_p;

	vme_c = VME_FOR_STORE(node);
	vme_p = VME_FOR_STORE(parent);
	if (vme_c->vme_start < vme_p->vme_start) {
		return -1;
	}
	if (vme_c->vme_start >= vme_p->vme_end) {
		return 1;
	}
	return 0;
}

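/*
 * vm_map_store_lookup_entry_rb:
 * Walk the RB tree looking for the entry that contains "address".
 * On a hit, *vm_entry is the containing entry and TRUE is returned.
 * On a miss, *vm_entry is the closest entry preceding the address
 * (or the map header if there is none) and FALSE is returned.
 */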
bool
vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	struct vm_map_header *hdr = &map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store);
	vm_map_entry_t cur = vm_map_to_entry(map);
	vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store*)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				*vm_entry = cur;
				return TRUE;
			}
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if (prev == VM_MAP_ENTRY_NULL) {
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}

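/*
 * vm_map_store_entry_link_rb:
 * Insert an entry into the map header's RB tree. RB_INSERT only fails
 * if the new entry collides with an existing one, which indicates map
 * corruption, so we panic with both entries' bounds.
 */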
void
vm_map_store_entry_link_rb(struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store = &(entry->store);
	struct vm_map_store *tmp_store;

	if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
		panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx",
		    (uintptr_t)entry->vme_start,
		    (uintptr_t)entry->vme_end,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
	}
}

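/*
 * vm_map_store_entry_unlink_rb:
 * Remove an entry from the map header's RB tree, panicking if the
 * entry is not actually present in the tree.
 */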
void
vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *rb_entry;
	struct vm_map_store *store = &(entry->store);

	rb_entry = RB_FIND( rb_head, rbh, store);
	if (rb_entry == NULL) {
		panic("NO ENTRY TO DELETE");
	}
	RB_REMOVE( rb_head, rbh, store );
}

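/*
 * vm_map_store_copy_reset_rb:
 * Remove up to "nentries" consecutive entries, starting at "entry",
 * from a copy map's RB tree. The entries' linked list is left intact.
 */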
void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
	struct vm_map_header *mapHdr = &(copy->cpy_hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;
	int deleted = 0;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		store = &(entry->store);
		RB_REMOVE( rb_head, rbh, store );
		entry = entry->vme_next;
		deleted++;
		nentries--;
	}
}

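/*
 * vm_map_combine_hole:
 * Merge a hole with the hole immediately following it: extend the first
 * hole to the end of the second, unlink the second from the doubly
 * linked hole list, and free it.
 */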
void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
	vm_map_entry_t middle_hole_entry, last_hole_entry;

	hole_entry->vme_end = hole_entry->vme_next->vme_end;

	middle_hole_entry = hole_entry->vme_next;
	last_hole_entry = middle_hole_entry->vme_next;

	assert(last_hole_entry->vme_prev == middle_hole_entry);
	assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

	last_hole_entry->vme_prev = hole_entry;
	hole_entry->vme_next = last_hole_entry;

	middle_hole_entry->vme_prev = NULL;
	middle_hole_entry->vme_next = NULL;

	zfree_id(ZONE_ID_VM_MAP_HOLES, middle_hole_entry);

	assert(hole_entry->vme_start < hole_entry->vme_end);
	assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}

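/*
 * vm_map_delete_hole:
 * Unlink a hole from the map's hole list and free it, updating the
 * list head and the hole hint as needed.
 */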
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
	if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
		if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			map->holes_list = NULL;
			SAVE_HINT_HOLE_WRITE(map, NULL);
		} else {
			vm_map_entry_t l_next, l_prev;

			l_next = (vm_map_entry_t) map->holes_list->next;
			l_prev = (vm_map_entry_t) map->holes_list->prev;
			map->holes_list = (struct vm_map_links*) l_next;

			l_next->vme_prev = l_prev;
			l_prev->vme_next = l_next;

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
		}
	} else {
		SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

		hole_entry->vme_prev->vme_next = hole_entry->vme_next;
		hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
	}

	hole_entry->vme_next = NULL;
	hole_entry->vme_prev = NULL;
	zfree_id(ZONE_ID_VM_MAP_HOLES, hole_entry);
}

/*
 * For Debugging.
 */

#if DEBUG
extern int vm_check_map_sanity;

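/*
 * check_map_sanity:
 * Debug-only consistency check: walk the map's entries and verify that
 * every gap between consecutive entries is mirrored by a hole with
 * matching bounds, panicking on the first mismatch.
 */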
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
	vm_map_entry_t map_entry, next_map_entry;

	if (map->holes_list == NULL) {
		return;
	}

	hole_entry = CAST_DOWN(vm_map_entry_t, map->holes_list);
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
			break;
		}
	}

	while (map_entry != vm_map_to_entry(map)) {
		if (map_entry->vme_start >= map->max_offset) {
			break;
		}

		if (map_entry->vme_end != map_entry->vme_next->vme_start) {
			if (map_entry->vme_next == vm_map_to_entry(map)) {
				break;
			}

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
				break;
			}
		}

		map_entry = map_entry->vme_next;
	}
}

/*
 * For debugging.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
	old_hole_entry->vme_prev = hole_entry->vme_prev;
	old_hole_entry->vme_next = hole_entry->vme_next;
	old_hole_entry->vme_start = hole_entry->vme_start;
	old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */

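/*
 * update_holes_on_entry_deletion:
 * An entry has just been removed from the map, so the range it covered
 * becomes a hole. Extend or merge adjacent holes if the freed range
 * borders them; otherwise allocate a fresh hole and link it into the
 * sorted hole list. The hole hint is consulted to shortcut the search.
 */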
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
	/*
	 * Dealing with the deletion of an older entry.
	 */

	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
#endif /* DEBUG */
	boolean_t create_new_hole = TRUE;

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

	if (hole_entry) {
		if (hole_entry->vme_end == old_entry->vme_start) {
			/*
			 * Found a hole that ends exactly where our entry starts.
			 * Hit.
			 */
		} else if (hole_entry->vme_start == old_entry->vme_end) {
			if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Found a hole that starts exactly where our entry
				 * ends, but make sure we don't erroneously extend
				 * backwards past the head of the list.
				 *
				 * Hit.
				 */

				hole_entry = hole_entry->vme_prev;
			}
		} else if (hole_entry->vme_start > old_entry->vme_end) {
			/*
			 * Useless hint. Start from the top.
			 */

			hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		}

		if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			if (hole_entry->vme_start > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx",
				    (unsigned long long)hole_entry->vme_start,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
			if (hole_entry->vme_end > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx",
				    (unsigned long long)hole_entry->vme_end,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
		}

		while (1) {
			next_hole_entry = hole_entry->vme_next;

			/*
			 * Hole is right above the entry (it ends where the
			 * entry starts).
			 */
			if (hole_entry->vme_end == old_entry->vme_start) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				/*
				 * Is there another hole right below the entry?
				 * Can we combine holes?
				 */

				if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
					vm_map_combine_hole(map, hole_entry);
				} else {
					hole_entry->vme_end = old_entry->vme_end;
				}
				create_new_hole = FALSE;
#if DEBUG
				if (vm_check_map_sanity) {
					check_map_sanity(map, &old_hole_entry);
				}
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is right below the entry (it starts where the
			 * entry ends).
			 */
			if (hole_entry->vme_start == old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				hole_entry->vme_start = old_entry->vme_start;
				create_new_hole = FALSE;

#if DEBUG
				if (vm_check_map_sanity) {
					check_map_sanity(map, &old_hole_entry);
				}
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is beyond our entry. Let's go back to the last hole
			 * before our entry so we have the right place to link up the
			 * new hole that will be needed.
			 */
			if (hole_entry->vme_start > old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
					assert(hole_entry->vme_start != old_entry->vme_start);
					hole_entry = hole_entry->vme_prev;
				}
				break;
			}

			hole_entry = next_hole_entry;

			if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				hole_entry = hole_entry->vme_prev;
				break;
			}
		}
	}

	if (create_new_hole) {
		struct vm_map_links *new_hole_entry = NULL;
		vm_map_entry_t l_next, l_prev;

		new_hole_entry = zalloc_id(ZONE_ID_VM_MAP_HOLES, Z_WAITOK | Z_NOFAIL);

		/*
		 * First hole in the map?
		 * OR
		 * A hole that is located above the current first hole in the map
		 * (i.e., the new hole starts at a lower address than the head of
		 * the list)?
		 */
		if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
			if (map->holes_list == NULL) {
				map->holes_list = new_hole_entry;
				new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
			} else {
				l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
				l_prev = map->holes_list->prev;
				map->holes_list = new_hole_entry;
				new_hole_entry->next = l_next;
				new_hole_entry->prev = l_prev;

				l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			}
		} else {
			l_next = hole_entry->vme_next;
			l_prev = hole_entry->vme_next->vme_prev;

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = l_next;

			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
		}

		new_hole_entry->start = old_entry->vme_start;
		new_hole_entry->end = old_entry->vme_end;

		hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

		assert(new_hole_entry->start < new_hole_entry->end);
	}

#if DEBUG
	if (vm_check_map_sanity) {
		check_map_sanity(map, &old_hole_entry);
	}
#endif /* DEBUG */

	SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
	return;
}

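/*
 * update_holes_on_entry_creation:
 * An entry has just been added to the map, so it must consume part or
 * all of an existing hole. Find that hole and delete, split, or trim
 * it to reflect the new entry's bounds; panic if no hole covers the
 * entry, since that means the map and the hole list disagree.
 */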
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
	vm_map_entry_t tmp_entry;
	boolean_t check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

	/*
	 * Case A: The entry is aligned exactly with the start and end of the hole.
	 * This will delete the hole.
	 *
	 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
	 * This will split a hole.
	 *
	 * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
	 * This will reduce the size of the hole, or delete the hole completely if the hole is smaller than the entry.
	 */
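
	/*
	 * Pictorially (addresses increase to the right):
	 *
	 *   hole:                 [--------------)
	 *   Case A  entry:        [==============)      hole deleted
	 *   Case B  entry:            [======)          hole split around the entry
	 *   Case C1 entry:     [=========)              hole start raised to entry end
	 *   Case C2 entry:                 [=========)  hole end lowered to entry start
	 */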

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
	assert(hole_entry);
	next_hole_entry = hole_entry->vme_next;

	while (1) {
#if DEBUG
		/*
		 * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
		 * the entries belonging to the copy map are linked into the list of entries silently and
		 * then added to the RB tree later on.
		 * So sanity checks are useless in that case.
		 */
		check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

		if (hole_entry->vme_start == new_entry->vme_start &&
		    hole_entry->vme_end == new_entry->vme_end) {
			/* Case A */
#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/*
			 * This check makes sense only for regular maps, not copy maps.
			 * With a regular map, the VM entry is first linked and then
			 * the hole is deleted. So the check below, which makes sure that
			 * the map's bounds are being respected, is valid.
			 * But for copy maps, the hole is deleted before the VM entry is
			 * linked (vm_map_store_copy_insert) and so this check is invalid.
			 *
			 * if (hole_entry == (vm_map_entry_t) map->holes_list) {
			 *
			 *      if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
			 *
			 *              next_hole_entry = vm_map_last_entry(map);
			 *              assert(next_hole_entry->vme_end >= map->max_offset);
			 *      }
			 * }
			 */

			vm_map_delete_hole(map, hole_entry);

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */
			return;
		} else if (hole_entry->vme_start < new_entry->vme_start &&
		    hole_entry->vme_end > new_entry->vme_end) {
			/* Case B */
			struct vm_map_links *new_hole_entry = NULL;

			new_hole_entry = zalloc_id(ZONE_ID_VM_MAP_HOLES, Z_WAITOK | Z_NOFAIL);

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = hole_entry->vme_next;
			hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

			new_hole_entry->start = new_entry->vme_end;
			new_hole_entry->end = hole_entry->vme_end;
			hole_entry->vme_end = new_entry->vme_start;

			assert(hole_entry->vme_start < hole_entry->vme_end);
			assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			return;
		} else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
			/*
			 * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_end <= new_entry->vme_end) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_start = new_entry->vme_end;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		} else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
			/*
			 * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_start >= new_entry->vme_start) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_end = new_entry->vme_start;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		}

		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			break;
		}
	}

	panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx",
	    hole_entry->vme_prev,
	    (unsigned long long)hole_entry->vme_prev->vme_start,
	    (unsigned long long)hole_entry->vme_prev->vme_end,
	    hole_entry,
	    (unsigned long long)hole_entry->vme_start,
	    (unsigned long long)hole_entry->vme_end,
	    hole_entry->vme_next,
	    (unsigned long long)hole_entry->vme_next->vme_start,
	    (unsigned long long)hole_entry->vme_next->vme_end);
}

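/*
 * update_first_free_rb:
 * Entry point for hole-list maintenance, called after an entry has been
 * linked into or unlinked from a map that tracks holes. Dispatches to
 * update_holes_on_entry_creation() or update_holes_on_entry_deletion()
 * unless the change cannot affect the hole list.
 */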
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, bool new_entry_creation)
{
	if (map->holelistenabled) {
		/*
		 * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
		 */
		vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

		/*
		 * Clipping an entry will not result in the creation/deletion/modification of
		 * a hole. Those calls pass NULL for their target entry.
		 */
		if (entry == NULL) {
			return;
		}

		/*
		 * The commpage is pinned beyond the map's max offset. That shouldn't affect the
		 * holes within the bounds of the map.
		 */
		if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
			return;
		}

		/*
		 * Note:
		 *
		 * - A new entry has already been added to the map
		 * OR
		 * - An older entry has already been deleted from the map
		 *
		 * We are updating the hole list after the fact (except in one special case involving copy maps).
		 */

		if (new_entry_creation) {
			update_holes_on_entry_creation(map, entry);
		} else {
			update_holes_on_entry_deletion(map, entry);
		}
	}
}