/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_debug.c.
 *	Author:	Rich Draves
 *	Date:	March, 1990
 *
 *	Exported kernel calls. See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif

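/*
 * When MACH_VM_DEBUG is configured out, the routines below compile to
 * stubs that simply return KERN_FAILURE; __DEBUG_ONLY marks their
 * parameters __unused in that case to keep the compiler quiet.
 */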
#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */

#ifdef VM32_SUPPORT

#include <mach/vm32_map_server.h>
#include <mach/vm_map.h>

/*
 *	Routine:	vm32_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
 */
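/*
 * Illustrative user-side sketch (hypothetical; assumes the MIG-generated
 * stub from mach_debug/mach_debug.defs is available in user space as
 * mach_vm_region_info()):
 *
 *	vm_info_region_t region;
 *	vm_info_object_array_t objects;
 *	mach_msg_type_number_t count;
 *
 *	kern_return_t kr = mach_vm_region_info(mach_task_self(), addr,
 *	    &region, &objects, &count);
 *	if (kr == KERN_SUCCESS && count > 0) {
 *		// objects[0..count-1] describe the shadow chain; the array
 *		// arrives as out-of-line data and should be released:
 *		vm_deallocate(mach_task_self(), (vm_address_t)objects,
 *		    count * sizeof(vm_info_object_t));
 *	}
 */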

kern_return_t
vm32_region_info(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY vm32_offset_t address,
	__DEBUG_ONLY vm_info_region_t *regionp,
	__DEBUG_ONLY vm_info_object_array_t *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;		/* memory for OOL data */
	vm_size_t size;			/* size of the memory */
	unsigned int room;		/* room for this many objects */
	unsigned int used;		/* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
				(vm_map_address_t)address, &entry)) {

				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
						    addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = VME_SUBMAP(entry);
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = (natural_t) VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(natural_t)(uintptr_t) cobject;
				vio->vio_size =
					(natural_t) cobject->vo_size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy =
					(natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
					(natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
					(natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
					(natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					(vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress +
					cobject->activity_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					FALSE;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

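		/*
		 * On the first pass size is 0, so room is 0 and any
		 * non-empty object chain falls through to the allocation
		 * below; the buffer is then sized at twice the observed
		 * count, so a chain that grows before the retry still fits.
		 */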
		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ|VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */
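	/*
	 * vm_map_copyin() below is called with src_destroy == TRUE, so the
	 * buffer is consumed into a vm_map_copy_t that MIG hands back to
	 * the caller as out-of-line data; only the unused tail pages must
	 * be freed explicitly.
	 */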

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used)
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

/*
 * Temporary call for the 64-bit data path interface transition.
 * Identical to vm32_region_info() except that the region's vir_offset
 * is returned as a full 64-bit value rather than truncated to a
 * natural_t.
 */

kern_return_t
vm32_region_info_64(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY vm32_offset_t address,
	__DEBUG_ONLY vm_info_region_64_t *regionp,
	__DEBUG_ONLY vm_info_object_array_t *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;		/* memory for OOL data */
	vm_size_t size;			/* size of the memory */
	unsigned int room;		/* room for this many objects */
	unsigned int used;		/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
						    addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = VME_SUBMAP(entry);
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(natural_t)(uintptr_t) cobject;
				vio->vio_size =
					(natural_t) cobject->vo_size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy =
					(natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
					(natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
					(natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
					(natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					(vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress +
					cobject->activity_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					FALSE;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ|VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used)
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

/*
 * Return an array of virtual pages that are mapped to a task.
 */
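/*
 * Hedged caller-side sketch (assumes the user-space MIG stub is exposed
 * as vm_mapped_pages_info(); not compiled here):
 *
 *	page_address_array_t pages;
 *	mach_msg_type_number_t count;
 *
 *	if (vm_mapped_pages_info(task, &pages, &count) == KERN_SUCCESS) {
 *		// pages[0..count-1] hold the resident page addresses,
 *		// returned out-of-line; release them when done:
 *		vm_deallocate(mach_task_self(), (vm_address_t)pages,
 *		    count * sizeof(vm_offset_t));
 *	}
 */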
kern_return_t
vm32_mapped_pages_info(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY page_address_array_t *pages,
	__DEBUG_ONLY mach_msg_type_number_t *pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t pmap;
	vm_size_t size, size_used;
	unsigned int actual, space;
	page_address_array_t list;
	vm_offset_t addr = 0;

	if (map == VM_MAP_NULL)
		return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(ipc_kernel_map));

	for (;;) {
		(void) vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		(void) vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);

		list = (page_address_array_t) addr;
		space = (unsigned int) (size / sizeof(vm_offset_t));

		actual = pmap_list_resident_pages(pmap,
		    list,
		    space);
		if (actual <= space)
			break;

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again, resized to hold the actual count
		 */
		size = vm_map_round_page(actual * sizeof(vm_offset_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
	}
	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	}
	else {
		vm_size_t vmsize_used;
		*pages_count = actual;
		size_used = (actual * sizeof(vm_offset_t));
		vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
		(void) vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ|VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		(void) vm_map_copyin(ipc_kernel_map,
		    (vm_map_address_t)addr,
		    (vm_map_size_t)size_used,
		    TRUE,
		    (vm_map_copy_t *)pages);
		if (vmsize_used != size) {
			(void) kmem_free(ipc_kernel_map,
			    addr + vmsize_used,
			    size - vmsize_used);
		}
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}

#endif /* VM32_SUPPORT */

/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked. Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
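/*
 * The CountInOut protocol: the caller supplies an in-line buffer and its
 * capacity in *countp; if the table fits, it is returned in-line,
 * otherwise kernel memory is allocated and *infop is replaced with an
 * out-of-line vm_map_copy_t. A hedged caller-side sketch (assumes the
 * MIG stub name host_virtual_physical_table_info() and the usual
 * pointer-comparison idiom for detecting out-of-line data):
 *
 *	hash_info_bucket_t buf[16];
 *	hash_info_bucket_array_t info = buf;
 *	mach_msg_type_number_t count = 16;
 *
 *	if (host_virtual_physical_table_info(host, &info, &count) ==
 *	    KERN_SUCCESS && info != buf) {
 *		// data came back out-of-line; free it after use
 *		vm_deallocate(mach_task_self(), (vm_address_t)info,
 *		    count * sizeof(hash_info_bucket_t));
 *	}
 */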

kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t host,
	__DEBUG_ONLY hash_info_bucket_array_t *infop,
	__DEBUG_ONLY mach_msg_type_number_t *countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr = 0;
	vm_size_t size = 0;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = vm_map_round_page(actual * sizeof *info,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size,
		    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = (unsigned int) (size/sizeof (*info));
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used, vmused;

		used = (actual * sizeof(*info));
		vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (vmused != size)
			kmem_free(ipc_kernel_map, addr + vmused, size - vmused);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}