1/*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: vm/vm_debug.c.
60 * Author: Rich Draves
61 * Date: March, 1990
62 *
63 * Exported kernel calls. See mach_debug/mach_debug.defs.
64 */
65#include <mach_vm_debug.h>
66#include <mach/kern_return.h>
67#include <mach/mach_host_server.h>
68#include <mach_debug/vm_info.h>
69#include <mach_debug/page_info.h>
70#include <mach_debug/hash_info.h>
71
72#if MACH_VM_DEBUG
73#include <mach/machine/vm_types.h>
74#include <mach/memory_object_types.h>
75#include <mach/vm_prot.h>
76#include <mach/vm_inherit.h>
77#include <mach/vm_param.h>
78#include <kern/thread.h>
79#include <vm/vm_map.h>
80#include <vm/vm_kern.h>
81#include <vm/vm_object.h>
82#include <kern/task.h>
83#include <kern/host.h>
84#include <ipc/ipc_port.h>
85#include <vm/vm_debug.h>
86#endif
87
88#if !MACH_VM_DEBUG
89#define __DEBUG_ONLY __unused
90#else /* !MACH_VM_DEBUG */
91#define __DEBUG_ONLY
92#endif /* !MACH_VM_DEBUG */
93
94#ifdef VM32_SUPPORT
95
96#include <mach/vm32_map_server.h>
97#include <mach/vm_map.h>
98
99/*
100 * Routine: mach_vm_region_info [kernel call]
101 * Purpose:
102 * Retrieve information about a VM region,
103 * including info about the object chain.
104 * Conditions:
105 * Nothing locked.
106 * Returns:
107 * KERN_SUCCESS Retrieve region/object info.
108 * KERN_INVALID_TASK The map is null.
109 * KERN_NO_SPACE There is no entry at/after the address.
110 * KERN_RESOURCE_SHORTAGE Can't allocate memory.
111 */
112
kern_return_t
vm32_region_info(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY vm32_offset_t address,
	__DEBUG_ONLY vm_info_region_t *regionp,
	__DEBUG_ONLY vm_info_object_array_t *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;           /* memory for OOL data */
	vm_size_t size;                 /* size of the memory */
	unsigned int room;              /* room for this many objects */
	unsigned int used;              /* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	size = 0;               /* no memory allocated yet */

	/*
	 * Retry loop: each pass re-does the map lookup and re-walks the
	 * object shadow chain.  Because all locks are dropped before a
	 * (re)allocation, the map may change between passes, so the data
	 * must be collected again from scratch each time the buffer is
	 * found to be too small.
	 */
	for (;;) {
		vm_map_t cmap;  /* current map in traversal */
		vm_map_t nmap;  /* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry_allow_pgz(cmap,
			    (vm_map_address_t)address, &entry)) {
				/* no entry at address; report the next one up */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0) {
						kmem_free(ipc_kernel_map,
						    addr, size);
					}
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map) {
				nmap = VME_SUBMAP(entry);
			} else {
				break;
			}

			/* move down to the lower map */
			/* hand-over-hand: lock the submap before dropping cmap */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		/* snapshot the entry; note start/end/offset truncate to 32 bits */
		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = (natural_t) VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* take the object lock before dropping the map lock */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain.  One vm_info_object_t is filled in
		 * per object while there is room; "used" always counts the
		 * full chain length, so a too-small buffer is detected below.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
				    &((vm_info_object_t *) addr)[used];

				vio->vio_object =
				    (natural_t)(uintptr_t) cobject;
				vio->vio_size =
				    (natural_t) cobject->vo_size;
				vio->vio_ref_count =
				    cobject->ref_count;
				vio->vio_resident_page_count =
				    cobject->resident_page_count;
				vio->vio_copy =
				    (natural_t)(uintptr_t) cobject->vo_copy;
				vio->vio_shadow =
				    (natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
				    (natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
				    (natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
				    cobject->copy_strategy;
				vio->vio_last_alloc =
				    (vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
				    cobject->paging_in_progress +
				    cobject->activity_in_progress;
				vio->vio_pager_created =
				    cobject->pager_created;
				vio->vio_pager_initialized =
				    cobject->pager_initialized;
				vio->vio_pager_ready =
				    cobject->pager_ready;
				vio->vio_can_persist =
				    cobject->can_persist;
				vio->vio_internal =
				    cobject->internal;
				/* "temporary" attribute no longer exists; reported as FALSE */
				vio->vio_temporary =
				    FALSE;
				vio->vio_alive =
				    cobject->alive;
				vio->vio_purgable =
				    (cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
				    cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			/* hand-over-hand down the shadow chain */
			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room) {
			break;
		}

		/* must allocate more memory */

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
		/* double the needed size: slack in case the chain grows before retry */
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = kmem_alloc(ipc_kernel_map, &addr, size,
		    KMA_DATA, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		/* zero the tail of the last page so no stale kernel data leaks */
		if (size_used < vmsize_used) {
			bzero((char *)addr + size_used, vmsize_used - size_used);
		}

		kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		/* src_destroy == TRUE: the copyin consumes the source pages */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used) {
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
		}
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
320
/*
 * Temporary call for 64 bit data path interface transition
 */
324
kern_return_t
vm32_region_info_64(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY vm32_offset_t address,
	__DEBUG_ONLY vm_info_region_64_t *regionp,
	__DEBUG_ONLY vm_info_object_array_t *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;           /* memory for OOL data */
	vm_size_t size;                 /* size of the memory */
	unsigned int room;              /* room for this many objects */
	unsigned int used;              /* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	size = 0;               /* no memory allocated yet */

	/*
	 * Retry loop: re-looks-up the entry and re-walks the shadow chain
	 * on every pass, growing the buffer between passes when the chain
	 * does not fit (no locks are held across the reallocation, so the
	 * data must be re-collected).
	 */
	for (;;) {
		vm_map_t cmap;  /* current map in traversal */
		vm_map_t nmap;  /* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry_allow_pgz(cmap, address, &entry)) {
				/* no entry at address; report the next one up */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0) {
						kmem_free(ipc_kernel_map,
						    addr, size);
					}
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map) {
				nmap = VME_SUBMAP(entry);
			} else {
				break;
			}

			/* move down to the lower map */
			/* hand-over-hand: lock the submap before dropping cmap */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		/*
		 * Snapshot the entry.  Unlike vm32_region_info, vir_offset
		 * is carried at full width (the _64 in the name refers to
		 * this offset, not to the addresses, which still truncate).
		 */
		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* take the object lock before dropping the map lock */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain; fill one record per object while
		 * there is room.  "used" counts the whole chain regardless,
		 * so a short buffer is detected after the walk.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
				    &((vm_info_object_t *) addr)[used];

				vio->vio_object =
				    (natural_t)(uintptr_t) cobject;
				vio->vio_size =
				    (natural_t) cobject->vo_size;
				vio->vio_ref_count =
				    cobject->ref_count;
				vio->vio_resident_page_count =
				    cobject->resident_page_count;
				vio->vio_copy =
				    (natural_t)(uintptr_t) cobject->vo_copy;
				vio->vio_shadow =
				    (natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
				    (natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
				    (natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
				    cobject->copy_strategy;
				vio->vio_last_alloc =
				    (vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
				    cobject->paging_in_progress +
				    cobject->activity_in_progress;
				vio->vio_pager_created =
				    cobject->pager_created;
				vio->vio_pager_initialized =
				    cobject->pager_initialized;
				vio->vio_pager_ready =
				    cobject->pager_ready;
				vio->vio_can_persist =
				    cobject->can_persist;
				vio->vio_internal =
				    cobject->internal;
				/* "temporary" attribute no longer exists; reported as FALSE */
				vio->vio_temporary =
				    FALSE;
				vio->vio_alive =
				    cobject->alive;
				vio->vio_purgable =
				    (cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
				    cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			/* hand-over-hand down the shadow chain */
			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room) {
			break;
		}

		/* must allocate more memory */

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
		/* double the needed size: slack in case the chain grows before retry */
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = kmem_alloc(ipc_kernel_map, &addr, size,
		    KMA_DATA, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		/* zero the tail of the last page so no stale kernel data leaks */
		if (size_used < vmsize_used) {
			bzero((char *)addr + size_used, vmsize_used - size_used);
		}

		kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		/* src_destroy == TRUE: the copyin consumes the source pages */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used) {
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
		}
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
531/*
532 * Return an array of virtual pages that are mapped to a task.
533 */
kern_return_t
vm32_mapped_pages_info(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY page_address_array_t *pages,
	__DEBUG_ONLY mach_msg_type_number_t *pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#elif 1 /* pmap_resident_count is gone with rdar://68290810 */
	/*
	 * Unconditionally disabled: the pmap interface this call depended
	 * on was removed, so the historical implementation below is
	 * compiled out and callers always get KERN_FAILURE.
	 */
	(void)map; (void)pages; (void)pages_count;
	return KERN_FAILURE;
#else
	/*
	 * NOTE(review): dead code while the "#elif 1" above remains;
	 * retained for reference only.
	 */
	pmap_t pmap;
	vm_size_t size, size_used;
	unsigned int actual, space;
	page_address_array_t list;
	mach_vm_offset_t addr = 0;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pmap = map->pmap;
	/* initial guess: one slot per currently resident page */
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(ipc_kernel_map));

	/* grow-and-retry until the resident-page list fits */
	for (;;) {
		(void) mach_vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		/* make the buffer pageable while it is being filled */
		(void) vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);

		list = (page_address_array_t) addr;
		space = (unsigned int) (size / sizeof(vm_offset_t));

		actual = pmap_list_resident_pages(pmap,
		    list,
		    space);
		if (actual <= space) {
			break;
		}

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again, doubling the size
		 */
		size = vm_map_round_page(actual * sizeof(vm_offset_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
	}
	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t vmsize_used;
		*pages_count = actual;
		size_used = (actual * sizeof(vm_offset_t));
		vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
		/* re-wire the used portion before handing it off */
		(void) vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ | VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		/* src_destroy == TRUE: the copyin consumes the source pages */
		(void) vm_map_copyin(ipc_kernel_map,
		    (vm_map_address_t)addr,
		    (vm_map_size_t)size_used,
		    TRUE,
		    (vm_map_copy_t *)pages);
		if (vmsize_used != size) {
			(void) kmem_free(ipc_kernel_map,
			    addr + vmsize_used,
			    size - vmsize_used);
		}
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
626
627#endif /* VM32_SUPPORT */
628
629/*
630 * Routine: host_virtual_physical_table_info
631 * Purpose:
632 * Return information about the VP table.
633 * Conditions:
634 * Nothing locked. Obeys CountInOut protocol.
635 * Returns:
636 * KERN_SUCCESS Returned information.
637 * KERN_INVALID_HOST The host is null.
638 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
639 */
640
kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t host,
	__DEBUG_ONLY hash_info_bucket_array_t *infop,
	__DEBUG_ONLY mach_msg_type_number_t *countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr = 0;
	vm_size_t size = 0;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	/* start with in-line data */

	/*
	 * CountInOut protocol: *infop/*countp arrive as the caller's
	 * in-line buffer and its capacity.  If the table fits there, it
	 * is returned in-line; otherwise a kernel buffer is grown until
	 * it fits and returned out-of-line as a vm_map_copy.
	 */
	info = *infop;
	potential = *countp;

	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential) {
			break;
		}

		/* allocate more memory */

		/* only free if we already own the buffer (not the in-line one) */
		if (info != *infop) {
			kmem_free(ipc_kernel_map, addr, size);
		}

		size = vm_map_round_page(actual * sizeof *info,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
		kr = kmem_alloc(ipc_kernel_map, &addr, size,
		    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}

		info = (hash_info_bucket_t *) addr;
		potential = (unsigned int) (size / sizeof(*info));
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used, vmused;

		used = (actual * sizeof(*info));
		vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));

		/* trim pages past the data before the copyin consumes the rest */
		if (vmused != size) {
			kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
		}

		/* src_destroy == TRUE: the copyin consumes the source pages */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
719