/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm32_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

#include <debug.h>

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm32_map_server.h>

#include <kern/host.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#ifdef VM32_SUPPORT

/*
 * See vm_user.c for the real implementation of all of these functions.
 * We call through to the mach_ "wide" versions of the routines, and trust
 * that the VM system verifies the arguments and only returns addresses that
 * are appropriate for the task's address space size.
 *
 * New VM call implementations should not be added here, because they would
 * be available only to 32-bit userspace clients.  Add them to vm_user.c
 * and the corresponding prototype to mach_vm.defs (subsystem 4800).
 */
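
/*
 * Illustrative sketch only (not part of the original file): these routines
 * are reached through the MIG-generated vm_map stubs used by 32-bit
 * userspace, so an ordinary vm_allocate()/vm_deallocate() pair issued by a
 * 32-bit process is ultimately served by vm32_allocate()/vm32_deallocate()
 * below.  An assumed minimal userspace caller would look like:
 *
 *	#include <mach/mach.h>
 *
 *	vm_address_t addr = 0;
 *	kern_return_t kr = vm_allocate(mach_task_self(), &addr,
 *	    vm_page_size, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		(void) vm_deallocate(mach_task_self(), addr, vm_page_size);
 *	}
 */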

kern_return_t
vm32_allocate(
	vm_map_t map,
	vm32_offset_t *addr,
	vm32_size_t size,
	int flags)
{
	mach_vm_offset_t maddr;
	kern_return_t result;

	maddr = *addr;
	result = mach_vm_allocate_external(map, &maddr, size, flags);
	*addr = CAST_DOWN_EXPLICIT(vm32_offset_t, maddr);

	return result;
}

kern_return_t
vm32_deallocate(
	vm_map_t map,
	vm32_offset_t start,
	vm32_size_t size)
{
	if ((map == VM_MAP_NULL) || (start + size < start)) {
		return KERN_INVALID_ARGUMENT;
	}

	return mach_vm_deallocate(map, start, size);
}

kern_return_t
vm32_inherit(
	vm_map_t map,
	vm32_offset_t start,
	vm32_size_t size,
	vm_inherit_t new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start)) {
		return KERN_INVALID_ARGUMENT;
	}

	return mach_vm_inherit(map, start, size, new_inheritance);
}

kern_return_t
vm32_protect(
	vm_map_t map,
	vm32_offset_t start,
	vm32_size_t size,
	boolean_t set_maximum,
	vm_prot_t new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start)) {
		return KERN_INVALID_ARGUMENT;
	}

	return mach_vm_protect(map, start, size, set_maximum, new_protection);
}

kern_return_t
vm32_machine_attribute(
	vm_map_t map,
	vm32_address_t addr,
	vm32_size_t size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value)	/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr)) {
		return KERN_INVALID_ARGUMENT;
	}

	return mach_vm_machine_attribute(map, addr, size, attribute, value);
}

kern_return_t
vm32_read(
	vm_map_t map,
	vm32_address_t addr,
	vm32_size_t size,
	pointer_t *data,
	mach_msg_type_number_t *data_size)
{
	return mach_vm_read(map, addr, size, data, data_size);
}

kern_return_t
vm32_read_list(
	vm_map_t map,
	vm32_read_entry_t data_list,
	natural_t count)
{
	mach_vm_read_entry_t mdata_list;
	mach_msg_type_number_t i;
	kern_return_t result;

	for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
		mdata_list[i].address = data_list[i].address;
		mdata_list[i].size = data_list[i].size;
	}

	result = mach_vm_read_list(map, mdata_list, count);

	for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
		data_list[i].address = CAST_DOWN_EXPLICIT(vm32_address_t, mdata_list[i].address);
		data_list[i].size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_list[i].size);
	}

	return result;
}

kern_return_t
vm32_read_overwrite(
	vm_map_t map,
	vm32_address_t address,
	vm32_size_t size,
	vm32_address_t data,
	vm32_size_t *data_size)
{
	kern_return_t result;
	mach_vm_size_t mdata_size;

	mdata_size = *data_size;
	result = mach_vm_read_overwrite(map, address, size, data, &mdata_size);
	*data_size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_size);

	return result;
}

kern_return_t
vm32_write(
	vm_map_t map,
	vm32_address_t address,
	pointer_t data,
	mach_msg_type_number_t size)
{
	return mach_vm_write(map, address, data, size);
}

kern_return_t
vm32_copy(
	vm_map_t map,
	vm32_address_t source_address,
	vm32_size_t size,
	vm32_address_t dest_address)
{
	return mach_vm_copy(map, source_address, size, dest_address);
}

kern_return_t
vm32_map_64(
	vm_map_t target_map,
	vm32_offset_t *address,
	vm32_size_t size,
	vm32_offset_t mask,
	int flags,
	ipc_port_t port,
	vm_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance)
{
	mach_vm_offset_t maddress;
	kern_return_t result;

	maddress = *address;
	result = mach_vm_map_external(target_map, &maddress, size, mask,
	    flags, port, offset, copy,
	    cur_protection, max_protection, inheritance);
	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);

	return result;
}

kern_return_t
vm32_map(
	vm_map_t target_map,
	vm32_offset_t *address,
	vm32_size_t size,
	vm32_offset_t mask,
	int flags,
	ipc_port_t port,
	vm32_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance)
{
	return vm32_map_64(target_map, address, size, mask,
	           flags, port, offset, copy,
	           cur_protection, max_protection, inheritance);
}

kern_return_t
vm32_remap(
	vm_map_t target_map,
	vm32_offset_t *address,
	vm32_size_t size,
	vm32_offset_t mask,
	boolean_t anywhere,
	vm_map_t src_map,
	vm32_offset_t memory_address,
	boolean_t copy,
	vm_prot_t *cur_protection,
	vm_prot_t *max_protection,
	vm_inherit_t inheritance)
{
	mach_vm_offset_t maddress;
	kern_return_t result;

	maddress = *address;
	result = mach_vm_remap_external(target_map, &maddress, size, mask,
	    anywhere, src_map, memory_address, copy,
	    cur_protection, max_protection, inheritance);
	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);

	return result;
}

kern_return_t
vm32_msync(
	vm_map_t map,
	vm32_address_t address,
	vm32_size_t size,
	vm_sync_t sync_flags)
{
	return mach_vm_msync(map, address, size, sync_flags);
}

kern_return_t
vm32_behavior_set(
	vm_map_t map,
	vm32_offset_t start,
	vm32_size_t size,
	vm_behavior_t new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start)) {
		return KERN_INVALID_ARGUMENT;
	}

	return mach_vm_behavior_set(map, start, size, new_behavior);
}

kern_return_t
vm32_region_64(
	vm_map_t map,
	vm32_offset_t *address,			/* IN/OUT */
	vm32_size_t *size,			/* OUT */
	vm_region_flavor_t flavor,		/* IN */
	vm_region_info_t info,			/* OUT */
	mach_msg_type_number_t *count,		/* IN/OUT */
	mach_port_t *object_name)		/* OUT */
{
	mach_vm_offset_t maddress;
	mach_vm_size_t msize;
	kern_return_t result;

	maddress = *address;
	msize = *size;
	result = mach_vm_region(map, &maddress, &msize, flavor, info, count, object_name);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);
	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);

	return result;
}

kern_return_t
vm32_region(
	vm_map_t map,
	vm32_address_t *address,		/* IN/OUT */
	vm32_size_t *size,			/* OUT */
	vm_region_flavor_t flavor,		/* IN */
	vm_region_info_t info,			/* OUT */
	mach_msg_type_number_t *count,		/* IN/OUT */
	mach_port_t *object_name)		/* OUT */
{
	vm_map_address_t map_addr;
	vm_map_size_t map_size;
	kern_return_t kr;

	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
	        &map_addr, &map_size,
	        flavor, info, count,
	        object_name);

	*address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) {
		return KERN_INVALID_ADDRESS;
	}
	return kr;
}

kern_return_t
vm32_region_recurse_64(
	vm_map_t map,
	vm32_address_t *address,
	vm32_size_t *size,
	uint32_t *depth,
	vm_region_recurse_info_64_t info,
	mach_msg_type_number_t *infoCnt)
{
	mach_vm_address_t maddress;
	mach_vm_size_t msize;
	kern_return_t result;

	maddress = *address;
	msize = *size;
	result = mach_vm_region_recurse(map, &maddress, &msize, depth, info, infoCnt);
	*address = CAST_DOWN_EXPLICIT(vm32_address_t, maddress);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);

	return result;
}

kern_return_t
vm32_region_recurse(
	vm_map_t map,
	vm32_offset_t *address,			/* IN/OUT */
	vm32_size_t *size,			/* OUT */
	natural_t *depth,			/* IN/OUT */
	vm_region_recurse_info_t info32,	/* IN/OUT */
	mach_msg_type_number_t *infoCnt)	/* IN/OUT */
{
	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	vm_map_address_t map_addr;
	vm_map_size_t map_size;
	kern_return_t kr;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
	        depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) {
		return KERN_INVALID_ADDRESS;
	}
	return kr;
}

kern_return_t
vm32_purgable_control(
	vm_map_t map,
	vm32_offset_t address,
	vm_purgable_t control,
	int *state)
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_purgable_control(map,
	           vm_map_trunc_page(address, PAGE_MASK),
	           control,
	           state);
}

kern_return_t
vm32_map_page_query(
	vm_map_t map,
	vm32_offset_t offset,
	int *disposition,
	int *ref_count)
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_page_query_internal(
		map,
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition,
		ref_count);
}

kern_return_t
vm32_make_memory_entry_64(
	vm_map_t target_map,
	memory_object_size_t *size,
	memory_object_offset_t offset,
	vm_prot_t permission,
	ipc_port_t *object_handle,
	ipc_port_t parent_handle)
{
	// use the existing entrypoint
	return _mach_make_memory_entry(target_map, size, offset, permission, object_handle, parent_handle);
}

kern_return_t
vm32_make_memory_entry(
	vm_map_t target_map,
	vm32_size_t *size,
	vm32_offset_t offset,
	vm_prot_t permission,
	ipc_port_t *object_handle,
	ipc_port_t parent_entry)
{
	memory_object_size_t mo_size;
	kern_return_t kr;

	mo_size = (memory_object_size_t)*size;
	kr = _mach_make_memory_entry(target_map, &mo_size,
	    (memory_object_offset_t)offset, permission, object_handle,
	    parent_entry);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, mo_size);
	return kr;
}

kern_return_t
vm32__task_wire(
	vm_map_t map,
	boolean_t must_wire __unused)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_NOT_SUPPORTED;
}

kern_return_t
vm32__map_exec_lockdown(
	vm_map_t map)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_lock(map);
	map->map_disallow_new_exec = TRUE;
	vm_map_unlock(map);

	return KERN_SUCCESS;
}


#endif /* VM32_SUPPORT */