| 1 | /* |
| 2 | * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. |
| 3 | * |
| 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
| 5 | * |
| 6 | * This file contains Original Code and/or Modifications of Original Code |
| 7 | * as defined in and that are subject to the Apple Public Source License |
| 8 | * Version 2.0 (the 'License'). You may not use this file except in |
| 9 | * compliance with the License. The rights granted to you under the License |
| 10 | * may not be used to create, or enable the creation or redistribution of, |
| 11 | * unlawful or unlicensed copies of an Apple operating system, or to |
| 12 | * circumvent, violate, or enable the circumvention or violation of, any |
| 13 | * terms of an Apple operating system software license agreement. |
| 14 | * |
| 15 | * Please obtain a copy of the License at |
| 16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
| 17 | * |
| 18 | * The Original Code and all software distributed under the License are |
| 19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
| 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
| 21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
| 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
| 23 | * Please see the License for the specific language governing rights and |
| 24 | * limitations under the License. |
| 25 | * |
| 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
| 27 | */ |
| 28 | #define IOKIT_ENABLE_SHARED_PTR |
| 29 | |
| 30 | #define _IOMEMORYDESCRIPTOR_INTERNAL_ |
| 31 | |
| 32 | #include <IOKit/assert.h> |
| 33 | #include <IOKit/system.h> |
| 34 | |
| 35 | #include <IOKit/IOLib.h> |
| 36 | #include <IOKit/IOMapper.h> |
| 37 | #include <IOKit/IOBufferMemoryDescriptor.h> |
| 38 | #include <libkern/OSDebug.h> |
| 39 | #include <mach/mach_vm.h> |
| 40 | |
| 41 | #include "IOKitKernelInternal.h" |
| 42 | |
| 43 | #ifdef IOALLOCDEBUG |
| 44 | #include <libkern/c++/OSCPPDebug.h> |
| 45 | #endif |
| 46 | #include <IOKit/IOStatisticsPrivate.h> |
| 47 | |
| 48 | #if IOKITSTATS |
| 49 | #define IOStatisticsAlloc(type, size) \ |
| 50 | do { \ |
| 51 | IOStatistics::countAlloc(type, size); \ |
| 52 | } while (0) |
| 53 | #else |
| 54 | #define IOStatisticsAlloc(type, size) |
| 55 | #endif /* IOKITSTATS */ |
| 56 | |
| 57 | |
| 58 | __BEGIN_DECLS |
| 59 | void ipc_port_release_send(ipc_port_t port); |
| 60 | #include <vm/pmap.h> |
| 61 | |
| 62 | KALLOC_HEAP_DEFINE(KHEAP_IOBMD_CONTROL, "IOBMD_control", KHEAP_ID_KT_VAR); |
| 63 | __END_DECLS |
| 64 | |
| 65 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
| 66 | |
| 67 | enum{ |
| 68 | kInternalFlagPhysical = 0x00000001, |
| 69 | kInternalFlagPageSized = 0x00000002, |
| 70 | kInternalFlagPageAllocated = 0x00000004, |
| 71 | kInternalFlagInit = 0x00000008, |
| 72 | kInternalFlagHasPointers = 0x00000010, |
| 73 | kInternalFlagGuardPages = 0x00000020, |
| 74 | }; |
| 75 | |
| 76 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
| 77 | |
| 78 | #define super IOGeneralMemoryDescriptor |
| 79 | OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor, |
| 80 | IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM); |
| 81 | |
| 82 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
| 83 | |
| 84 | #if defined(__x86_64__) |
| 85 | static uintptr_t |
| 86 | IOBMDPageProc(kalloc_heap_t kheap, iopa_t * a) |
| 87 | { |
| 88 | kern_return_t kr; |
| 89 | vm_address_t vmaddr = 0; |
| 90 | kma_flags_t kma_flags = KMA_ZERO; |
| 91 | |
| 92 | if (kheap == KHEAP_DATA_BUFFERS) { |
| 93 | kma_flags = (kma_flags_t) (kma_flags | KMA_DATA); |
| 94 | } |
| 95 | kr = kmem_alloc(kernel_map, &vmaddr, page_size, |
| 96 | kma_flags, VM_KERN_MEMORY_IOKIT); |
| 97 | |
| 98 | if (KERN_SUCCESS != kr) { |
| 99 | vmaddr = 0; |
| 100 | } |
| 101 | |
| 102 | return (uintptr_t) vmaddr; |
| 103 | } |
| 104 | #endif /* defined(__x86_64__) */ |
| 105 | |
| 106 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
| 107 | |
| 108 | #ifndef __LP64__ |
| 109 | bool |
| 110 | IOBufferMemoryDescriptor::initWithOptions( |
| 111 | IOOptionBits options, |
| 112 | vm_size_t capacity, |
| 113 | vm_offset_t alignment, |
| 114 | task_t inTask) |
| 115 | { |
| 116 | mach_vm_address_t physicalMask = 0; |
| 117 | return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask); |
| 118 | } |
| 119 | #endif /* !__LP64__ */ |
| 120 | |
| 121 | OSSharedPtr<IOBufferMemoryDescriptor> |
| 122 | IOBufferMemoryDescriptor::withCopy( |
| 123 | task_t inTask, |
| 124 | IOOptionBits options, |
| 125 | vm_map_t sourceMap, |
| 126 | mach_vm_address_t source, |
| 127 | mach_vm_size_t size) |
| 128 | { |
| 129 | OSSharedPtr<IOBufferMemoryDescriptor> inst; |
| 130 | kern_return_t err; |
| 131 | vm_map_copy_t copy; |
| 132 | vm_map_address_t address; |
| 133 | |
| 134 | copy = NULL; |
| 135 | do { |
| 136 | err = kIOReturnNoMemory; |
| 137 | inst = OSMakeShared<IOBufferMemoryDescriptor>(); |
| 138 | if (!inst) { |
| 139 | break; |
| 140 | } |
| 141 | inst->_ranges.v64 = IOMallocType(IOAddressRange); |
| 142 | |
| 143 | err = vm_map_copyin(sourceMap, source, size, |
| 144 | false /* src_destroy */, &copy); |
| 145 | if (KERN_SUCCESS != err) { |
| 146 | break; |
| 147 | } |
| 148 | |
| 149 | err = vm_map_copyout(get_task_map(inTask), &address, copy); |
| 150 | if (KERN_SUCCESS != err) { |
| 151 | break; |
| 152 | } |
| 153 | copy = NULL; |
| 154 | |
| 155 | inst->_ranges.v64->address = address; |
| 156 | inst->_ranges.v64->length = size; |
| 157 | |
| 158 | if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) { |
| 159 | err = kIOReturnError; |
| 160 | } |
| 161 | } while (false); |
| 162 | |
| 163 | if (KERN_SUCCESS == err) { |
| 164 | return inst; |
| 165 | } |
| 166 | |
| 167 | if (copy) { |
| 168 | vm_map_copy_discard(copy); |
| 169 | } |
| 170 | |
| 171 | return nullptr; |
| 172 | } |
| 173 | |
| 174 | |
| 175 | bool |
| 176 | IOBufferMemoryDescriptor::initWithPhysicalMask( |
| 177 | task_t inTask, |
| 178 | IOOptionBits options, |
| 179 | mach_vm_size_t capacity, |
| 180 | mach_vm_address_t alignment, |
| 181 | mach_vm_address_t physicalMask) |
| 182 | { |
| 183 | task_t mapTask = NULL; |
| 184 | kalloc_heap_t kheap = KHEAP_DATA_BUFFERS; |
| 185 | mach_vm_address_t highestMask = 0; |
| 186 | IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference; |
| 187 | IODMAMapSpecification mapSpec; |
| 188 | bool mapped = false; |
| 189 | bool withCopy = false; |
| 190 | bool mappedOrShared = false; |
| 191 | |
| 192 | if (!capacity) { |
| 193 | return false; |
| 194 | } |
| 195 | |
| 196 | /* |
| 197 | * The IOKit constructor requests zeroed memory from the allocator, |
| 198 | * so the members of the class do not need to be explicitly zeroed. |
| 199 | */ |
| 200 | _options = options; |
| 201 | _capacity = capacity; |
| 202 | |
| 203 | if (!_ranges.v64) { |
| 204 | _ranges.v64 = IOMallocType(IOAddressRange); |
| 205 | _ranges.v64->address = 0; |
| 206 | _ranges.v64->length = 0; |
| 207 | } else { |
| 208 | if (!_ranges.v64->address) { |
| 209 | return false; |
| 210 | } |
| 211 | if (!(kIOMemoryPageable & options)) { |
| 212 | return false; |
| 213 | } |
| 214 | if (!inTask) { |
| 215 | return false; |
| 216 | } |
| 217 | _buffer = (void *) _ranges.v64->address; |
| 218 | withCopy = true; |
| 219 | } |
| 220 | |
| 221 | /* |
| 222 | * Set kalloc_heap to KHEAP_IOBMD_CONTROL if allocation contains pointers |
| 223 | */ |
| 224 | if (kInternalFlagHasPointers & _internalFlags) { |
| 225 | kheap = KHEAP_IOBMD_CONTROL; |
| 226 | } |
| 227 | |
| 228 | // make sure super::free doesn't dealloc _ranges before super::init |
| 229 | _flags = kIOMemoryAsReference; |
| 230 | |
| 231 | // Grab IOMD bits from the Buffer MD options |
| 232 | iomdOptions |= (options & kIOBufferDescriptorMemoryFlags); |
| 233 | |
| 234 | if (!(kIOMemoryMapperNone & options)) { |
| 235 | IOMapper::checkForSystemMapper(); |
| 236 | mapped = (NULL != IOMapper::gSystem); |
| 237 | } |
| 238 | |
| 239 | if (physicalMask && (alignment <= 1)) { |
| 240 | alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1)); |
| 241 | highestMask = (physicalMask | alignment); |
| 242 | alignment++; |
| 243 | if (alignment < page_size) { |
| 244 | alignment = page_size; |
| 245 | } |
| 246 | } |
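|  | /* |
|  | * Worked example of the mask math above (illustrative): a physicalMask of |
|  | * 0x00000000FFFFF000 has a low run of 12 zero bits, so |
|  | * ((physicalMask ^ -1ULL) & (physicalMask - 1)) is 0xFFF, highestMask becomes |
|  | * 0xFFFFFFFF, and the post-incremented alignment is 0x1000, raised to |
|  | * page_size just above when the page size is larger. |
|  | */ |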
| 247 | |
| 248 | if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) { |
| 249 | alignment = page_size; |
| 250 | } |
| 251 | |
| 252 | if (alignment >= page_size) { |
| 253 | if (round_page_overflow(capacity, &capacity)) { |
| 254 | return false; |
| 255 | } |
| 256 | } |
| 257 | |
| 258 | if (alignment > page_size) { |
| 259 | options |= kIOMemoryPhysicallyContiguous; |
| 260 | } |
| 261 | |
| 262 | _alignment = alignment; |
| 263 | |
| 264 | if ((capacity + alignment) < _capacity) { |
| 265 | return false; |
| 266 | } |
| 267 | |
| 268 | if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) { |
| 269 | return false; |
| 270 | } |
| 271 | |
| 272 | bzero(&mapSpec, sizeof(mapSpec)); |
| 273 | mapSpec.alignment = _alignment; |
| 274 | mapSpec.numAddressBits = 64; |
| 275 | if (highestMask && mapped) { |
| 276 | if (highestMask <= 0xFFFFFFFF) { |
| 277 | mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask)); |
| 278 | } else { |
| 279 | mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32))); |
| 280 | } |
| 281 | highestMask = 0; |
| 282 | } |
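|  | /* |
|  | * Illustrative note: numAddressBits above is the position of highestMask's |
|  | * most significant set bit plus one. For example, a highestMask of 0xFFFFFFFF |
|  | * gives 32 - clz(0xFFFFFFFF) = 32 address bits, while 0x3FFFFFFFF takes the |
|  | * 64-bit branch and gives 64 - clz(0x3) = 34 address bits. |
|  | */ |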
| 283 | |
| 284 | // set memory entry cache mode, pageable, purgeable |
| 285 | iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift; |
| 286 | if (options & kIOMemoryPageable) { |
| 287 | if (_internalFlags & kInternalFlagGuardPages) { |
| 288 | printf("IOBMD: Unsupported use of guard pages with pageable memory.\n" ); |
| 289 | return false; |
| 290 | } |
| 291 | iomdOptions |= kIOMemoryBufferPageable; |
| 292 | if (options & kIOMemoryPurgeable) { |
| 293 | iomdOptions |= kIOMemoryBufferPurgeable; |
| 294 | } |
| 295 | } else { |
| 296 | // Buffers shouldn't auto-prepare; they should be prepared explicitly. |
| 297 | // But that was never enforced, so keep auto-preparing for compatibility. |
| 298 | iomdOptions |= kIOMemoryAutoPrepare; |
| 299 | |
| 300 | /* Allocate a wired-down buffer inside kernel space. */ |
| 301 | |
| 302 | bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous)); |
| 303 | |
| 304 | if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) { |
| 305 | contig |= (!mapped); |
| 306 | contig |= (0 != (kIOMemoryMapperNone & options)); |
| 307 | #if 0 |
| 308 | // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now |
| 309 | contig |= true; |
| 310 | #endif |
| 311 | } |
| 312 | |
| 313 | mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options))); |
| 314 | if (contig || highestMask || (alignment > page_size)) { |
| 315 | if (_internalFlags & kInternalFlagGuardPages) { |
| 316 | printf("IOBMD: Unsupported use of guard pages with physical mask or contiguous memory.\n" ); |
| 317 | return false; |
| 318 | } |
| 319 | _internalFlags |= kInternalFlagPhysical; |
| 320 | if (highestMask) { |
| 321 | _internalFlags |= kInternalFlagPageSized; |
| 322 | if (round_page_overflow(capacity, &capacity)) { |
| 323 | return false; |
| 324 | } |
| 325 | } |
| 326 | _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(kheap, |
| 327 | capacity, highestMask, alignment, contig); |
| 328 | } else if (_internalFlags & kInternalFlagGuardPages) { |
| 329 | vm_offset_t address = 0; |
| 330 | kern_return_t kr; |
| 331 | uintptr_t alignMask; |
| 332 | kma_flags_t kma_flags = (kma_flags_t) (KMA_GUARD_FIRST | |
| 333 | KMA_GUARD_LAST | KMA_ZERO); |
| 334 | |
| 335 | if (((uint32_t) alignment) != alignment) { |
| 336 | return false; |
| 337 | } |
| 338 | if (kheap == KHEAP_DATA_BUFFERS) { |
| 339 | kma_flags = (kma_flags_t) (kma_flags | KMA_DATA); |
| 340 | } |
| 341 | |
| 342 | alignMask = (1UL << log2up((uint32_t) alignment)) - 1; |
| 343 | kr = kernel_memory_allocate(kernel_map, &address, |
| 344 | capacity + page_size * 2, alignMask, kma_flags, |
| 345 | IOMemoryTag(kernel_map)); |
| 346 | if (kr != KERN_SUCCESS || address == 0) { |
| 347 | return false; |
| 348 | } |
| 349 | #if IOALLOCDEBUG |
| 350 | OSAddAtomicLong(capacity, &debug_iomalloc_size); |
| 351 | #endif |
| 352 | IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity); |
| 353 | _buffer = (void *)(address + page_size); |
| 354 | #if defined(__x86_64__) |
| 355 | } else if (mappedOrShared |
| 356 | && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) { |
| 357 | _internalFlags |= kInternalFlagPageAllocated; |
| 358 | _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, |
| 359 | &IOBMDPageProc, kheap, capacity, alignment); |
| 360 | if (_buffer) { |
| 361 | bzero(_buffer, capacity); |
| 362 | IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity); |
| 363 | #if IOALLOCDEBUG |
| 364 | OSAddAtomicLong(capacity, &debug_iomalloc_size); |
| 365 | #endif |
| 366 | } |
| 367 | #endif /* defined(__x86_64__) */ |
| 368 | } else if (alignment > 1) { |
| 369 | /* BEGIN IGNORE CODESTYLE */ |
| 370 | __typed_allocators_ignore_push |
| 371 | _buffer = IOMallocAligned_internal(kheap, capacity, alignment, |
| 372 | Z_ZERO_VM_TAG_BT_BIT); |
| 373 | } else { |
| 374 | _buffer = IOMalloc_internal(kheap, capacity, Z_ZERO_VM_TAG_BT_BIT); |
| 375 | __typed_allocators_ignore_pop |
| 376 | /* END IGNORE CODESTYLE */ |
| 377 | } |
| 378 | if (!_buffer) { |
| 379 | return false; |
| 380 | } |
| 381 | } |
| 382 | |
| 383 | if ((options & (kIOMemoryPageable | kIOMapCacheMask))) { |
| 384 | vm_size_t size = round_page(capacity); |
| 385 | |
| 386 | // initWithOptions will create memory entry |
| 387 | if (!withCopy) { |
| 388 | iomdOptions |= kIOMemoryPersistent; |
| 389 | } |
| 390 | |
| 391 | if (options & kIOMemoryPageable) { |
| 392 | #if IOALLOCDEBUG |
| 393 | OSAddAtomicLong(size, &debug_iomallocpageable_size); |
| 394 | #endif |
| 395 | if (!withCopy) { |
| 396 | mapTask = inTask; |
| 397 | } |
| 398 | if (NULL == inTask) { |
| 399 | inTask = kernel_task; |
| 400 | } |
| 401 | } else if (options & kIOMapCacheMask) { |
| 402 | // Prefetch each page to put entries into the pmap |
| 403 | volatile UInt8 * startAddr = (UInt8 *)_buffer; |
| 404 | volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity; |
| 405 | |
| 406 | while (startAddr < endAddr) { |
| 407 | UInt8 dummyVar = *startAddr; |
| 408 | (void) dummyVar; |
| 409 | startAddr += page_size; |
| 410 | } |
| 411 | } |
| 412 | } |
| 413 | |
| 414 | _ranges.v64->address = (mach_vm_address_t) pgz_decode(_buffer, _capacity); |
| 415 | _ranges.v64->length = _capacity; |
| 416 | |
| 417 | if (!super::initWithOptions(_ranges.v64, 1, 0, |
| 418 | inTask, iomdOptions, /* System mapper */ NULL)) { |
| 419 | return false; |
| 420 | } |
| 421 | |
| 422 | _internalFlags |= kInternalFlagInit; |
| 423 | #if IOTRACKING |
| 424 | if (!(options & kIOMemoryPageable)) { |
| 425 | trackingAccumSize(capacity); |
| 426 | } |
| 427 | #endif /* IOTRACKING */ |
| 428 | |
| 429 | // give any system mapper the allocation params |
| 430 | if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec, |
| 431 | &mapSpec, sizeof(mapSpec))) { |
| 432 | return false; |
| 433 | } |
| 434 | |
| 435 | if (mapTask) { |
| 436 | if (!reserved) { |
| 437 | reserved = IOMallocType(ExpansionData); |
| 438 | if (!reserved) { |
| 439 | return false; |
| 440 | } |
| 441 | } |
| 442 | reserved->map = createMappingInTask(mapTask, 0, |
| 443 | kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach(); |
| 444 | if (!reserved->map) { |
| 445 | _buffer = NULL; |
| 446 | return false; |
| 447 | } |
| 448 | release(); // map took a retain on this |
| 449 | reserved->map->retain(); |
| 450 | removeMapping(reserved->map); |
| 451 | mach_vm_address_t buffer = reserved->map->getAddress(); |
| 452 | _buffer = (void *) buffer; |
| 453 | if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) { |
| 454 | _ranges.v64->address = buffer; |
| 455 | } |
| 456 | } |
| 457 | |
| 458 | setLength(_capacity); |
| 459 | |
| 460 | return true; |
| 461 | } |
| 462 | |
| 463 | bool |
| 464 | IOBufferMemoryDescriptor::initControlWithPhysicalMask( |
| 465 | task_t inTask, |
| 466 | IOOptionBits options, |
| 467 | mach_vm_size_t capacity, |
| 468 | mach_vm_address_t alignment, |
| 469 | mach_vm_address_t physicalMask) |
| 470 | { |
| 471 | _internalFlags = kInternalFlagHasPointers; |
| 472 | return initWithPhysicalMask(inTask, options, capacity, alignment, |
| 473 | physicalMask); |
| 474 | } |
| 475 | |
| 476 | bool |
| 477 | IOBufferMemoryDescriptor::initWithGuardPages( |
| 478 | task_t inTask, |
| 479 | IOOptionBits options, |
| 480 | mach_vm_size_t capacity) |
| 481 | { |
| 482 | mach_vm_size_t roundedCapacity; |
| 483 | |
| 484 | _internalFlags = kInternalFlagGuardPages; |
| 485 | |
| 486 | if (round_page_overflow(capacity, &roundedCapacity)) { |
| 487 | return false; |
| 488 | } |
| 489 | |
| 490 | return initWithPhysicalMask(inTask, options, roundedCapacity, page_size, |
| 491 | (mach_vm_address_t)0); |
| 492 | } |
| 493 | |
| 494 | OSSharedPtr<IOBufferMemoryDescriptor> |
| 495 | IOBufferMemoryDescriptor::inTaskWithOptions( |
| 496 | task_t inTask, |
| 497 | IOOptionBits options, |
| 498 | vm_size_t capacity, |
| 499 | vm_offset_t alignment) |
| 500 | { |
| 501 | OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>(); |
| 502 | |
| 503 | if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) { |
| 504 | me.reset(); |
| 505 | } |
| 506 | return me; |
| 507 | } |
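|  | /* |
|  | * Illustrative sketch, not built with this file: the common driver-side |
|  | * pattern for allocating a wired, physically contiguous DMA buffer with |
|  | * inTaskWithOptions(), preparing it, and letting the OSSharedPtr release it. |
|  | * The helper name and sizes are hypothetical; the option, direction, and |
|  | * return constants are the public IOKit ones already used in this file. |
|  | */ |
|  | #if 0 |
|  | static IOReturn |
|  | ExampleAllocateDMABuffer(void) |
|  | { |
|  | 	OSSharedPtr<IOBufferMemoryDescriptor> bmd = |
|  | 	    IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, |
|  | 	    kIODirectionInOut | kIOMemoryPhysicallyContiguous, |
|  | 	    /* capacity */ 4 * page_size, /* alignment */ page_size); |
|  | 	if (!bmd) { |
|  | 		return kIOReturnNoMemory; |
|  | 	} |
|  | 	// Non-pageable buffers are auto-prepared (see above), but the explicit |
|  | 	// prepare()/complete() pair is still the documented contract. |
|  | 	if (kIOReturnSuccess != bmd->prepare()) { |
|  | 		return kIOReturnVMError; |
|  | 	} |
|  | 	bzero(bmd->getBytesNoCopy(), bmd->getCapacity()); |
|  | 	// ... program the hardware against this buffer ... |
|  | 	bmd->complete(); |
|  | 	return kIOReturnSuccess; |
|  | } |
|  | #endif |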
| 508 | |
| 509 | OSSharedPtr<IOBufferMemoryDescriptor> |
| 510 | IOBufferMemoryDescriptor::inTaskWithOptions( |
| 511 | task_t inTask, |
| 512 | IOOptionBits options, |
| 513 | vm_size_t capacity, |
| 514 | vm_offset_t alignment, |
| 515 | uint32_t kernTag, |
| 516 | uint32_t userTag) |
| 517 | { |
| 518 | OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>(); |
| 519 | |
| 520 | if (me) { |
| 521 | me->setVMTags(kernTag, userTag); |
| 522 | |
| 523 | if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) { |
| 524 | me.reset(); |
| 525 | } |
| 526 | } |
| 527 | return me; |
| 528 | } |
| 529 | |
| 530 | OSSharedPtr<IOBufferMemoryDescriptor> |
| 531 | IOBufferMemoryDescriptor::inTaskWithPhysicalMask( |
| 532 | task_t inTask, |
| 533 | IOOptionBits options, |
| 534 | mach_vm_size_t capacity, |
| 535 | mach_vm_address_t physicalMask) |
| 536 | { |
| 537 | OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>(); |
| 538 | |
| 539 | if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) { |
| 540 | me.reset(); |
| 541 | } |
| 542 | return me; |
| 543 | } |
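|  | /* |
|  | * Illustrative sketch, not built with this file: inTaskWithPhysicalMask() is |
|  | * the variant used when the device can only address part of physical memory. |
|  | * The helper is hypothetical; the mask keeps the buffer within 32-bit physical |
|  | * addresses, with alignment derived from the mask as shown in |
|  | * initWithPhysicalMask() above. |
|  | */ |
|  | #if 0 |
|  | static OSSharedPtr<IOBufferMemoryDescriptor> |
|  | ExampleAllocate32BitReachableBuffer(mach_vm_size_t capacity) |
|  | { |
|  | 	return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, |
|  | 	           kIODirectionInOut | kIOMemoryPhysicallyContiguous, |
|  | 	           capacity, 0x00000000FFFFFFFFULL); |
|  | } |
|  | #endif |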
| 544 | |
| 545 | OSSharedPtr<IOBufferMemoryDescriptor> |
| 546 | IOBufferMemoryDescriptor::inTaskWithGuardPages( |
| 547 | task_t inTask, |
| 548 | IOOptionBits options, |
| 549 | mach_vm_size_t capacity) |
| 550 | { |
| 551 | OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>(); |
| 552 | |
| 553 | if (me && !me->initWithGuardPages(inTask, options, capacity)) { |
| 554 | me.reset(); |
| 555 | } |
| 556 | return me; |
| 557 | } |
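|  | /* |
|  | * Illustrative sketch, not built with this file: inTaskWithGuardPages() rounds |
|  | * the request up to whole pages and brackets the wired allocation with guard |
|  | * pages, so an access just past either end faults instead of silently |
|  | * corrupting adjacent kernel memory. The helper name is hypothetical. |
|  | */ |
|  | #if 0 |
|  | static OSSharedPtr<IOBufferMemoryDescriptor> |
|  | ExampleAllocateGuardedBuffer(mach_vm_size_t capacity) |
|  | { |
|  | 	return IOBufferMemoryDescriptor::inTaskWithGuardPages(kernel_task, |
|  | 	           kIODirectionInOut, capacity); |
|  | } |
|  | #endif |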
| 558 | |
| 559 | #ifndef __LP64__ |
| 560 | bool |
| 561 | IOBufferMemoryDescriptor::initWithOptions( |
| 562 | IOOptionBits options, |
| 563 | vm_size_t capacity, |
| 564 | vm_offset_t alignment) |
| 565 | { |
| 566 | return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0); |
| 567 | } |
| 568 | #endif /* !__LP64__ */ |
| 569 | |
| 570 | OSSharedPtr<IOBufferMemoryDescriptor> |
| 571 | IOBufferMemoryDescriptor::withOptions( |
| 572 | IOOptionBits options, |
| 573 | vm_size_t capacity, |
| 574 | vm_offset_t alignment) |
| 575 | { |
| 576 | OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>(); |
| 577 | |
| 578 | if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) { |
| 579 | me.reset(); |
| 580 | } |
| 581 | return me; |
| 582 | } |
| 583 | |
| 584 | |
| 585 | /* |
| 586 | * withCapacity: |
| 587 | * |
| 588 | * Returns a new IOBufferMemoryDescriptor with a buffer large enough to |
| 589 | * hold capacity bytes. The descriptor's length is initially set to the capacity. |
| 590 | */ |
| 591 | OSSharedPtr<IOBufferMemoryDescriptor> |
| 592 | IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity, |
| 593 | IODirection inDirection, |
| 594 | bool inContiguous) |
| 595 | { |
| 596 | return IOBufferMemoryDescriptor::withOptions( |
| 597 | inDirection | kIOMemoryUnshared |
| 598 | | (inContiguous ? kIOMemoryPhysicallyContiguous : 0), |
| 599 | inCapacity, inContiguous ? inCapacity : 1); |
| 600 | } |
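|  | /* |
|  | * Illustrative sketch, not built with this file: withCapacity() is the |
|  | * simplest factory and allocates in the kernel task. The returned descriptor's |
|  | * length starts equal to its capacity, so a caller planning a shorter transfer |
|  | * follows up with setLength(). The helper name and sizes are hypothetical. |
|  | */ |
|  | #if 0 |
|  | static OSSharedPtr<IOBufferMemoryDescriptor> |
|  | ExampleScratchBuffer(void) |
|  | { |
|  | 	OSSharedPtr<IOBufferMemoryDescriptor> bmd = |
|  | 	    IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionInOut, false); |
|  | 	if (bmd) { |
|  | 		bmd->setLength(512);    // only the first 512 bytes will be transferred |
|  | 	} |
|  | 	return bmd; |
|  | } |
|  | #endif |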
| 601 | |
| 602 | #ifndef __LP64__ |
| 603 | /* |
| 604 | * initWithBytes: |
| 605 | * |
| 606 | * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied). |
| 607 | * The descriptor's length and capacity are set to the input buffer's size. |
| 608 | */ |
| 609 | bool |
| 610 | IOBufferMemoryDescriptor::initWithBytes(const void * inBytes, |
| 611 | vm_size_t inLength, |
| 612 | IODirection inDirection, |
| 613 | bool inContiguous) |
| 614 | { |
| 615 | if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared |
| 616 | | (inContiguous ? kIOMemoryPhysicallyContiguous : 0), |
| 617 | inLength, inLength, (mach_vm_address_t)0)) { |
| 618 | return false; |
| 619 | } |
| 620 | |
| 621 | // start out with no data |
| 622 | setLength(0); |
| 623 | |
| 624 | if (!appendBytes(inBytes, inLength)) { |
| 625 | return false; |
| 626 | } |
| 627 | |
| 628 | return true; |
| 629 | } |
| 630 | #endif /* !__LP64__ */ |
| 631 | |
| 632 | /* |
| 633 | * withBytes: |
| 634 | * |
| 635 | * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied). |
| 636 | * The descriptor's length and capacity are set to the input buffer's size. |
| 637 | */ |
| 638 | OSSharedPtr<IOBufferMemoryDescriptor> |
| 639 | IOBufferMemoryDescriptor::withBytes(const void * inBytes, |
| 640 | vm_size_t inLength, |
| 641 | IODirection inDirection, |
| 642 | bool inContiguous) |
| 643 | { |
| 644 | OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>(); |
| 645 | mach_vm_address_t alignment; |
| 646 | |
| 647 | alignment = (inLength <= page_size) ? inLength : page_size; |
| 648 | if (me && !me->initWithPhysicalMask( |
| 649 | kernel_task, inDirection | kIOMemoryUnshared |
| 650 | | (inContiguous ? kIOMemoryPhysicallyContiguous : 0), |
| 651 | inLength, alignment, 0)) { |
| 652 | me.reset(); |
| 653 | } |
| 654 | |
| 655 | if (me) { |
| 656 | // start out with no data |
| 657 | me->setLength(0); |
| 658 | |
| 659 | if (!me->appendBytes(inBytes, inLength)) { |
| 660 | me.reset(); |
| 661 | } |
| 662 | } |
| 663 | return me; |
| 664 | } |
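|  | /* |
|  | * Illustrative sketch, not built with this file: withBytes() copies the |
|  | * caller's data into a freshly allocated buffer, so the source may be released |
|  | * immediately afterwards. The command structure and helper are hypothetical. |
|  | */ |
|  | #if 0 |
|  | struct ExampleCommand { |
|  | 	uint32_t opcode; |
|  | 	uint32_t argument; |
|  | }; |
|  | |
|  | static OSSharedPtr<IOBufferMemoryDescriptor> |
|  | ExamplePreloadedCommand(void) |
|  | { |
|  | 	ExampleCommand cmd = { 0x01, 0x1000 }; |
|  | 	return IOBufferMemoryDescriptor::withBytes(&cmd, sizeof(cmd), |
|  | 	           kIODirectionOut, false); |
|  | } |
|  | #endif |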
| 665 | |
| 666 | /* |
| 667 | * free: |
| 668 | * |
| 669 | * Free resources |
| 670 | */ |
| 671 | void |
| 672 | IOBufferMemoryDescriptor::free() |
| 673 | { |
| 674 | // Cache all of the relevant information on the stack for use |
| 675 | // after we call super::free()! |
| 676 | IOOptionBits flags = _flags; |
| 677 | IOOptionBits internalFlags = _internalFlags; |
| 678 | IOOptionBits options = _options; |
| 679 | vm_size_t size = _capacity; |
| 680 | void * buffer = _buffer; |
| 681 | IOMemoryMap * map = NULL; |
| 682 | IOAddressRange * range = _ranges.v64; |
| 683 | vm_offset_t alignment = _alignment; |
| 684 | kalloc_heap_t kheap = KHEAP_DATA_BUFFERS; |
| 685 | vm_size_t rsize; |
| 686 | |
| 687 | if (alignment >= page_size) { |
| 688 | if (!round_page_overflow(size, &rsize)) { |
| 689 | size = rsize; |
| 690 | } |
| 691 | } |
| 692 | |
| 693 | if (reserved) { |
| 694 | map = reserved->map; |
| 695 | IOFreeType(reserved, ExpansionData); |
| 696 | if (map) { |
| 697 | map->release(); |
| 698 | } |
| 699 | } |
| 700 | |
| 701 | if ((options & kIOMemoryPageable) |
| 702 | || (kInternalFlagPageSized & internalFlags)) { |
| 703 | if (!round_page_overflow(size, &rsize)) { |
| 704 | size = rsize; |
| 705 | } |
| 706 | } |
| 707 | |
| 708 | if (internalFlags & kInternalFlagHasPointers) { |
| 709 | kheap = KHEAP_IOBMD_CONTROL; |
| 710 | } |
| 711 | |
| 712 | #if IOTRACKING |
| 713 | if (!(options & kIOMemoryPageable) |
| 714 | && buffer |
| 715 | && (kInternalFlagInit & _internalFlags)) { |
| 716 | trackingAccumSize(-size); |
| 717 | } |
| 718 | #endif /* IOTRACKING */ |
| 719 | |
| 720 | /* super::free may unwire - deallocate buffer afterwards */ |
| 721 | super::free(); |
| 722 | |
| 723 | if (options & kIOMemoryPageable) { |
| 724 | #if IOALLOCDEBUG |
| 725 | OSAddAtomicLong(-size, &debug_iomallocpageable_size); |
| 726 | #endif |
| 727 | } else if (buffer) { |
| 728 | if (kInternalFlagPhysical & internalFlags) { |
| 729 | IOKernelFreePhysical(kheap, (mach_vm_address_t) buffer, size); |
| 730 | } else if (kInternalFlagPageAllocated & internalFlags) { |
| 731 | #if defined(__x86_64__) |
| 732 | uintptr_t page; |
| 733 | page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size); |
| 734 | if (page) { |
| 735 | kmem_free(kernel_map, page, page_size); |
| 736 | } |
| 737 | #if IOALLOCDEBUG |
| 738 | OSAddAtomicLong(-size, &debug_iomalloc_size); |
| 739 | #endif |
| 740 | IOStatisticsAlloc(kIOStatisticsFreeAligned, size); |
| 741 | #else /* !defined(__x86_64__) */ |
| 742 | /* should be unreachable */ |
| 743 | panic("Attempting to free IOBMD with page allocated flag" ); |
| 744 | #endif /* defined(__x86_64__) */ |
| 745 | } else if (kInternalFlagGuardPages & internalFlags) { |
| 746 | vm_offset_t allocation = (vm_offset_t)buffer - page_size; |
| 747 | kmem_free(kernel_map, allocation, size + page_size * 2); |
| 748 | #if IOALLOCDEBUG |
| 749 | OSAddAtomicLong(-size, &debug_iomalloc_size); |
| 750 | #endif |
| 751 | IOStatisticsAlloc(kIOStatisticsFreeAligned, size); |
| 752 | } else if (alignment > 1) { |
| 753 | /* BEGIN IGNORE CODESTYLE */ |
| 754 | __typed_allocators_ignore_push |
| 755 | IOFreeAligned_internal(kheap, buffer, size); |
| 756 | } else { |
| 757 | IOFree_internal(kheap, buffer, size); |
| 758 | __typed_allocators_ignore_pop |
| 759 | /* END IGNORE CODESTYLE */ |
| 760 | } |
| 761 | } |
| 762 | if (range && (kIOMemoryAsReference & flags)) { |
| 763 | IOFreeType(range, IOAddressRange); |
| 764 | } |
| 765 | } |
| 766 | |
| 767 | /* |
| 768 | * getCapacity: |
| 769 | * |
| 770 | * Get the buffer capacity |
| 771 | */ |
| 772 | vm_size_t |
| 773 | IOBufferMemoryDescriptor::getCapacity() const |
| 774 | { |
| 775 | return _capacity; |
| 776 | } |
| 777 | |
| 778 | /* |
| 779 | * setLength: |
| 780 | * |
| 781 | * Change the buffer length of the memory descriptor. When a new buffer |
| 782 | * is created, the initial length of the buffer is set to be the same as |
| 783 | * the capacity. The length can be adjusted via setLength for a shorter |
| 784 | * transfer (there is no need to create more buffer descriptors when you |
| 785 | * can reuse an existing one, even for different transfer sizes). Note |
| 786 | * that the specified length must not exceed the capacity of the buffer. |
| 787 | */ |
| 788 | void |
| 789 | IOBufferMemoryDescriptor::setLength(vm_size_t length) |
| 790 | { |
| 791 | assert(length <= _capacity); |
| 792 | if (length > _capacity) { |
| 793 | return; |
| 794 | } |
| 795 | |
| 796 | _length = length; |
| 797 | _ranges.v64->length = length; |
| 798 | } |
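|  | /* |
|  | * Illustrative sketch, not built with this file: reusing one descriptor for |
|  | * transfers of different sizes by adjusting the length instead of |
|  | * reallocating, as the comment above describes. The sizes are hypothetical. |
|  | */ |
|  | #if 0 |
|  | static void |
|  | ExampleReuseForShorterTransfer(IOBufferMemoryDescriptor * bmd) |
|  | { |
|  | 	assert(512 <= bmd->getCapacity()); |
|  | 	bmd->setLength(512);                    // this transfer moves only 512 bytes |
|  | 	// ... issue the I/O ... |
|  | 	bmd->setLength(bmd->getCapacity());     // restore full length for the next one |
|  | } |
|  | #endif |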
| 799 | |
| 800 | /* |
| 801 | * setDirection: |
| 802 | * |
| 803 | * Change the direction of the transfer. This method allows one to redirect |
| 804 | * the descriptor's transfer direction. This eliminates the need to destroy |
| 805 | * and create new buffers when different transfer directions are needed. |
| 806 | */ |
| 807 | void |
| 808 | IOBufferMemoryDescriptor::setDirection(IODirection direction) |
| 809 | { |
| 810 | _flags = (_flags & ~kIOMemoryDirectionMask) | direction; |
| 811 | #ifndef __LP64__ |
| 812 | _direction = (IODirection) (_flags & kIOMemoryDirectionMask); |
| 813 | #endif /* !__LP64__ */ |
| 814 | } |
| 815 | |
| 816 | /* |
| 817 | * appendBytes: |
| 818 | * |
| 819 | * Add some data to the end of the buffer. This method automatically |
| 820 | * maintains the memory descriptor buffer length. Note that appendBytes |
| 821 | * will not copy past the end of the memory descriptor's current capacity. |
| 822 | */ |
| 823 | bool |
| 824 | IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength) |
| 825 | { |
| 826 | vm_size_t actualBytesToCopy = min(withLength, _capacity - _length); |
| 827 | IOByteCount offset; |
| 828 | |
| 829 | assert(_length <= _capacity); |
| 830 | |
| 831 | offset = _length; |
| 832 | _length += actualBytesToCopy; |
| 833 | _ranges.v64->length += actualBytesToCopy; |
| 834 | |
| 835 | if (_task == kernel_task) { |
| 836 | bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset), |
| 837 | actualBytesToCopy); |
| 838 | } else { |
| 839 | writeBytes(offset, bytes, actualBytesToCopy); |
| 840 | } |
| 841 | |
| 842 | return true; |
| 843 | } |
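|  | /* |
|  | * Illustrative sketch, not built with this file: building up a buffer |
|  | * incrementally with appendBytes(), which copies data and advances the length; |
|  | * a common pattern is to reset the length to zero first. The helper and its |
|  | * parameters are hypothetical. |
|  | */ |
|  | #if 0 |
|  | static bool |
|  | ExampleBuildMessage(IOBufferMemoryDescriptor * bmd, |
|  |     const void * header, vm_size_t headerLen, |
|  |     const void * payload, vm_size_t payloadLen) |
|  | { |
|  | 	bmd->setLength(0); |
|  | 	return bmd->appendBytes(header, headerLen) |
|  | 	       && bmd->appendBytes(payload, payloadLen); |
|  | } |
|  | #endif |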
| 844 | |
| 845 | /* |
| 846 | * getBytesNoCopy: |
| 847 | * |
| 848 | * Return the virtual address of the beginning of the buffer |
| 849 | */ |
| 850 | void * |
| 851 | IOBufferMemoryDescriptor::getBytesNoCopy() |
| 852 | { |
| 853 | if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) { |
| 854 | return _buffer; |
| 855 | } else { |
| 856 | return (void *)_ranges.v64->address; |
| 857 | } |
| 858 | } |
| 859 | |
| 860 | |
| 861 | /* |
| 862 | * getBytesNoCopy: |
| 863 | * |
| 864 | * Return the virtual address of an offset from the beginning of the buffer |
| 865 | */ |
| 866 | void * |
| 867 | IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength) |
| 868 | { |
| 869 | IOVirtualAddress address; |
| 870 | |
| 871 | if ((start + withLength) < start) { |
| 872 | return NULL; |
| 873 | } |
| 874 | |
| 875 | if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) { |
| 876 | address = (IOVirtualAddress) _buffer; |
| 877 | } else { |
| 878 | address = _ranges.v64->address; |
| 879 | } |
| 880 | |
| 881 | if (start < _length && (start + withLength) <= _length) { |
| 882 | return (void *)(address + start); |
| 883 | } |
| 884 | return NULL; |
| 885 | } |
| 886 | |
| 887 | #ifndef __LP64__ |
| 888 | void * |
| 889 | IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset, |
| 890 | IOByteCount * lengthOfSegment) |
| 891 | { |
| 892 | void * bytes = getBytesNoCopy(offset, 0); |
| 893 | |
| 894 | if (bytes && lengthOfSegment) { |
| 895 | *lengthOfSegment = _length - offset; |
| 896 | } |
| 897 | |
| 898 | return bytes; |
| 899 | } |
| 900 | #endif /* !__LP64__ */ |
| 901 | |
| 902 | #ifdef __LP64__ |
| 903 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0); |
| 904 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1); |
| 905 | #else /* !__LP64__ */ |
| 906 | OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0); |
| 907 | OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1); |
| 908 | #endif /* !__LP64__ */ |
| 909 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2); |
| 910 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3); |
| 911 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4); |
| 912 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5); |
| 913 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6); |
| 914 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7); |
| 915 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8); |
| 916 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9); |
| 917 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10); |
| 918 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11); |
| 919 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12); |
| 920 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13); |
| 921 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14); |
| 922 | OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15); |
| 923 | |