1/*
2 * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#define IOKIT_ENABLE_SHARED_PTR
29
30#include <sys/cdefs.h>
31
32#include <IOKit/assert.h>
33#include <IOKit/system.h>
34#include <IOKit/IOLib.h>
35#include <IOKit/IOMemoryDescriptor.h>
36#include <IOKit/IOMapper.h>
37#include <IOKit/IODMACommand.h>
38#include <IOKit/IOKitKeysPrivate.h>
39
40#include <IOKit/IOSubMemoryDescriptor.h>
41#include <IOKit/IOMultiMemoryDescriptor.h>
42#include <IOKit/IOBufferMemoryDescriptor.h>
43
44#include <IOKit/IOKitDebug.h>
45#include <IOKit/IOTimeStamp.h>
46#include <libkern/OSDebug.h>
47#include <libkern/OSKextLibPrivate.h>
48
49#include "IOKitKernelInternal.h"
50
51#include <libkern/c++/OSAllocation.h>
52#include <libkern/c++/OSContainers.h>
53#include <libkern/c++/OSDictionary.h>
54#include <libkern/c++/OSArray.h>
55#include <libkern/c++/OSSymbol.h>
56#include <libkern/c++/OSNumber.h>
57#include <os/overflow.h>
58#include <os/cpp_util.h>
59#include <os/base_private.h>
60
61#include <sys/uio.h>
62
63__BEGIN_DECLS
64#include <vm/pmap.h>
65#include <vm/vm_pageout.h>
66#include <mach/memory_object_types.h>
67#include <device/device_port.h>
68
69#include <mach/vm_prot.h>
70#include <mach/mach_vm.h>
71#include <mach/memory_entry.h>
72#include <vm/vm_fault.h>
73#include <vm/vm_protos.h>
74
75extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76extern void ipc_port_release_send(ipc_port_t port);
77
78extern kern_return_t
79mach_memory_entry_ownership(
80 ipc_port_t entry_port,
81 task_t owner,
82 int ledger_tag,
83 int ledger_flags);
84
85__END_DECLS
86
87#define kIOMapperWaitSystem ((IOMapper *) 1)
88
89static IOMapper * gIOSystemMapper = NULL;
90
91ppnum_t gIOLastPage;
92
93enum {
94 kIOMapGuardSizeLarge = 65536
95};
96
97/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
98
99OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
100
101#define super IOMemoryDescriptor
102
103OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
104 IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
105
106/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
107
108static IORecursiveLock * gIOMemoryLock;
109
110#define LOCK IORecursiveLockLock( gIOMemoryLock)
111#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
112#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
113#define WAKEUP \
114 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
115
116#if 0
117#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
118#else
119#define DEBG(fmt, args...) {}
120#endif
121
122/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123
// Some data structures and accessor macros used by the initWithOptions()
// function.
126
127enum ioPLBlockFlags {
128 kIOPLOnDevice = 0x00000001,
129 kIOPLExternUPL = 0x00000002,
130};
131
132struct IOMDPersistentInitData {
133 const IOGeneralMemoryDescriptor * fMD;
134 IOMemoryReference * fMemRef;
135};
136
137struct ioPLBlock {
138 upl_t fIOPL;
139 vm_address_t fPageInfo; // Pointer to page list or index into it
140 uint64_t fIOMDOffset; // The offset of this iopl in descriptor
141 ppnum_t fMappedPage; // Page number of first page in this iopl
142 unsigned int fPageOffset; // Offset within first page of iopl
143 unsigned int fFlags; // Flags
144};
145
146enum { kMaxWireTags = 6 };
147
148struct ioGMDData {
149 IOMapper * fMapper;
150 uint64_t fDMAMapAlignment;
151 uint64_t fMappedBase;
152 uint64_t fMappedLength;
153 uint64_t fPreparationID;
154#if IOTRACKING
155 IOTracking fWireTracking;
156#endif /* IOTRACKING */
157 unsigned int fPageCnt;
158 uint8_t fDMAMapNumAddressBits;
159 unsigned char fCompletionError:1;
160 unsigned char fMappedBaseValid:1;
161 unsigned char _resv:4;
162 unsigned char fDMAAccess:2;
163
164 /* variable length arrays */
165 upl_page_info_t fPageList[1]
166#if __LP64__
167 // align fPageList as for ioPLBlock
168 __attribute__((aligned(sizeof(upl_t))))
169#endif
170 ;
171 //ioPLBlock fBlocks[1];
172};
173
174#pragma GCC visibility push(hidden)
175
176class _IOMemoryDescriptorMixedData : public OSObject
177{
178 OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);
179
180public:
181 static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
182 bool initWithCapacity(size_t capacity);
183 virtual void free() APPLE_KEXT_OVERRIDE;
184
185 bool appendBytes(const void * bytes, size_t length);
186 bool setLength(size_t length);
187
188 const void * getBytes() const;
189 size_t getLength() const;
190
191private:
192 void freeMemory();
193
194 void * _data = nullptr;
195 size_t _length = 0;
196 size_t _capacity = 0;
197};
198
199#pragma GCC visibility pop
200
201#define getDataP(osd) ((ioGMDData *) (osd)->getBytes())
202#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
203#define getNumIOPL(osd, d) \
204 ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
205#define getPageList(d) (&(d->fPageList[0]))
206#define computeDataSize(p, u) \
207 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
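
/*
 * Layout sketch (editor's addition, not original source): the ioGMDData kept in
 * _memoryEntries is one variable-length allocation. computeDataSize(p, u) above
 * sizes it as the fixed header, then p upl_page_info_t entries, then u ioPLBlock
 * records appended immediately after the page list:
 *
 *     +------------------+---------------------------+-------------------------+
 *     | ioGMDData header | fPageList[0..fPageCnt-1]  | ioPLBlock[0..numIOPL-1] |
 *     +------------------+---------------------------+-------------------------+
 *
 * getPageList(d) returns the start of the page list, getIOPLList(d) returns the
 * address just past fPageList[fPageCnt], and getNumIOPL() derives the block count
 * from the container's total length. fPageList is aligned to sizeof(upl_t) on
 * LP64 so the appended ioPLBlock array (whose first member is an upl_t) stays
 * naturally aligned.
 */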
208
209enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
210
211/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212
213extern "C" {
214kern_return_t
215device_data_action(
216 uintptr_t device_handle,
217 ipc_port_t device_pager,
218 vm_prot_t protection,
219 vm_object_offset_t offset,
220 vm_size_t size)
221{
222 kern_return_t kr;
223 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
224 OSSharedPtr<IOMemoryDescriptor> memDesc;
225
226 LOCK;
227 if (ref->dp.memory) {
228 memDesc.reset(p: ref->dp.memory, OSRetain);
229 kr = memDesc->handleFault(pager: device_pager, sourceOffset: offset, length: size);
230 memDesc.reset();
231 } else {
232 kr = KERN_ABORTED;
233 }
234 UNLOCK;
235
236 return kr;
237}
238
239kern_return_t
240device_close(
241 uintptr_t device_handle)
242{
243 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
244
245 IOFreeType( ref, IOMemoryDescriptorReserved );
246
247 return kIOReturnSuccess;
248}
249}; // end extern "C"
250
251/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
252
// Note: this inline function uses C++ reference arguments to return values.
// This means pointers are not passed, and no NULL checks are needed, since a
// NULL reference is illegal.
256static inline void
257getAddrLenForInd(
258 mach_vm_address_t &addr,
259 mach_vm_size_t &len, // Output variables
260 UInt32 type,
261 IOGeneralMemoryDescriptor::Ranges r,
262 UInt32 ind,
263 task_t task __unused)
264{
265 assert(kIOMemoryTypeUIO == type
266 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
267 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
268 if (kIOMemoryTypeUIO == type) {
269 user_size_t us;
270 user_addr_t ad;
271 uio_getiov(a_uio: (uio_t) r.uio, a_index: ind, a_baseaddr_p: &ad, a_length_p: &us); addr = ad; len = us;
272 }
273#ifndef __LP64__
274 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
275 IOAddressRange cur = r.v64[ind];
276 addr = cur.address;
277 len = cur.length;
278 }
279#endif /* !__LP64__ */
280 else {
281 IOVirtualRange cur = r.v[ind];
282 addr = cur.address;
283 len = cur.length;
284 }
285#if CONFIG_PROB_GZALLOC
286 if (task == kernel_task) {
287 addr = pgz_decode(addr, len);
288 }
289#endif /* CONFIG_PROB_GZALLOC */
290}
291
292/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
293
294static IOReturn
295purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
296{
297 IOReturn err = kIOReturnSuccess;
298
299 *control = VM_PURGABLE_SET_STATE;
300
301 enum { kIOMemoryPurgeableControlMask = 15 };
302
303 switch (kIOMemoryPurgeableControlMask & newState) {
304 case kIOMemoryPurgeableKeepCurrent:
305 *control = VM_PURGABLE_GET_STATE;
306 break;
307
308 case kIOMemoryPurgeableNonVolatile:
309 *state = VM_PURGABLE_NONVOLATILE;
310 break;
311 case kIOMemoryPurgeableVolatile:
312 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
313 break;
314 case kIOMemoryPurgeableEmpty:
315 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
316 break;
317 default:
318 err = kIOReturnBadArgument;
319 break;
320 }
321
322 if (*control == VM_PURGABLE_SET_STATE) {
323 // let VM know this call is from the kernel and is allowed to alter
324 // the volatility of the memory entry even if it was created with
325 // MAP_MEM_PURGABLE_KERNEL_ONLY
326 *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
327 }
328
329 return err;
330}
331
332static IOReturn
333purgeableStateBits(int * state)
334{
335 IOReturn err = kIOReturnSuccess;
336
337 switch (VM_PURGABLE_STATE_MASK & *state) {
338 case VM_PURGABLE_NONVOLATILE:
339 *state = kIOMemoryPurgeableNonVolatile;
340 break;
341 case VM_PURGABLE_VOLATILE:
342 *state = kIOMemoryPurgeableVolatile;
343 break;
344 case VM_PURGABLE_EMPTY:
345 *state = kIOMemoryPurgeableEmpty;
346 break;
347 default:
348 *state = kIOMemoryPurgeableNonVolatile;
349 err = kIOReturnNotReady;
350 break;
351 }
352 return err;
353}
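
/*
 * Usage sketch (editor's addition, not original source): how a client typically
 * exercises the purgeable-state translation above. The buffer size and option
 * names are placeholders/assumptions for illustration only.
 *
 *     IOOptionBits oldState = 0;
 *     OSSharedPtr<IOBufferMemoryDescriptor> buf =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
 *             kIODirectionOutIn | kIOMemoryPurgeable, 4 * page_size, page_size);
 *     if (buf) {
 *         // mark reclaimable while the cached contents are idle
 *         buf->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *         // ... later, take it back before reuse ...
 *         buf->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *         if (kIOMemoryPurgeableEmpty == oldState) {
 *             // the VM reclaimed the pages; contents must be regenerated
 *         }
 *     }
 */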
354
355typedef struct {
356 unsigned int wimg;
357 unsigned int object_type;
358} iokit_memtype_entry;
359
360static const iokit_memtype_entry iomd_mem_types[] = {
361 [kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
362 [kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
363 [kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
364 [kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
365 [kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
366 [kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
367 [kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
368 [kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
369 [kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
370 [kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
371};
372
373static vm_prot_t
374vmProtForCacheMode(IOOptionBits cacheMode)
375{
376 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
377 if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
378 cacheMode = kIODefaultCache;
379 }
380 vm_prot_t prot = 0;
381 SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
382 return prot;
383}
384
385static unsigned int
386pagerFlagsForCacheMode(IOOptionBits cacheMode)
387{
388 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
389 if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
390 cacheMode = kIODefaultCache;
391 }
392 if (cacheMode == kIODefaultCache) {
393 return -1U;
394 }
395 return iomd_mem_types[cacheMode].wimg;
396}
397
398static IOOptionBits
399cacheModeForPagerFlags(unsigned int pagerFlags)
400{
401 pagerFlags &= VM_WIMG_MASK;
402 IOOptionBits cacheMode = kIODefaultCache;
403 for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
404 if (iomd_mem_types[i].wimg == pagerFlags) {
405 cacheMode = i;
406 break;
407 }
408 }
409 return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
410}
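
/*
 * Usage sketch (editor's addition, not original source): the table above is what
 * ultimately backs the kIOMap*Cache options. Mapping a (placeholder) framebuffer
 * range write-combined instead of with the default cache mode might look like:
 *
 *     OSSharedPtr<IOMemoryDescriptor> fbDesc =
 *         IOMemoryDescriptor::withAddressRange(fbPhysAddr, fbLength,
 *             kIODirectionOutIn | kIOMemoryMapperNone, TASK_NULL);
 *     OSSharedPtr<IOMemoryMap> fbMap = fbDesc ?
 *         fbDesc->createMappingInTask(kernel_task, 0,
 *             kIOMapAnywhere | kIOMapWriteCombineCache) : nullptr;
 *
 * kIOMapWriteCombineCache selects the kIOWriteCombineCache row, i.e.
 * VM_WIMG_WCOMB / MAP_MEM_WCOMB.
 */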
411
412/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
413/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
414
415struct IOMemoryEntry {
416 ipc_port_t entry;
417 int64_t offset;
418 uint64_t size;
419 uint64_t start;
420};
421
422struct IOMemoryReference {
423 volatile SInt32 refCount;
424 vm_prot_t prot;
425 uint32_t capacity;
426 uint32_t count;
427 struct IOMemoryReference * mapRef;
428 IOMemoryEntry entries[0];
429};
430
431enum{
432 kIOMemoryReferenceReuse = 0x00000001,
433 kIOMemoryReferenceWrite = 0x00000002,
434 kIOMemoryReferenceCOW = 0x00000004,
435};
436
437SInt32 gIOMemoryReferenceCount;
438
439IOMemoryReference *
440IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
441{
442 IOMemoryReference * ref;
443 size_t oldCapacity;
444
445 if (realloc) {
446 oldCapacity = realloc->capacity;
447 } else {
448 oldCapacity = 0;
449 }
450
451 // Use the kalloc API instead of manually handling the reallocation
452 ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
453 oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
454 if (ref) {
455 if (oldCapacity == 0) {
456 ref->refCount = 1;
457 OSIncrementAtomic(&gIOMemoryReferenceCount);
458 }
459 ref->capacity = capacity;
460 }
461 return ref;
462}
463
464void
465IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
466{
467 IOMemoryEntry * entries;
468
469 if (ref->mapRef) {
470 memoryReferenceFree(ref: ref->mapRef);
471 ref->mapRef = NULL;
472 }
473
474 entries = ref->entries + ref->count;
475 while (entries > &ref->entries[0]) {
476 entries--;
477 ipc_port_release_send(port: entries->entry);
478 }
479 kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
480
481 OSDecrementAtomic(&gIOMemoryReferenceCount);
482}
483
484void
485IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
486{
487 if (1 == OSDecrementAtomic(&ref->refCount)) {
488 memoryReferenceFree(ref);
489 }
490}
491
492
493IOReturn
494IOGeneralMemoryDescriptor::memoryReferenceCreate(
495 IOOptionBits options,
496 IOMemoryReference ** reference)
497{
498 enum { kCapacity = 4, kCapacityInc = 4 };
499
500 kern_return_t err;
501 IOMemoryReference * ref;
502 IOMemoryEntry * entries;
503 IOMemoryEntry * cloneEntries = NULL;
504 vm_map_t map;
505 ipc_port_t entry, cloneEntry;
506 vm_prot_t prot;
507 memory_object_size_t actualSize;
508 uint32_t rangeIdx;
509 uint32_t count;
510 mach_vm_address_t entryAddr, endAddr, entrySize;
511 mach_vm_size_t srcAddr, srcLen;
512 mach_vm_size_t nextAddr, nextLen;
513 mach_vm_size_t offset, remain;
514 vm_map_offset_t overmap_start = 0, overmap_end = 0;
515 int misaligned_start = 0, misaligned_end = 0;
516 IOByteCount physLen;
517 IOOptionBits type = (_flags & kIOMemoryTypeMask);
518 IOOptionBits cacheMode;
519 unsigned int pagerFlags;
520 vm_tag_t tag;
521 vm_named_entry_kernel_flags_t vmne_kflags;
522
523 ref = memoryReferenceAlloc(capacity: kCapacity, NULL);
524 if (!ref) {
525 return kIOReturnNoMemory;
526 }
527
528 tag = (vm_tag_t) getVMTag(map: kernel_map);
529 vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
530 entries = &ref->entries[0];
531 count = 0;
532 err = KERN_SUCCESS;
533
534 offset = 0;
535 rangeIdx = 0;
536 remain = _length;
537 if (_task) {
538 getAddrLenForInd(addr&: nextAddr, len&: nextLen, type, r: _ranges, ind: rangeIdx, task: _task);
539
540 // account for IOBMD setLength(), use its capacity as length
541 IOBufferMemoryDescriptor * bmd;
542 if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
543 nextLen = bmd->getCapacity();
544 remain = nextLen;
545 }
546 } else {
547 nextAddr = getPhysicalSegment(offset, length: &physLen, options: kIOMemoryMapperNone);
548 nextLen = physLen;
549
550 // default cache mode for physical
551 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
552 IOOptionBits mode = cacheModeForPagerFlags(pagerFlags: IODefaultCacheBits(pa: nextAddr));
553 _flags |= (mode << kIOMemoryBufferCacheShift);
554 }
555 }
556
557 // cache mode & vm_prot
558 prot = VM_PROT_READ;
559 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
560 prot |= vmProtForCacheMode(cacheMode);
561 // VM system requires write access to change cache mode
562 if (kIODefaultCache != cacheMode) {
563 prot |= VM_PROT_WRITE;
564 }
565 if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
566 prot |= VM_PROT_WRITE;
567 }
568 if (kIOMemoryReferenceWrite & options) {
569 prot |= VM_PROT_WRITE;
570 }
571 if (kIOMemoryReferenceCOW & options) {
572 prot |= MAP_MEM_VM_COPY;
573 }
574
575 if (kIOMemoryUseReserve & _flags) {
576 prot |= MAP_MEM_GRAB_SECLUDED;
577 }
578
579 if ((kIOMemoryReferenceReuse & options) && _memRef) {
580 cloneEntries = &_memRef->entries[0];
581 prot |= MAP_MEM_NAMED_REUSE;
582 }
583
584 if (_task) {
585 // virtual ranges
586
587 if (kIOMemoryBufferPageable & _flags) {
588 int ledger_tag, ledger_no_footprint;
589
590 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
591 prot |= MAP_MEM_NAMED_CREATE;
592
593 // default accounting settings:
594 // + "none" ledger tag
595 // + include in footprint
596 // can be changed later with ::setOwnership()
597 ledger_tag = VM_LEDGER_TAG_NONE;
598 ledger_no_footprint = 0;
599
600 if (kIOMemoryBufferPurgeable & _flags) {
601 prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
602 if (VM_KERN_MEMORY_SKYWALK == tag) {
603 // Skywalk purgeable memory accounting:
604 // + "network" ledger tag
605 // + not included in footprint
606 ledger_tag = VM_LEDGER_TAG_NETWORK;
607 ledger_no_footprint = 1;
608 } else {
609 // regular purgeable memory accounting:
610 // + no ledger tag
611 // + included in footprint
612 ledger_tag = VM_LEDGER_TAG_NONE;
613 ledger_no_footprint = 0;
614 }
615 }
616 vmne_kflags.vmnekf_ledger_tag = ledger_tag;
617 vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
618 if (kIOMemoryUseReserve & _flags) {
619 prot |= MAP_MEM_GRAB_SECLUDED;
620 }
621
622 prot |= VM_PROT_WRITE;
623 map = NULL;
624 } else {
625 prot |= MAP_MEM_USE_DATA_ADDR;
626 map = get_task_map(_task);
627 }
628 DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
629
630 while (remain) {
631 srcAddr = nextAddr;
632 srcLen = nextLen;
633 nextAddr = 0;
634 nextLen = 0;
635 // coalesce addr range
636 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
637 getAddrLenForInd(addr&: nextAddr, len&: nextLen, type, r: _ranges, ind: rangeIdx, task: _task);
638 if ((srcAddr + srcLen) != nextAddr) {
639 break;
640 }
641 srcLen += nextLen;
642 }
643
644 if (MAP_MEM_USE_DATA_ADDR & prot) {
645 entryAddr = srcAddr;
646 endAddr = srcAddr + srcLen;
647 } else {
648 entryAddr = trunc_page_64(srcAddr);
649 endAddr = round_page_64(x: srcAddr + srcLen);
650 }
651 if (vm_map_page_mask(map: get_task_map(_task)) < PAGE_MASK) {
652 DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
653 }
654
655 do{
656 entrySize = (endAddr - entryAddr);
657 if (!entrySize) {
658 break;
659 }
660 actualSize = entrySize;
661
662 cloneEntry = MACH_PORT_NULL;
663 if (MAP_MEM_NAMED_REUSE & prot) {
664 if (cloneEntries < &_memRef->entries[_memRef->count]) {
665 cloneEntry = cloneEntries->entry;
666 } else {
667 prot &= ~MAP_MEM_NAMED_REUSE;
668 }
669 }
670
671 err = mach_make_memory_entry_internal(target_map: map,
672 size: &actualSize, offset: entryAddr, permission: prot, vmne_kflags, object_handle: &entry, parent_handle: cloneEntry);
673
674 if (KERN_SUCCESS != err) {
675 DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
676 break;
677 }
678 if (MAP_MEM_USE_DATA_ADDR & prot) {
679 if (actualSize > entrySize) {
680 actualSize = entrySize;
681 }
682 } else if (actualSize > entrySize) {
683 panic("mach_make_memory_entry_64 actualSize");
684 }
685
686 memory_entry_check_for_adjustment(src_map: map, port: entry, overmap_start: &overmap_start, overmap_end: &overmap_end);
687
688 if (count && overmap_start) {
689 /*
690 * Track misaligned start for all
691 * except the first entry.
692 */
693 misaligned_start++;
694 }
695
696 if (overmap_end) {
697 /*
698 * Ignore misaligned end for the
699 * last entry.
700 */
701 if ((entryAddr + actualSize) != endAddr) {
702 misaligned_end++;
703 }
704 }
705
706 if (count) {
707 /* Middle entries */
708 if (misaligned_start || misaligned_end) {
709 DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
710 ipc_port_release_send(port: entry);
711 err = KERN_NOT_SUPPORTED;
712 break;
713 }
714 }
715
716 if (count >= ref->capacity) {
717 ref = memoryReferenceAlloc(capacity: ref->capacity + kCapacityInc, realloc: ref);
718 entries = &ref->entries[count];
719 }
720 entries->entry = entry;
721 entries->size = actualSize;
722 entries->offset = offset + (entryAddr - srcAddr);
723 entries->start = entryAddr;
724 entryAddr += actualSize;
725 if (MAP_MEM_NAMED_REUSE & prot) {
726 if ((cloneEntries->entry == entries->entry)
727 && (cloneEntries->size == entries->size)
728 && (cloneEntries->offset == entries->offset)) {
729 cloneEntries++;
730 } else {
731 prot &= ~MAP_MEM_NAMED_REUSE;
732 }
733 }
734 entries++;
735 count++;
736 }while (true);
737 offset += srcLen;
738 remain -= srcLen;
739 }
740 } else {
741 // _task == 0, physical or kIOMemoryTypeUPL
742 memory_object_t pager;
743 vm_size_t size = ptoa_64(_pages);
744
745 if (!getKernelReserved()) {
746 panic("getKernelReserved");
747 }
748
749 reserved->dp.pagerContig = (1 == _rangesCount);
750 reserved->dp.memory = this;
751
752 pagerFlags = pagerFlagsForCacheMode(cacheMode);
753 if (-1U == pagerFlags) {
754 panic("phys is kIODefaultCache");
755 }
756 if (reserved->dp.pagerContig) {
757 pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
758 }
759
760 pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
761 size, pagerFlags);
762 assert(pager);
763 if (!pager) {
764 DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
765 err = kIOReturnVMError;
766 } else {
767 srcAddr = nextAddr;
768 entryAddr = trunc_page_64(srcAddr);
769 err = mach_memory_object_memory_entry_64(host: (host_t) 1, internal: false /*internal*/,
770 size, VM_PROT_READ | VM_PROT_WRITE, pager, entry_handle: &entry);
771 assert(KERN_SUCCESS == err);
772 if (KERN_SUCCESS != err) {
773 device_pager_deallocate(pager);
774 } else {
775 reserved->dp.devicePager = pager;
776 entries->entry = entry;
777 entries->size = size;
778 entries->offset = offset + (entryAddr - srcAddr);
779 entries++;
780 count++;
781 }
782 }
783 }
784
785 ref->count = count;
786 ref->prot = prot;
787
788 if (_task && (KERN_SUCCESS == err)
789 && (kIOMemoryMapCopyOnWrite & _flags)
790 && !(kIOMemoryReferenceCOW & options)) {
791 err = memoryReferenceCreate(options: options | kIOMemoryReferenceCOW, reference: &ref->mapRef);
792 if (KERN_SUCCESS != err) {
793 DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
794 }
795 }
796
797 if (KERN_SUCCESS == err) {
798 if (MAP_MEM_NAMED_REUSE & prot) {
799 memoryReferenceFree(ref);
800 OSIncrementAtomic(&_memRef->refCount);
801 ref = _memRef;
802 }
803 } else {
804 DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
805 memoryReferenceFree(ref);
806 ref = NULL;
807 }
808
809 *reference = ref;
810
811 return err;
812}
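
/*
 * Context sketch (editor's addition, not original source): memoryReferenceCreate()
 * is not called by clients directly; it runs the first time a descriptor's backing
 * store needs named entries, for example when the descriptor is first mapped. A
 * descriptor created with kIOMemoryMapCopyOnWrite additionally gets the nested
 * ref->mapRef built above, so later mappings observe a snapshot of the client's
 * pages. Roughly (placeholder task/address values):
 *
 *     OSSharedPtr<IOMemoryDescriptor> md =
 *         IOMemoryDescriptor::withAddressRange(userAddr, userLen,
 *             kIODirectionOut | kIOMemoryMapCopyOnWrite, userTask);
 *     OSSharedPtr<IOMemoryMap> km = md ?
 *         md->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapReadOnly)
 *         : nullptr;   // mapping triggers memoryReferenceCreate()
 */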
813
814static mach_vm_size_t
815IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
816{
817 switch (kIOMapGuardedMask & options) {
818 default:
819 case kIOMapGuardedSmall:
820 return vm_map_page_size(map);
821 case kIOMapGuardedLarge:
822 assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
823 return kIOMapGuardSizeLarge;
824 }
825 ;
826}
827
828static kern_return_t
829IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
830 vm_map_offset_t addr, mach_vm_size_t size)
831{
832 kern_return_t kr;
833 vm_map_offset_t actualAddr;
834 mach_vm_size_t actualSize;
835
836 actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
837 actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
838
839 if (kIOMapGuardedMask & options) {
840 mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
841 actualAddr -= guardSize;
842 actualSize += 2 * guardSize;
843 }
844 kr = mach_vm_deallocate(target: map, address: actualAddr, size: actualSize);
845
846 return kr;
847}
848
849kern_return_t
850IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
851{
852 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
853 IOReturn err;
854 vm_map_offset_t addr;
855 mach_vm_size_t size;
856 mach_vm_size_t guardSize;
857 vm_map_kernel_flags_t vmk_flags;
858
859 addr = ref->mapped;
860 size = ref->size;
861 guardSize = 0;
862
863 if (kIOMapGuardedMask & ref->options) {
864 if (!(kIOMapAnywhere & ref->options)) {
865 return kIOReturnBadArgument;
866 }
867 guardSize = IOMemoryDescriptorMapGuardSize(map, options: ref->options);
868 size += 2 * guardSize;
869 }
870 if (kIOMapAnywhere & ref->options) {
871 vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
872 } else {
873 vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
874 }
875 vmk_flags.vm_tag = ref->tag;
876
	/*
	 * Mappings of memory into the kernel_map made via IOMDs use the data range.
	 * Memory being mapped should not contain kernel pointers.
	 */
881 if (map == kernel_map) {
882 vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
883 }
884
885 err = vm_map_enter_mem_object(map, address: &addr, size,
886#if __ARM_MIXED_PAGE_SIZE__
887 // TODO4K this should not be necessary...
888 mask: (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
889#else /* __ARM_MIXED_PAGE_SIZE__ */
890 (vm_map_offset_t) 0,
891#endif /* __ARM_MIXED_PAGE_SIZE__ */
892 vmk_flags,
893 IPC_PORT_NULL,
894 offset: (memory_object_offset_t) 0,
895 needs_copy: false, /* copy */
896 cur_protection: ref->prot,
897 max_protection: ref->prot,
898 VM_INHERIT_NONE);
899 if (KERN_SUCCESS == err) {
900 ref->mapped = (mach_vm_address_t) addr;
901 ref->map = map;
902 if (kIOMapGuardedMask & ref->options) {
903 vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));
904
905 err = vm_map_protect(map, start: addr, end: addr + guardSize, VM_PROT_NONE, set_max: false /*set_max*/);
906 assert(KERN_SUCCESS == err);
907 err = vm_map_protect(map, start: lastpage, end: lastpage + guardSize, VM_PROT_NONE, set_max: false /*set_max*/);
908 assert(KERN_SUCCESS == err);
909 ref->mapped += guardSize;
910 }
911 }
912
913 return err;
914}
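
/*
 * Layout sketch (editor's addition, not original source): when kIOMapGuardedSmall
 * or kIOMapGuardedLarge is requested, the allocation made above is bracketed by
 * inaccessible guard pages and ref->mapped is advanced past the leading guard, so
 * the caller only ever sees the usable range:
 *
 *     |<- guardSize ->|<------------- size ------------->|<- guardSize ->|
 *     |  VM_PROT_NONE |      mapped data (ref->prot)     |  VM_PROT_NONE |
 *                     ^ ref->mapped
 *
 * IOMemoryDescriptorMapDealloc() re-applies the same widening when tearing the
 * mapping down, which is why the two routines must stay in sync.
 */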
915
916IOReturn
917IOGeneralMemoryDescriptor::memoryReferenceMap(
918 IOMemoryReference * ref,
919 vm_map_t map,
920 mach_vm_size_t inoffset,
921 mach_vm_size_t size,
922 IOOptionBits options,
923 mach_vm_address_t * inaddr)
924{
925 IOReturn err;
926 int64_t offset = inoffset;
927 uint32_t rangeIdx, entryIdx;
928 vm_map_offset_t addr, mapAddr;
929 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
930
931 mach_vm_address_t nextAddr;
932 mach_vm_size_t nextLen;
933 IOByteCount physLen;
934 IOMemoryEntry * entry;
935 vm_prot_t prot, memEntryCacheMode;
936 IOOptionBits type;
937 IOOptionBits cacheMode;
938 vm_tag_t tag;
939 // for the kIOMapPrefault option.
940 upl_page_info_t * pageList = NULL;
941 UInt currentPageIndex = 0;
942 bool didAlloc;
943
944 DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
945
946 if (ref->mapRef) {
947 err = memoryReferenceMap(ref: ref->mapRef, map, inoffset, size, options, inaddr);
948 return err;
949 }
950
951 if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
952 err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
953 return err;
954 }
955
956 type = _flags & kIOMemoryTypeMask;
957
958 prot = VM_PROT_READ;
959 if (!(kIOMapReadOnly & options)) {
960 prot |= VM_PROT_WRITE;
961 }
962 prot &= ref->prot;
963
964 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
965 if (kIODefaultCache != cacheMode) {
966 // VM system requires write access to update named entry cache mode
967 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
968 }
969
970 tag = (typeof(tag))getVMTag(map);
971
972 if (_task) {
973 // Find first range for offset
974 if (!_rangesCount) {
975 return kIOReturnBadArgument;
976 }
977 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
978 getAddrLenForInd(addr&: nextAddr, len&: nextLen, type, r: _ranges, ind: rangeIdx, task: _task);
979 if (remain < nextLen) {
980 break;
981 }
982 remain -= nextLen;
983 }
984 } else {
985 rangeIdx = 0;
986 remain = 0;
987 nextAddr = getPhysicalSegment(offset, length: &physLen, options: kIOMemoryMapperNone);
988 nextLen = size;
989 }
990
991 assert(remain < nextLen);
992 if (remain >= nextLen) {
993 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
994 return kIOReturnBadArgument;
995 }
996
997 nextAddr += remain;
998 nextLen -= remain;
999#if __ARM_MIXED_PAGE_SIZE__
1000 pageOffset = (vm_map_page_mask(map) & nextAddr);
1001#else /* __ARM_MIXED_PAGE_SIZE__ */
1002 pageOffset = (page_mask & nextAddr);
1003#endif /* __ARM_MIXED_PAGE_SIZE__ */
1004 addr = 0;
1005 didAlloc = false;
1006
1007 if (!(options & kIOMapAnywhere)) {
1008 addr = *inaddr;
1009 if (pageOffset != (vm_map_page_mask(map) & addr)) {
1010 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1011 }
1012 addr -= pageOffset;
1013 }
1014
1015 // find first entry for offset
1016 for (entryIdx = 0;
1017 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1018 entryIdx++) {
1019 }
1020 entryIdx--;
1021 entry = &ref->entries[entryIdx];
1022
1023 // allocate VM
1024#if __ARM_MIXED_PAGE_SIZE__
1025 size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1026#else
1027 size = round_page_64(size + pageOffset);
1028#endif
1029 if (kIOMapOverwrite & options) {
1030 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1031 map = IOPageableMapForAddress(address: addr);
1032 }
1033 err = KERN_SUCCESS;
1034 } else {
1035 IOMemoryDescriptorMapAllocRef ref;
1036 ref.map = map;
1037 ref.tag = tag;
1038 ref.options = options;
1039 ref.size = size;
1040 ref.prot = prot;
1041 if (options & kIOMapAnywhere) {
1042 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1043 ref.mapped = 0;
1044 } else {
1045 ref.mapped = addr;
1046 }
1047 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1048 err = IOIteratePageableMaps( size: ref.size, callback: &IOMemoryDescriptorMapAlloc, ref: &ref );
1049 } else {
1050 err = IOMemoryDescriptorMapAlloc(map: ref.map, ref: &ref);
1051 }
1052 if (KERN_SUCCESS == err) {
1053 addr = ref.mapped;
1054 map = ref.map;
1055 didAlloc = true;
1056 }
1057 }
1058
1059 /*
1060 * If the memory is associated with a device pager but doesn't have a UPL,
1061 * it will be immediately faulted in through the pager via populateDevicePager().
1062 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1063 * operations.
1064 */
1065 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1066 options &= ~kIOMapPrefault;
1067 }
1068
1069 /*
1070 * Prefaulting is only possible if we wired the memory earlier. Check the
1071 * memory type, and the underlying data.
1072 */
1073 if (options & kIOMapPrefault) {
1074 /*
1075 * The memory must have been wired by calling ::prepare(), otherwise
1076 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1077 */
1078 assert(_wireCount != 0);
1079 assert(_memoryEntries != NULL);
1080 if ((_wireCount == 0) ||
1081 (_memoryEntries == NULL)) {
1082 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1083 return kIOReturnBadArgument;
1084 }
1085
1086 // Get the page list.
1087 ioGMDData* dataP = getDataP(_memoryEntries);
1088 ioPLBlock const* ioplList = getIOPLList(dataP);
1089 pageList = getPageList(dataP);
1090
1091 // Get the number of IOPLs.
1092 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1093
		/*
		 * Scan through the IOPL info blocks, looking for the first block containing
		 * the offset. The search will run one block past it, so we step back to the
		 * right block afterwards.
		 */
1099 UInt ioplIndex = 0;
1100 while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1101 ioplIndex++;
1102 }
1103 ioplIndex--;
1104
1105 // Retrieve the IOPL info block.
1106 ioPLBlock ioplInfo = ioplList[ioplIndex];
1107
1108 /*
1109 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1110 * array.
1111 */
1112 if (ioplInfo.fFlags & kIOPLExternUPL) {
1113 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1114 } else {
1115 pageList = &pageList[ioplInfo.fPageInfo];
1116 }
1117
		// Rebase [offset] into the IOPL in order to look up the first page index.
1119 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1120
1121 // Retrieve the index of the first page corresponding to the offset.
1122 currentPageIndex = atop_32(offsetInIOPL);
1123 }
1124
1125 // enter mappings
1126 remain = size;
1127 mapAddr = addr;
1128 addr += pageOffset;
1129
1130 while (remain && (KERN_SUCCESS == err)) {
1131 entryOffset = offset - entry->offset;
1132 if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1133 err = kIOReturnNotAligned;
1134 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1135 break;
1136 }
1137
1138 if (kIODefaultCache != cacheMode) {
1139 vm_size_t unused = 0;
1140 err = mach_make_memory_entry(NULL /*unused*/, size: &unused, offset: 0 /*unused*/,
1141 permission: memEntryCacheMode, NULL, parent_entry: entry->entry);
1142 assert(KERN_SUCCESS == err);
1143 }
1144
1145 entryOffset -= pageOffset;
1146 if (entryOffset >= entry->size) {
1147 panic("entryOffset");
1148 }
1149 chunk = entry->size - entryOffset;
1150 if (chunk) {
1151 vm_map_kernel_flags_t vmk_flags = {
1152 .vmf_fixed = true,
1153 .vmf_overwrite = true,
1154 .vm_tag = tag,
1155 .vmkf_iokit_acct = true,
1156 };
1157
1158 if (chunk > remain) {
1159 chunk = remain;
1160 }
1161 if (options & kIOMapPrefault) {
1162 UInt nb_pages = (typeof(nb_pages))round_page(x: chunk) / PAGE_SIZE;
1163
1164 err = vm_map_enter_mem_object_prefault(map,
1165 address: &mapAddr,
1166 size: chunk, mask: 0 /* mask */,
1167 vmk_flags,
1168 port: entry->entry,
1169 offset: entryOffset,
1170 cur_protection: prot, // cur
1171 max_protection: prot, // max
1172 page_list: &pageList[currentPageIndex],
1173 page_list_count: nb_pages);
1174
1175 if (err || vm_map_page_mask(map) < PAGE_MASK) {
1176 DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1177 }
1178 // Compute the next index in the page list.
1179 currentPageIndex += nb_pages;
1180 assert(currentPageIndex <= _pages);
1181 } else {
1182 err = vm_map_enter_mem_object(map,
1183 address: &mapAddr,
1184 size: chunk, mask: 0 /* mask */,
1185 vmk_flags,
1186 port: entry->entry,
1187 offset: entryOffset,
1188 needs_copy: false, // copy
1189 cur_protection: prot, // cur
1190 max_protection: prot, // max
1191 VM_INHERIT_NONE);
1192 }
1193 if (KERN_SUCCESS != err) {
1194 DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1195 break;
1196 }
1197 remain -= chunk;
1198 if (!remain) {
1199 break;
1200 }
1201 mapAddr += chunk;
1202 offset += chunk - pageOffset;
1203 }
1204 pageOffset = 0;
1205 entry++;
1206 entryIdx++;
1207 if (entryIdx >= ref->count) {
1208 err = kIOReturnOverrun;
1209 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1210 break;
1211 }
1212 }
1213
1214 if ((KERN_SUCCESS != err) && didAlloc) {
1215 (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1216 addr = 0;
1217 }
1218 *inaddr = addr;
1219
1220 if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1221 DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1222 }
1223 return err;
1224}
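
/*
 * Usage sketch (editor's addition, not original source): the kIOMapPrefault path
 * above only works on wired memory, because the upl_page_info_t list it consults
 * is built by ::prepare(). A caller that wants a fault-free kernel mapping
 * therefore does something like (placeholder descriptor 'md'):
 *
 *     if (kIOReturnSuccess == md->prepare()) {
 *         OSSharedPtr<IOMemoryMap> km =
 *             md->createMappingInTask(kernel_task, 0,
 *                 kIOMapAnywhere | kIOMapPrefault);
 *         // ... use km->getVirtualAddress() without taking page faults ...
 *         km.reset();
 *         md->complete();
 *     }
 */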
1225
1226#define LOGUNALIGN 0
1227IOReturn
1228IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1229 IOMemoryReference * ref,
1230 vm_map_t map,
1231 mach_vm_size_t inoffset,
1232 mach_vm_size_t size,
1233 IOOptionBits options,
1234 mach_vm_address_t * inaddr)
1235{
1236 IOReturn err;
1237 int64_t offset = inoffset;
1238 uint32_t entryIdx, firstEntryIdx;
1239 vm_map_offset_t addr, mapAddr, mapAddrOut;
1240 vm_map_offset_t entryOffset, remain, chunk;
1241
1242 IOMemoryEntry * entry;
1243 vm_prot_t prot, memEntryCacheMode;
1244 IOOptionBits type;
1245 IOOptionBits cacheMode;
1246 vm_tag_t tag;
1247 // for the kIOMapPrefault option.
1248 upl_page_info_t * pageList = NULL;
1249 UInt currentPageIndex = 0;
1250 bool didAlloc;
1251
1252 DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1253
1254 if (ref->mapRef) {
1255 err = memoryReferenceMap(ref: ref->mapRef, map, inoffset, size, options, inaddr);
1256 return err;
1257 }
1258
1259#if LOGUNALIGN
1260 printf("MAP offset %qx, %qx\n", inoffset, size);
1261#endif
1262
1263 type = _flags & kIOMemoryTypeMask;
1264
1265 prot = VM_PROT_READ;
1266 if (!(kIOMapReadOnly & options)) {
1267 prot |= VM_PROT_WRITE;
1268 }
1269 prot &= ref->prot;
1270
1271 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1272 if (kIODefaultCache != cacheMode) {
1273 // VM system requires write access to update named entry cache mode
1274 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1275 }
1276
1277 tag = (vm_tag_t) getVMTag(map);
1278
1279 addr = 0;
1280 didAlloc = false;
1281
1282 if (!(options & kIOMapAnywhere)) {
1283 addr = *inaddr;
1284 }
1285
1286 // find first entry for offset
1287 for (firstEntryIdx = 0;
1288 (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1289 firstEntryIdx++) {
1290 }
1291 firstEntryIdx--;
1292
1293 // calculate required VM space
1294
1295 entryIdx = firstEntryIdx;
1296 entry = &ref->entries[entryIdx];
1297
1298 remain = size;
1299 int64_t iteroffset = offset;
1300 uint64_t mapSize = 0;
1301 while (remain) {
1302 entryOffset = iteroffset - entry->offset;
1303 if (entryOffset >= entry->size) {
1304 panic("entryOffset");
1305 }
1306
1307#if LOGUNALIGN
1308 printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1309 entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1310#endif
1311
1312 chunk = entry->size - entryOffset;
1313 if (chunk) {
1314 if (chunk > remain) {
1315 chunk = remain;
1316 }
1317 mach_vm_size_t entrySize;
1318 err = mach_memory_entry_map_size(entry_port: entry->entry, map, offset: entryOffset, size: chunk, map_size: &entrySize);
1319 assert(KERN_SUCCESS == err);
1320 mapSize += entrySize;
1321
1322 remain -= chunk;
1323 if (!remain) {
1324 break;
1325 }
1326 iteroffset += chunk; // - pageOffset;
1327 }
1328 entry++;
1329 entryIdx++;
1330 if (entryIdx >= ref->count) {
1331 panic("overrun");
1332 err = kIOReturnOverrun;
1333 break;
1334 }
1335 }
1336
1337 if (kIOMapOverwrite & options) {
1338 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339 map = IOPageableMapForAddress(address: addr);
1340 }
1341 err = KERN_SUCCESS;
1342 } else {
1343 IOMemoryDescriptorMapAllocRef ref;
1344 ref.map = map;
1345 ref.tag = tag;
1346 ref.options = options;
1347 ref.size = mapSize;
1348 ref.prot = prot;
1349 if (options & kIOMapAnywhere) {
1350 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1351 ref.mapped = 0;
1352 } else {
1353 ref.mapped = addr;
1354 }
1355 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1356 err = IOIteratePageableMaps( size: ref.size, callback: &IOMemoryDescriptorMapAlloc, ref: &ref );
1357 } else {
1358 err = IOMemoryDescriptorMapAlloc(map: ref.map, ref: &ref);
1359 }
1360
1361 if (KERN_SUCCESS == err) {
1362 addr = ref.mapped;
1363 map = ref.map;
1364 didAlloc = true;
1365 }
1366#if LOGUNALIGN
1367 IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1368#endif
1369 }
1370
1371 /*
1372 * If the memory is associated with a device pager but doesn't have a UPL,
1373 * it will be immediately faulted in through the pager via populateDevicePager().
1374 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1375 * operations.
1376 */
1377 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1378 options &= ~kIOMapPrefault;
1379 }
1380
1381 /*
1382 * Prefaulting is only possible if we wired the memory earlier. Check the
1383 * memory type, and the underlying data.
1384 */
1385 if (options & kIOMapPrefault) {
1386 /*
1387 * The memory must have been wired by calling ::prepare(), otherwise
1388 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1389 */
1390 assert(_wireCount != 0);
1391 assert(_memoryEntries != NULL);
1392 if ((_wireCount == 0) ||
1393 (_memoryEntries == NULL)) {
1394 return kIOReturnBadArgument;
1395 }
1396
1397 // Get the page list.
1398 ioGMDData* dataP = getDataP(_memoryEntries);
1399 ioPLBlock const* ioplList = getIOPLList(dataP);
1400 pageList = getPageList(dataP);
1401
1402 // Get the number of IOPLs.
1403 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1404
		/*
		 * Scan through the IOPL info blocks, looking for the first block containing
		 * the offset. The search will run one block past it, so we step back to the
		 * right block afterwards.
		 */
1410 UInt ioplIndex = 0;
1411 while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1412 ioplIndex++;
1413 }
1414 ioplIndex--;
1415
1416 // Retrieve the IOPL info block.
1417 ioPLBlock ioplInfo = ioplList[ioplIndex];
1418
1419 /*
1420 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1421 * array.
1422 */
1423 if (ioplInfo.fFlags & kIOPLExternUPL) {
1424 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1425 } else {
1426 pageList = &pageList[ioplInfo.fPageInfo];
1427 }
1428
		// Rebase [offset] into the IOPL in order to look up the first page index.
1430 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1431
1432 // Retrieve the index of the first page corresponding to the offset.
1433 currentPageIndex = atop_32(offsetInIOPL);
1434 }
1435
1436 // enter mappings
1437 remain = size;
1438 mapAddr = addr;
1439 entryIdx = firstEntryIdx;
1440 entry = &ref->entries[entryIdx];
1441
1442 while (remain && (KERN_SUCCESS == err)) {
1443#if LOGUNALIGN
1444 printf("offset %qx, %qx\n", offset, entry->offset);
1445#endif
1446 if (kIODefaultCache != cacheMode) {
1447 vm_size_t unused = 0;
1448 err = mach_make_memory_entry(NULL /*unused*/, size: &unused, offset: 0 /*unused*/,
1449 permission: memEntryCacheMode, NULL, parent_entry: entry->entry);
1450 assert(KERN_SUCCESS == err);
1451 }
1452 entryOffset = offset - entry->offset;
1453 if (entryOffset >= entry->size) {
1454 panic("entryOffset");
1455 }
1456 chunk = entry->size - entryOffset;
1457#if LOGUNALIGN
1458 printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1459#endif
1460 if (chunk) {
1461 vm_map_kernel_flags_t vmk_flags = {
1462 .vmf_fixed = true,
1463 .vmf_overwrite = true,
1464 .vmf_return_data_addr = true,
1465 .vm_tag = tag,
1466 .vmkf_iokit_acct = true,
1467 };
1468
1469 if (chunk > remain) {
1470 chunk = remain;
1471 }
1472 mapAddrOut = mapAddr;
1473 if (options & kIOMapPrefault) {
1474 UInt nb_pages = (typeof(nb_pages))round_page(x: chunk) / PAGE_SIZE;
1475
1476 err = vm_map_enter_mem_object_prefault(map,
1477 address: &mapAddrOut,
1478 size: chunk, mask: 0 /* mask */,
1479 vmk_flags,
1480 port: entry->entry,
1481 offset: entryOffset,
1482 cur_protection: prot, // cur
1483 max_protection: prot, // max
1484 page_list: &pageList[currentPageIndex],
1485 page_list_count: nb_pages);
1486
1487 // Compute the next index in the page list.
1488 currentPageIndex += nb_pages;
1489 assert(currentPageIndex <= _pages);
1490 } else {
1491#if LOGUNALIGN
1492 printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1493#endif
1494 err = vm_map_enter_mem_object(map,
1495 address: &mapAddrOut,
1496 size: chunk, mask: 0 /* mask */,
1497 vmk_flags,
1498 port: entry->entry,
1499 offset: entryOffset,
1500 needs_copy: false, // copy
1501 cur_protection: prot, // cur
1502 max_protection: prot, // max
1503 VM_INHERIT_NONE);
1504 }
1505 if (KERN_SUCCESS != err) {
1506 panic("map enter err %x", err);
1507 break;
1508 }
1509#if LOGUNALIGN
1510 printf("mapAddr o %qx\n", mapAddrOut);
1511#endif
1512 if (entryIdx == firstEntryIdx) {
1513 addr = mapAddrOut;
1514 }
1515 remain -= chunk;
1516 if (!remain) {
1517 break;
1518 }
1519 mach_vm_size_t entrySize;
1520 err = mach_memory_entry_map_size(entry_port: entry->entry, map, offset: entryOffset, size: chunk, map_size: &entrySize);
1521 assert(KERN_SUCCESS == err);
1522 mapAddr += entrySize;
1523 offset += chunk;
1524 }
1525
1526 entry++;
1527 entryIdx++;
1528 if (entryIdx >= ref->count) {
1529 err = kIOReturnOverrun;
1530 break;
1531 }
1532 }
1533
1534 if (KERN_SUCCESS != err) {
1535 DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1536 }
1537
1538 if ((KERN_SUCCESS != err) && didAlloc) {
1539 (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1540 addr = 0;
1541 }
1542 *inaddr = addr;
1543
1544 return err;
1545}
1546
1547uint64_t
1548IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1549 IOMemoryReference * ref,
1550 uint64_t * offset)
1551{
1552 kern_return_t kr;
1553 vm_object_offset_t data_offset = 0;
1554 uint64_t total;
1555 uint32_t idx;
1556
1557 assert(ref->count);
1558 if (offset) {
1559 *offset = (uint64_t) data_offset;
1560 }
1561 total = 0;
1562 for (idx = 0; idx < ref->count; idx++) {
1563 kr = mach_memory_entry_phys_page_offset(entry_port: ref->entries[idx].entry,
1564 offset_p: &data_offset);
1565 if (KERN_SUCCESS != kr) {
1566 DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1567 } else if (0 != data_offset) {
1568 DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1569 }
1570 if (offset && !idx) {
1571 *offset = (uint64_t) data_offset;
1572 }
1573 total += round_page(x: data_offset + ref->entries[idx].size);
1574 }
1575
1576 DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1577 (offset ? *offset : (vm_object_offset_t)-1), total);
1578
1579 return total;
1580}
1581
1582
1583IOReturn
1584IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1585 IOMemoryReference * ref,
1586 IOByteCount * residentPageCount,
1587 IOByteCount * dirtyPageCount)
1588{
1589 IOReturn err;
1590 IOMemoryEntry * entries;
1591 unsigned int resident, dirty;
1592 unsigned int totalResident, totalDirty;
1593
1594 totalResident = totalDirty = 0;
1595 err = kIOReturnSuccess;
1596 entries = ref->entries + ref->count;
1597 while (entries > &ref->entries[0]) {
1598 entries--;
1599 err = mach_memory_entry_get_page_counts(entry_port: entries->entry, resident_page_count: &resident, dirty_page_count: &dirty);
1600 if (KERN_SUCCESS != err) {
1601 break;
1602 }
1603 totalResident += resident;
1604 totalDirty += dirty;
1605 }
1606
1607 if (residentPageCount) {
1608 *residentPageCount = totalResident;
1609 }
1610 if (dirtyPageCount) {
1611 *dirtyPageCount = totalDirty;
1612 }
1613 return err;
1614}
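
/*
 * Usage sketch (editor's addition, not original source): this is reached through
 * IOMemoryDescriptor::getPageCounts(). Note that both out-parameters are counts
 * of pages, despite the IOByteCount type:
 *
 *     IOByteCount resident = 0, dirty = 0;
 *     if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
 *         // 'resident' pages are present; 'dirty' pages have been modified
 *     }
 */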
1615
1616IOReturn
1617IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1618 IOMemoryReference * ref,
1619 IOOptionBits newState,
1620 IOOptionBits * oldState)
1621{
1622 IOReturn err;
1623 IOMemoryEntry * entries;
1624 vm_purgable_t control;
1625 int totalState, state;
1626
1627 totalState = kIOMemoryPurgeableNonVolatile;
1628 err = kIOReturnSuccess;
1629 entries = ref->entries + ref->count;
1630 while (entries > &ref->entries[0]) {
1631 entries--;
1632
1633 err = purgeableControlBits(newState, control: &control, state: &state);
1634 if (KERN_SUCCESS != err) {
1635 break;
1636 }
1637 err = memory_entry_purgeable_control_internal(entry_port: entries->entry, control, state: &state);
1638 if (KERN_SUCCESS != err) {
1639 break;
1640 }
1641 err = purgeableStateBits(state: &state);
1642 if (KERN_SUCCESS != err) {
1643 break;
1644 }
1645
1646 if (kIOMemoryPurgeableEmpty == state) {
1647 totalState = kIOMemoryPurgeableEmpty;
1648 } else if (kIOMemoryPurgeableEmpty == totalState) {
1649 continue;
1650 } else if (kIOMemoryPurgeableVolatile == totalState) {
1651 continue;
1652 } else if (kIOMemoryPurgeableVolatile == state) {
1653 totalState = kIOMemoryPurgeableVolatile;
1654 } else {
1655 totalState = kIOMemoryPurgeableNonVolatile;
1656 }
1657 }
1658
1659 if (oldState) {
1660 *oldState = totalState;
1661 }
1662 return err;
1663}
1664
1665IOReturn
1666IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1667 IOMemoryReference * ref,
1668 task_t newOwner,
1669 int newLedgerTag,
1670 IOOptionBits newLedgerOptions)
1671{
1672 IOReturn err, totalErr;
1673 IOMemoryEntry * entries;
1674
1675 totalErr = kIOReturnSuccess;
1676 entries = ref->entries + ref->count;
1677 while (entries > &ref->entries[0]) {
1678 entries--;
1679
1680 err = mach_memory_entry_ownership(entry_port: entries->entry, owner: newOwner, ledger_tag: newLedgerTag, ledger_flags: newLedgerOptions);
1681 if (KERN_SUCCESS != err) {
1682 totalErr = err;
1683 }
1684 }
1685
1686 return totalErr;
1687}
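
/*
 * Usage sketch (editor's addition, not original source): this backs
 * IOMemoryDescriptor::setOwnership(), which drivers use to charge wired or
 * purgeable memory to a client task's ledgers instead of the kernel's.
 * 'clientTask' is a placeholder, and the no-footprint flag name is assumed to be
 * VM_LEDGER_FLAG_NO_FOOTPRINT:
 *
 *     err = md->setOwnership(clientTask, VM_LEDGER_TAG_NETWORK,
 *                            VM_LEDGER_FLAG_NO_FOOTPRINT);
 *
 * Each named entry in the reference is retagged; the last failure, if any, is
 * returned.
 */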
1688
1689/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1690
1691OSSharedPtr<IOMemoryDescriptor>
1692IOMemoryDescriptor::withAddress(void * address,
1693 IOByteCount length,
1694 IODirection direction)
1695{
1696 return IOMemoryDescriptor::
1697 withAddressRange(address: (IOVirtualAddress) address, length, options: direction | kIOMemoryAutoPrepare, task: kernel_task);
1698}
1699
1700#ifndef __LP64__
1701OSSharedPtr<IOMemoryDescriptor>
1702IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1703 IOByteCount length,
1704 IODirection direction,
1705 task_t task)
1706{
1707 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1708 if (that) {
1709 if (that->initWithAddress(address, length, direction, task)) {
1710 return os::move(that);
1711 }
1712 }
1713 return nullptr;
1714}
1715#endif /* !__LP64__ */
1716
1717OSSharedPtr<IOMemoryDescriptor>
1718IOMemoryDescriptor::withPhysicalAddress(
1719 IOPhysicalAddress address,
1720 IOByteCount length,
1721 IODirection direction )
1722{
1723 return IOMemoryDescriptor::withAddressRange(address, length, options: direction, TASK_NULL);
1724}
1725
1726#ifndef __LP64__
1727OSSharedPtr<IOMemoryDescriptor>
1728IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1729 UInt32 withCount,
1730 IODirection direction,
1731 task_t task,
1732 bool asReference)
1733{
1734 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1735 if (that) {
1736 if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1737 return os::move(that);
1738 }
1739 }
1740 return nullptr;
1741}
1742#endif /* !__LP64__ */
1743
1744OSSharedPtr<IOMemoryDescriptor>
1745IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1746 mach_vm_size_t length,
1747 IOOptionBits options,
1748 task_t task)
1749{
1750 IOAddressRange range = { .address: address, .length: length };
1751 return IOMemoryDescriptor::withAddressRanges(ranges: &range, rangeCount: 1, options, task);
1752}
1753
1754OSSharedPtr<IOMemoryDescriptor>
1755IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1756 UInt32 rangeCount,
1757 IOOptionBits options,
1758 task_t task)
1759{
1760 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1761 if (that) {
1762 if (task) {
1763 options |= kIOMemoryTypeVirtual64;
1764 } else {
1765 options |= kIOMemoryTypePhysical64;
1766 }
1767
1768 if (that->initWithOptions(buffers: ranges, count: rangeCount, offset: 0, task, options, /* mapper */ NULL)) {
1769 return os::move(t&: that);
1770 }
1771 }
1772
1773 return nullptr;
1774}
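
/*
 * Usage sketch (editor's addition, not original source): the factories above are
 * the usual way a driver wraps a client-supplied buffer. Task/address values are
 * placeholders:
 *
 *     OSSharedPtr<IOMemoryDescriptor> md =
 *         IOMemoryDescriptor::withAddressRange(userAddr, userLen,
 *             kIODirectionOutIn, userTask);
 *     if (md && (kIOReturnSuccess == md->prepare())) {   // wire before DMA
 *         // ... program DMA / copy data ...
 *         md->complete();
 *     }
 *
 * Passing TASK_NULL instead makes the range physical (kIOMemoryTypePhysical64),
 * as withPhysicalAddress() does above.
 */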
1775
1776
1777/*
1778 * withOptions:
1779 *
1780 * Create a new IOMemoryDescriptor. The buffer is made up of several
1781 * virtual address ranges, from a given task.
1782 *
1783 * Passing the ranges as a reference will avoid an extra allocation.
1784 */
1785OSSharedPtr<IOMemoryDescriptor>
1786IOMemoryDescriptor::withOptions(void * buffers,
1787 UInt32 count,
1788 UInt32 offset,
1789 task_t task,
1790 IOOptionBits opts,
1791 IOMapper * mapper)
1792{
1793 OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1794
1795 if (self
1796 && !self->initWithOptions(buffers, count, offset, task, options: opts, mapper)) {
1797 return nullptr;
1798 }
1799
1800 return os::move(t&: self);
1801}
1802
1803bool
1804IOMemoryDescriptor::initWithOptions(void * buffers,
1805 UInt32 count,
1806 UInt32 offset,
1807 task_t task,
1808 IOOptionBits options,
1809 IOMapper * mapper)
1810{
1811 return false;
1812}
1813
1814#ifndef __LP64__
1815OSSharedPtr<IOMemoryDescriptor>
1816IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1817 UInt32 withCount,
1818 IODirection direction,
1819 bool asReference)
1820{
1821 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1822 if (that) {
1823 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1824 return os::move(that);
1825 }
1826 }
1827 return nullptr;
1828}
1829
1830OSSharedPtr<IOMemoryDescriptor>
1831IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1832 IOByteCount offset,
1833 IOByteCount length,
1834 IODirection direction)
1835{
1836 return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1837}
1838#endif /* !__LP64__ */
1839
1840OSSharedPtr<IOMemoryDescriptor>
1841IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1842{
1843 IOGeneralMemoryDescriptor *origGenMD =
1844 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1845
1846 if (origGenMD) {
1847 return IOGeneralMemoryDescriptor::
1848 withPersistentMemoryDescriptor(originalMD: origGenMD);
1849 } else {
1850 return nullptr;
1851 }
1852}
1853
1854OSSharedPtr<IOMemoryDescriptor>
1855IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1856{
1857 IOMemoryReference * memRef;
1858 OSSharedPtr<IOGeneralMemoryDescriptor> self;
1859
1860 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(options: kIOMemoryReferenceReuse, reference: &memRef)) {
1861 return nullptr;
1862 }
1863
1864 if (memRef == originalMD->_memRef) {
1865 self.reset(p: originalMD, OSRetain);
1866 originalMD->memoryReferenceRelease(ref: memRef);
1867 return os::move(t&: self);
1868 }
1869
1870 self = OSMakeShared<IOGeneralMemoryDescriptor>();
1871 IOMDPersistentInitData initData = { .fMD: originalMD, .fMemRef: memRef };
1872
1873 if (self
1874 && !self->initWithOptions(buffers: &initData, count: 1, offset: 0, NULL, options: kIOMemoryTypePersistentMD, NULL)) {
1875 return nullptr;
1876 }
1877 return os::move(t&: self);
1878}
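
/*
 * Usage sketch (editor's addition, not original source): a persistent descriptor
 * keeps referring to the same backing pages even if the original task later
 * remaps its VA range. If the existing named entries can be reused, the original
 * object is simply retained and returned:
 *
 *     OSSharedPtr<IOMemoryDescriptor> persistent =
 *         IOMemoryDescriptor::withPersistentMemoryDescriptor(originalMD);
 *     // 'persistent' may be 'originalMD' itself (retained), or a new
 *     // IOGeneralMemoryDescriptor initialized with kIOMemoryTypePersistentMD
 */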
1879
1880#ifndef __LP64__
1881bool
1882IOGeneralMemoryDescriptor::initWithAddress(void * address,
1883 IOByteCount withLength,
1884 IODirection withDirection)
1885{
1886 _singleRange.v.address = (vm_offset_t) address;
1887 _singleRange.v.length = withLength;
1888
1889 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1890}
1891
1892bool
1893IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1894 IOByteCount withLength,
1895 IODirection withDirection,
1896 task_t withTask)
1897{
1898 _singleRange.v.address = address;
1899 _singleRange.v.length = withLength;
1900
1901 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1902}
1903
1904bool
1905IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1906 IOPhysicalAddress address,
1907 IOByteCount withLength,
1908 IODirection withDirection )
1909{
1910 _singleRange.p.address = address;
1911 _singleRange.p.length = withLength;
1912
1913 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1914}
1915
1916bool
1917IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1918 IOPhysicalRange * ranges,
1919 UInt32 count,
1920 IODirection direction,
1921 bool reference)
1922{
1923 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1924
1925 if (reference) {
1926 mdOpts |= kIOMemoryAsReference;
1927 }
1928
1929 return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1930}
1931
1932bool
1933IOGeneralMemoryDescriptor::initWithRanges(
1934 IOVirtualRange * ranges,
1935 UInt32 count,
1936 IODirection direction,
1937 task_t task,
1938 bool reference)
1939{
1940 IOOptionBits mdOpts = direction;
1941
1942 if (reference) {
1943 mdOpts |= kIOMemoryAsReference;
1944 }
1945
1946 if (task) {
1947 mdOpts |= kIOMemoryTypeVirtual;
1948
1949 // Auto-prepare if this is a kernel memory descriptor, since very few
1950 // clients bother to prepare() kernel memory and the requirement was
1951 // never enforced.
1952 if (task == kernel_task) {
1953 mdOpts |= kIOMemoryAutoPrepare;
1954 }
1955 } else {
1956 mdOpts |= kIOMemoryTypePhysical;
1957 }
1958
1959 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1960}
1961#endif /* !__LP64__ */
1962
1963/*
1964 * initWithOptions:
1965 *
1966 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1967 * address ranges from a given task, several physical ranges, a UPL from the
1968 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
1969 *
1970 * Passing the ranges as a reference will avoid an extra allocation.
1971 *
1972 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1973 * existing instance -- note this behavior is not commonly supported in other
1974 * I/O Kit classes, although it is supported here.
1975 */
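/*
 * Illustrative re-initialization sketch: since initWithOptions() may be
 * called again on an already-initialized IOGeneralMemoryDescriptor, an
 * existing object can be retargeted at a new range; any outstanding wiring
 * is completed and the previous range storage is released before the new
 * ranges take effect. The names gmd, newAddr, newLen and clientTask are
 * hypothetical.
 *
 *   IOVirtualRange newRange = { newAddr, newLen };
 *   bool ok = gmd->initWithOptions(&newRange, 1, 0, clientTask,
 *       kIOMemoryTypeVirtual | kIODirectionIn, NULL);
 */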
1976
1977bool
1978IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1979 UInt32 count,
1980 UInt32 offset,
1981 task_t task,
1982 IOOptionBits options,
1983 IOMapper * mapper)
1984{
1985 IOOptionBits type = options & kIOMemoryTypeMask;
1986
1987#ifndef __LP64__
1988 if (task
1989 && (kIOMemoryTypeVirtual == type)
1990 && vm_map_is_64bit(get_task_map(task))
1991 && ((IOVirtualRange *) buffers)->address) {
1992 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1993 return false;
1994 }
1995#endif /* !__LP64__ */
1996
1997 // Grab the original MD's configuration data to initialise the
1998 // arguments to this function.
1999 if (kIOMemoryTypePersistentMD == type) {
2000 IOMDPersistentInitData *initData = (typeof(initData))buffers;
2001 const IOGeneralMemoryDescriptor *orig = initData->fMD;
2002 ioGMDData *dataP = getDataP(orig->_memoryEntries);
2003
2004 // Only accept persistent memory descriptors with valid dataP data.
2005 assert(orig->_rangesCount == 1);
2006 if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
2007 return false;
2008 }
2009
2010 _memRef = initData->fMemRef; // Grab the new named entry
2011 options = orig->_flags & ~kIOMemoryAsReference;
2012 type = options & kIOMemoryTypeMask;
2013 buffers = orig->_ranges.v;
2014 count = orig->_rangesCount;
2015
2016 // Now grab the original task and whatever mapper was previously used
2017 task = orig->_task;
2018 mapper = dataP->fMapper;
2019
2020 // We are ready to go through the original initialisation now
2021 }
2022
2023 switch (type) {
2024 case kIOMemoryTypeUIO:
2025 case kIOMemoryTypeVirtual:
2026#ifndef __LP64__
2027 case kIOMemoryTypeVirtual64:
2028#endif /* !__LP64__ */
2029 assert(task);
2030 if (!task) {
2031 return false;
2032 }
2033 break;
2034
2035 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
2036#ifndef __LP64__
2037 case kIOMemoryTypePhysical64:
2038#endif /* !__LP64__ */
2039 case kIOMemoryTypeUPL:
2040 assert(!task);
2041 break;
2042 default:
2043 return false; /* bad argument */
2044 }
2045
2046 assert(buffers);
2047 assert(count);
2048
2049 /*
2050 * We can check the _initialized instance variable before having ever set
2051 * it to an initial value because I/O Kit guarantees that all our instance
2052 * variables are zeroed on an object's allocation.
2053 */
2054
2055 if (_initialized) {
2056 /*
2057 * An existing memory descriptor is being retargeted to point to
2058 * somewhere else. Clean up our present state.
2059 */
2060 IOOptionBits type = _flags & kIOMemoryTypeMask;
2061 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2062 while (_wireCount) {
2063 complete();
2064 }
2065 }
2066 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2067 if (kIOMemoryTypeUIO == type) {
2068 uio_free((uio_t) _ranges.v);
2069 }
2070#ifndef __LP64__
2071 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2072 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2073 }
2074#endif /* !__LP64__ */
2075 else {
2076 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2077 }
2078 }
2079
2080 options |= (kIOMemoryRedirected & _flags);
2081 if (!(kIOMemoryRedirected & options)) {
2082 if (_memRef) {
2083 memoryReferenceRelease(_memRef);
2084 _memRef = NULL;
2085 }
2086 if (_mappings) {
2087 _mappings->flushCollection();
2088 }
2089 }
2090 } else {
2091 if (!super::init()) {
2092 return false;
2093 }
2094 _initialized = true;
2095 }
2096
2097 // Grab the appropriate mapper
2098 if (kIOMemoryHostOrRemote & options) {
2099 options |= kIOMemoryMapperNone;
2100 }
2101 if (kIOMemoryMapperNone & options) {
2102 mapper = NULL; // No Mapper
2103 } else if (mapper == kIOMapperSystem) {
2104 IOMapper::checkForSystemMapper();
2105 gIOSystemMapper = mapper = IOMapper::gSystem;
2106 }
2107
2108 // Remove the dynamic internal use flags from the initial setting
2109 options &= ~(kIOMemoryPreparedReadOnly);
2110 _flags = options;
2111 _task = task;
2112
2113#ifndef __LP64__
2114 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
2115#endif /* !__LP64__ */
2116
2117 _dmaReferences = 0;
2118 __iomd_reservedA = 0;
2119 __iomd_reservedB = 0;
2120 _highestPage = 0;
2121
2122 if (kIOMemoryThreadSafe & options) {
2123 if (!_prepareLock) {
2124 _prepareLock = IOLockAlloc();
2125 }
2126 } else if (_prepareLock) {
2127 IOLockFree(_prepareLock);
2128 _prepareLock = NULL;
2129 }
2130
2131 if (kIOMemoryTypeUPL == type) {
2132 ioGMDData *dataP;
2133 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2134
2135 if (!initMemoryEntries(dataSize, mapper)) {
2136 return false;
2137 }
2138 dataP = getDataP(_memoryEntries);
2139 dataP->fPageCnt = 0;
2140 switch (kIOMemoryDirectionMask & options) {
2141 case kIODirectionOut:
2142 dataP->fDMAAccess = kIODMAMapReadAccess;
2143 break;
2144 case kIODirectionIn:
2145 dataP->fDMAAccess = kIODMAMapWriteAccess;
2146 break;
2147 case kIODirectionNone:
2148 case kIODirectionOutIn:
2149 default:
2150 panic("bad dir for upl 0x%x", (int) options);
2151 break;
2152 }
2153 // _wireCount++; // UPLs start out life wired
2154
2155 _length = count;
2156 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2157
2158 ioPLBlock iopl;
2159 iopl.fIOPL = (upl_t) buffers;
2160 upl_set_referenced(iopl.fIOPL, true);
2161 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2162
2163 if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2164 panic("short external upl");
2165 }
2166
2167 _highestPage = upl_get_highest_page(iopl.fIOPL);
2168 DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2169
2170 // Set the kIOPLOnDevice flag, which is conveniently equal to 1
2171 iopl.fFlags = pageList->device | kIOPLExternUPL;
2172 if (!pageList->device) {
2173 // Pre-compute the offset into the UPL's page list
2174 pageList = &pageList[atop_32(offset)];
2175 offset &= PAGE_MASK;
2176 }
2177 iopl.fIOMDOffset = 0;
2178 iopl.fMappedPage = 0;
2179 iopl.fPageInfo = (vm_address_t) pageList;
2180 iopl.fPageOffset = offset;
2181 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
2182 } else {
2183 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2184 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2185
2186 // Initialize the memory descriptor
2187 if (options & kIOMemoryAsReference) {
2188#ifndef __LP64__
2189 _rangesIsAllocated = false;
2190#endif /* !__LP64__ */
2191
2192 // Hack assignment to get the buffer arg into _ranges.
2193 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2194 // work, C++ sigh.
2195 // This also initialises the uio & physical ranges.
2196 _ranges.v = (IOVirtualRange *) buffers;
2197 } else {
2198#ifndef __LP64__
2199 _rangesIsAllocated = true;
2200#endif /* !__LP64__ */
2201 switch (type) {
2202 case kIOMemoryTypeUIO:
2203 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2204 break;
2205
2206#ifndef __LP64__
2207 case kIOMemoryTypeVirtual64:
2208 case kIOMemoryTypePhysical64:
2209 if (count == 1
2210#ifndef __arm__
2211 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2212#endif
2213 ) {
2214 if (kIOMemoryTypeVirtual64 == type) {
2215 type = kIOMemoryTypeVirtual;
2216 } else {
2217 type = kIOMemoryTypePhysical;
2218 }
2219 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2220 _rangesIsAllocated = false;
2221 _ranges.v = &_singleRange.v;
2222 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
2223 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
2224 break;
2225 }
2226 _ranges.v64 = IONew(IOAddressRange, count);
2227 if (!_ranges.v64) {
2228 return false;
2229 }
2230 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2231 break;
2232#endif /* !__LP64__ */
2233 case kIOMemoryTypeVirtual:
2234 case kIOMemoryTypePhysical:
2235 if (count == 1) {
2236 _flags |= kIOMemoryAsReference;
2237#ifndef __LP64__
2238 _rangesIsAllocated = false;
2239#endif /* !__LP64__ */
2240 _ranges.v = &_singleRange.v;
2241 } else {
2242 _ranges.v = IONew(IOVirtualRange, count);
2243 if (!_ranges.v) {
2244 return false;
2245 }
2246 }
2247 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2248 break;
2249 }
2250 }
2251 _rangesCount = count;
2252
2253 // Find starting address within the vector of ranges
2254 Ranges vec = _ranges;
2255 mach_vm_size_t totalLength = 0;
2256 unsigned int ind, pages = 0;
2257 for (ind = 0; ind < count; ind++) {
2258 mach_vm_address_t addr;
2259 mach_vm_address_t endAddr;
2260 mach_vm_size_t len;
2261
2262 // addr & len are returned by this function
2263 getAddrLenForInd(addr, len, type, vec, ind, _task);
2264 if (_task) {
2265 mach_vm_size_t phys_size;
2266 kern_return_t kret;
2267 kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2268 if (KERN_SUCCESS != kret) {
2269 break;
2270 }
2271 if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2272 break;
2273 }
2274 } else {
2275 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2276 break;
2277 }
2278 if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2279 break;
2280 }
2281 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2282 break;
2283 }
2284 }
2285 if (os_add_overflow(totalLength, len, &totalLength)) {
2286 break;
2287 }
2288 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2289 uint64_t highPage = atop_64(addr + len - 1);
2290 if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2291 _highestPage = (ppnum_t) highPage;
2292 DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2293 }
2294 }
2295 }
2296 if ((ind < count)
2297 || (totalLength != ((IOByteCount) totalLength))) {
2298 return false; /* overflow */
2299 }
2300 _length = totalLength;
2301 _pages = pages;
2302
2303 // Auto-prepare memory at creation time.
2304 // Implied completion when descriptor is freed.
2305
2306
2307 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2308 _wireCount++; // Physical MDs are, by definition, wired
2309 } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2310 ioGMDData *dataP;
2311 unsigned dataSize;
2312
2313 if (_pages > atop_64(max_mem)) {
2314 return false;
2315 }
2316
2317 dataSize = computeDataSize(_pages, /* upls */ count * 2);
2318 if (!initMemoryEntries(dataSize, mapper)) {
2319 return false;
2320 }
2321 dataP = getDataP(_memoryEntries);
2322 dataP->fPageCnt = _pages;
2323
2324 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2325 && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2326 _kernelTag = IOMemoryTag(kernel_map);
2327 if (_kernelTag == gIOSurfaceTag) {
2328 _userTag = VM_MEMORY_IOSURFACE;
2329 }
2330 }
2331
2332 if ((kIOMemoryPersistent & _flags) && !_memRef) {
2333 IOReturn
2334 err = memoryReferenceCreate(0, &_memRef);
2335 if (kIOReturnSuccess != err) {
2336 return false;
2337 }
2338 }
2339
2340 if ((_flags & kIOMemoryAutoPrepare)
2341 && prepare() != kIOReturnSuccess) {
2342 return false;
2343 }
2344 }
2345 }
2346
2347 return true;
2348}
2349
2350/*
2351 * free
2352 *
2353 * Free resources.
2354 */
2355void
2356IOGeneralMemoryDescriptor::free()
2357{
2358 IOOptionBits type = _flags & kIOMemoryTypeMask;
2359
2360 if (reserved && reserved->dp.memory) {
2361 LOCK;
2362 reserved->dp.memory = NULL;
2363 UNLOCK;
2364 }
2365 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2366 ioGMDData * dataP;
2367 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
2368 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
2369 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
2370 }
2371 } else {
2372 while (_wireCount) {
2373 complete();
2374 }
2375 }
2376
2377 if (_memoryEntries) {
2378 _memoryEntries.reset();
2379 }
2380
2381 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2382 if (kIOMemoryTypeUIO == type) {
2383 uio_free((uio_t) _ranges.v);
2384 }
2385#ifndef __LP64__
2386 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2387 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2388 }
2389#endif /* !__LP64__ */
2390 else {
2391 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2392 }
2393
2394 _ranges.v = NULL;
2395 }
2396
2397 if (reserved) {
2398 cleanKernelReserved(reserved);
2399 if (reserved->dp.devicePager) {
2400 // memEntry holds a ref on the device pager which owns reserved
2401 // (IOMemoryDescriptorReserved) so no reserved access after this point
2402 device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
2403 } else {
2404 IOFreeType(reserved, IOMemoryDescriptorReserved);
2405 }
2406 reserved = NULL;
2407 }
2408
2409 if (_memRef) {
2410 memoryReferenceRelease(_memRef);
2411 }
2412 if (_prepareLock) {
2413 IOLockFree(_prepareLock);
2414 }
2415
2416 super::free();
2417}
2418
2419#ifndef __LP64__
2420void
2421IOGeneralMemoryDescriptor::unmapFromKernel()
2422{
2423 panic("IOGMD::unmapFromKernel deprecated");
2424}
2425
2426void
2427IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
2428{
2429 panic("IOGMD::mapIntoKernel deprecated");
2430}
2431#endif /* !__LP64__ */
2432
2433/*
2434 * getDirection:
2435 *
2436 * Get the direction of the transfer.
2437 */
2438IODirection
2439IOMemoryDescriptor::getDirection() const
2440{
2441#ifndef __LP64__
2442 if (_direction) {
2443 return _direction;
2444 }
2445#endif /* !__LP64__ */
2446 return (IODirection) (_flags & kIOMemoryDirectionMask);
2447}
2448
2449/*
2450 * getLength:
2451 *
2452 * Get the length of the transfer (over all ranges).
2453 */
2454IOByteCount
2455IOMemoryDescriptor::getLength() const
2456{
2457 return _length;
2458}
2459
2460void
2461IOMemoryDescriptor::setTag( IOOptionBits tag )
2462{
2463 _tag = tag;
2464}
2465
2466IOOptionBits
2467IOMemoryDescriptor::getTag( void )
2468{
2469 return _tag;
2470}
2471
2472uint64_t
2473IOMemoryDescriptor::getFlags(void)
2474{
2475 return _flags;
2476}
2477
2478OSObject *
2479IOMemoryDescriptor::copyContext(void) const
2480{
2481 if (reserved) {
2482 OSObject * context = reserved->contextObject;
2483 if (context) {
2484 context->retain();
2485 }
2486 return context;
2487 } else {
2488 return NULL;
2489 }
2490}
2491
2492void
2493IOMemoryDescriptor::setContext(OSObject * obj)
2494{
2495 if (this->reserved == NULL && obj == NULL) {
2496 // No existing object, and no object to set
2497 return;
2498 }
2499
2500 IOMemoryDescriptorReserved * reserved = getKernelReserved();
2501 if (reserved) {
2502 OSObject * oldObject = reserved->contextObject;
2503 if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
2504 oldObject->release();
2505 }
2506 if (obj != NULL) {
2507 obj->retain();
2508 reserved->contextObject = obj;
2509 }
2510 }
2511}
2512
2513#ifndef __LP64__
2514#pragma clang diagnostic push
2515#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2516
2517// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
2518IOPhysicalAddress
2519IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
2520{
2521 addr64_t physAddr = 0;
2522
2523 if (prepare() == kIOReturnSuccess) {
2524 physAddr = getPhysicalSegment64( offset, length );
2525 complete();
2526 }
2527
2528 return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2529}
2530
2531#pragma clang diagnostic pop
2532
2533#endif /* !__LP64__ */
2534
2535IOByteCount
2536IOMemoryDescriptor::readBytes
2537(IOByteCount offset, void *bytes, IOByteCount length)
2538{
2539 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2540 IOByteCount endoffset;
2541 IOByteCount remaining;
2542
2543
2544 // Check that this entire I/O is within the available range
2545 if ((offset > _length)
2546 || os_add_overflow(length, offset, &endoffset)
2547 || (endoffset > _length)) {
2548 assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2549 return 0;
2550 }
2551 if (offset >= _length) {
2552 return 0;
2553 }
2554
2555 assert(!(kIOMemoryRemote & _flags));
2556 if (kIOMemoryRemote & _flags) {
2557 return 0;
2558 }
2559
2560 if (kIOMemoryThreadSafe & _flags) {
2561 LOCK;
2562 }
2563
2564 remaining = length = min(length, _length - offset);
2565 while (remaining) { // (process another target segment?)
2566 addr64_t srcAddr64;
2567 IOByteCount srcLen;
2568
2569 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
2570 if (!srcAddr64) {
2571 break;
2572 }
2573
2574 // Clip segment length to remaining
2575 if (srcLen > remaining) {
2576 srcLen = remaining;
2577 }
2578
2579 if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2580 srcLen = (UINT_MAX - PAGE_SIZE + 1);
2581 }
2582 copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
2583 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
2584
2585 dstAddr += srcLen;
2586 offset += srcLen;
2587 remaining -= srcLen;
2588 }
2589
2590 if (kIOMemoryThreadSafe & _flags) {
2591 UNLOCK;
2592 }
2593
2594 assert(!remaining);
2595
2596 return length - remaining;
2597}
2598
2599IOByteCount
2600IOMemoryDescriptor::writeBytes
2601(IOByteCount inoffset, const void *bytes, IOByteCount length)
2602{
2603 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2604 IOByteCount remaining;
2605 IOByteCount endoffset;
2606 IOByteCount offset = inoffset;
2607
2608 assert( !(kIOMemoryPreparedReadOnly & _flags));
2609
2610 // Check that this entire I/O is within the available range
2611 if ((offset > _length)
2612 || os_add_overflow(length, offset, &endoffset)
2613 || (endoffset > _length)) {
2614 assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2615 return 0;
2616 }
2617 if (kIOMemoryPreparedReadOnly & _flags) {
2618 return 0;
2619 }
2620 if (offset >= _length) {
2621 return 0;
2622 }
2623
2624 assert(!(kIOMemoryRemote & _flags));
2625 if (kIOMemoryRemote & _flags) {
2626 return 0;
2627 }
2628
2629 if (kIOMemoryThreadSafe & _flags) {
2630 LOCK;
2631 }
2632
2633 remaining = length = min(length, _length - offset);
2634 while (remaining) { // (process another target segment?)
2635 addr64_t dstAddr64;
2636 IOByteCount dstLen;
2637
2638 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2639 if (!dstAddr64) {
2640 break;
2641 }
2642
2643 // Clip segment length to remaining
2644 if (dstLen > remaining) {
2645 dstLen = remaining;
2646 }
2647
2648 if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2649 dstLen = (UINT_MAX - PAGE_SIZE + 1);
2650 }
2651 if (!srcAddr) {
2652 bzero_phys(dstAddr64, (unsigned int) dstLen);
2653 } else {
2654 copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
2655 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2656 srcAddr += dstLen;
2657 }
2658 offset += dstLen;
2659 remaining -= dstLen;
2660 }
2661
2662 if (kIOMemoryThreadSafe & _flags) {
2663 UNLOCK;
2664 }
2665
2666 assert(!remaining);
2667
2668#if defined(__x86_64__)
2669 // copypv does not cppvFsnk on intel
2670#else
2671 if (!srcAddr) {
2672 performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2673 }
2674#endif
2675
2676 return length - remaining;
2677}
2678
2679#ifndef __LP64__
2680void
2681IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2682{
2683 panic("IOGMD::setPosition deprecated");
2684}
2685#endif /* !__LP64__ */
2686
2687static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2688static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2689
2690uint64_t
2691IOGeneralMemoryDescriptor::getPreparationID( void )
2692{
2693 ioGMDData *dataP;
2694
2695 if (!_wireCount) {
2696 return kIOPreparationIDUnprepared;
2697 }
2698
2699 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2700 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2701 IOMemoryDescriptor::setPreparationID();
2702 return IOMemoryDescriptor::getPreparationID();
2703 }
2704
2705 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2706 return kIOPreparationIDUnprepared;
2707 }
2708
2709 if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2710 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2711 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2712 }
2713 return dataP->fPreparationID;
2714}
2715
2716void
2717IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2718{
2719 if (reserved->creator) {
2720 task_deallocate(reserved->creator);
2721 reserved->creator = NULL;
2722 }
2723
2724 if (reserved->contextObject) {
2725 reserved->contextObject->release();
2726 reserved->contextObject = NULL;
2727 }
2728}
2729
2730IOMemoryDescriptorReserved *
2731IOMemoryDescriptor::getKernelReserved( void )
2732{
2733 if (!reserved) {
2734 reserved = IOMallocType(IOMemoryDescriptorReserved);
2735 }
2736 return reserved;
2737}
2738
2739void
2740IOMemoryDescriptor::setPreparationID( void )
2741{
2742 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2743 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2744 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2745 }
2746}
2747
2748uint64_t
2749IOMemoryDescriptor::getPreparationID( void )
2750{
2751 if (reserved) {
2752 return reserved->preparationID;
2753 } else {
2754 return kIOPreparationIDUnsupported;
2755 }
2756}
2757
2758void
2759IOMemoryDescriptor::setDescriptorID( void )
2760{
2761 if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2762 SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2763 OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2764 }
2765}
2766
2767uint64_t
2768IOMemoryDescriptor::getDescriptorID( void )
2769{
2770 setDescriptorID();
2771
2772 if (reserved) {
2773 return reserved->descriptorID;
2774 } else {
2775 return kIODescriptorIDInvalid;
2776 }
2777}
2778
2779IOReturn
2780IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2781{
2782 if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2783 return kIOReturnSuccess;
2784 }
2785
2786 assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2787 if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2788 return kIOReturnBadArgument;
2789 }
2790
2791 uint64_t descriptorID = getDescriptorID();
2792 assert(descriptorID != kIODescriptorIDInvalid);
2793 if (getDescriptorID() == kIODescriptorIDInvalid) {
2794 return kIOReturnBadArgument;
2795 }
2796
2797 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2798
2799#if __LP64__
2800 static const uint8_t num_segments_page = 8;
2801#else
2802 static const uint8_t num_segments_page = 4;
2803#endif
2804 static const uint8_t num_segments_long = 2;
2805
2806 IOPhysicalAddress segments_page[num_segments_page];
2807 IOPhysicalRange segments_long[num_segments_long];
2808 memset(segments_page, UINT32_MAX, sizeof(segments_page));
2809 memset(segments_long, 0, sizeof(segments_long));
2810
2811 uint8_t segment_page_idx = 0;
2812 uint8_t segment_long_idx = 0;
2813
2814 IOPhysicalRange physical_segment;
2815 for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2816 physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2817
2818 if (physical_segment.length == 0) {
2819 break;
2820 }
2821
2822 /**
2823 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
2824 * buffer memory, pack segment events according to the following.
2825 *
2826 * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
2827 * IOMDPA_MAPPED event emitted by the current thread_id.
2828 *
2829 * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2830 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2831 * - unmapped pages will have a ppn of MAX_INT_32
2832 * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
2833 * - address_0, length_0, address_1, length_1
2834 * - unmapped pages will have an address of 0
2835 *
2836 * During each iteration do the following depending on the length of the mapping:
2837 * 1. add the current segment to the appropriate queue of pending segments
2838 * 2. check whether we are operating on the same type of segment (PAGE/LONG) as the previous pass
2839 * 2a. if not, emit and reset all events in the previous queue
2840 * 3. check whether we have filled up the current queue of pending events
2841 * 3a. if so, emit and reset all events in the pending queue
2842 * 4. after completing all iterations, emit any events remaining in the current queue
2843 */
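		/*
		 * Worked illustration (hypothetical values): if the walk sees three
		 * page-aligned PAGE_SIZE segments followed by one 6KB segment, the
		 * three page segments are queued first; when the 6KB segment arrives,
		 * the pending page queue is emitted as a single IOMDPA_SEGMENTS_PAGE
		 * event (unused slots still holding MAX_INT_32), and the 6KB segment
		 * stays queued until a second long segment, a page-sized segment, or
		 * the end of iteration triggers an IOMDPA_SEGMENTS_LONG event.
		 */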
2844
2845 bool emit_page = false;
2846 bool emit_long = false;
2847 if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2848 segments_page[segment_page_idx] = physical_segment.address;
2849 segment_page_idx++;
2850
2851 emit_long = segment_long_idx != 0;
2852 emit_page = segment_page_idx == num_segments_page;
2853
2854 if (os_unlikely(emit_long)) {
2855 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2856 segments_long[0].address, segments_long[0].length,
2857 segments_long[1].address, segments_long[1].length);
2858 }
2859
2860 if (os_unlikely(emit_page)) {
2861#if __LP64__
2862 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2863 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2864 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2865 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2866 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2867#else
2868 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2869 (ppnum_t) atop_32(segments_page[1]),
2870 (ppnum_t) atop_32(segments_page[2]),
2871 (ppnum_t) atop_32(segments_page[3]),
2872 (ppnum_t) atop_32(segments_page[4]));
2873#endif
2874 }
2875 } else {
2876 segments_long[segment_long_idx] = physical_segment;
2877 segment_long_idx++;
2878
2879 emit_page = segment_page_idx != 0;
2880 emit_long = segment_long_idx == num_segments_long;
2881
2882 if (os_unlikely(emit_page)) {
2883#if __LP64__
2884 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2885 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2886 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2887 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2888 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2889#else
2890 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2891 (ppnum_t) atop_32(segments_page[1]),
2892 (ppnum_t) atop_32(segments_page[2]),
2893 (ppnum_t) atop_32(segments_page[3]),
2894 (ppnum_t) atop_32(segments_page[4]));
2895#endif
2896 }
2897
2898 if (emit_long) {
2899 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2900 segments_long[0].address, segments_long[0].length,
2901 segments_long[1].address, segments_long[1].length);
2902 }
2903 }
2904
2905 if (os_unlikely(emit_page)) {
2906 memset(segments_page, UINT32_MAX, sizeof(segments_page));
2907 segment_page_idx = 0;
2908 }
2909
2910 if (os_unlikely(emit_long)) {
2911 memset(segments_long, 0, sizeof(segments_long));
2912 segment_long_idx = 0;
2913 }
2914 }
2915
2916 if (segment_page_idx != 0) {
2917 assert(segment_long_idx == 0);
2918#if __LP64__
2919 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2920 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2921 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2922 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2923 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2924#else
2925 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2926 (ppnum_t) atop_32(segments_page[1]),
2927 (ppnum_t) atop_32(segments_page[2]),
2928 (ppnum_t) atop_32(segments_page[3]),
2929 (ppnum_t) atop_32(segments_page[4]));
2930#endif
2931 } else if (segment_long_idx != 0) {
2932 assert(segment_page_idx == 0);
2933 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2934 segments_long[0].address, segments_long[0].length,
2935 segments_long[1].address, segments_long[1].length);
2936 }
2937
2938 return kIOReturnSuccess;
2939}
2940
2941void
2942IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2943{
2944 _kernelTag = (vm_tag_t) kernelTag;
2945 _userTag = (vm_tag_t) userTag;
2946}
2947
2948uint32_t
2949IOMemoryDescriptor::getVMTag(vm_map_t map)
2950{
2951 if (vm_kernel_map_is_kernel(map)) {
2952 if (VM_KERN_MEMORY_NONE != _kernelTag) {
2953 return (uint32_t) _kernelTag;
2954 }
2955 } else {
2956 if (VM_KERN_MEMORY_NONE != _userTag) {
2957 return (uint32_t) _userTag;
2958 }
2959 }
2960 return IOMemoryTag(map);
2961}
2962
2963IOReturn
2964IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2965{
2966 IOReturn err = kIOReturnSuccess;
2967 DMACommandOps params;
2968 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2969 ioGMDData *dataP;
2970
2971 params = (op & ~kIOMDDMACommandOperationMask & op);
2972 op &= kIOMDDMACommandOperationMask;
2973
2974 if (kIOMDDMAMap == op) {
2975 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2976 return kIOReturnUnderrun;
2977 }
2978
2979 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2980
2981 if (!_memoryEntries
2982 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2983 return kIOReturnNoMemory;
2984 }
2985
2986 if (_memoryEntries && data->fMapper) {
2987 bool remap, keepMap;
2988 dataP = getDataP(_memoryEntries);
2989
2990 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2991 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2992 }
2993 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2994 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2995 }
2996
2997 keepMap = (data->fMapper == gIOSystemMapper);
2998 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2999
3000 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3001 IOLockLock(_prepareLock);
3002 }
3003
3004 remap = (!keepMap);
3005 remap |= (dataP->fDMAMapNumAddressBits < 64)
3006 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3007 remap |= (dataP->fDMAMapAlignment > page_size);
3008
3009 if (remap || !dataP->fMappedBaseValid) {
3010 err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3011 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3012 dataP->fMappedBase = data->fAlloc;
3013 dataP->fMappedBaseValid = true;
3014 dataP->fMappedLength = data->fAllocLength;
3015 data->fAllocLength = 0; // IOMD owns the alloc now
3016 }
3017 } else {
3018 data->fAlloc = dataP->fMappedBase;
3019 data->fAllocLength = 0; // give out IOMD map
3020 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3021 }
3022
3023 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3024 IOLockUnlock(_prepareLock);
3025 }
3026 }
3027 return err;
3028 }
3029 if (kIOMDDMAUnmap == op) {
3030 if (dataSize < sizeof(IOMDDMAMapArgs)) {
3031 return kIOReturnUnderrun;
3032 }
3033 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3034
3035 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3036
3037 return kIOReturnSuccess;
3038 }
3039
3040 if (kIOMDAddDMAMapSpec == op) {
3041 if (dataSize < sizeof(IODMAMapSpecification)) {
3042 return kIOReturnUnderrun;
3043 }
3044
3045 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3046
3047 if (!_memoryEntries
3048 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3049 return kIOReturnNoMemory;
3050 }
3051
3052 if (_memoryEntries) {
3053 dataP = getDataP(_memoryEntries);
3054 if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3055 dataP->fDMAMapNumAddressBits = data->numAddressBits;
3056 }
3057 if (data->alignment > dataP->fDMAMapAlignment) {
3058 dataP->fDMAMapAlignment = data->alignment;
3059 }
3060 }
3061 return kIOReturnSuccess;
3062 }
3063
3064 if (kIOMDGetCharacteristics == op) {
3065 if (dataSize < sizeof(IOMDDMACharacteristics)) {
3066 return kIOReturnUnderrun;
3067 }
3068
3069 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3070 data->fLength = _length;
3071 data->fSGCount = _rangesCount;
3072 data->fPages = _pages;
3073 data->fDirection = getDirection();
3074 if (!_wireCount) {
3075 data->fIsPrepared = false;
3076 } else {
3077 data->fIsPrepared = true;
3078 data->fHighestPage = _highestPage;
3079 if (_memoryEntries) {
3080 dataP = getDataP(_memoryEntries);
3081 ioPLBlock *ioplList = getIOPLList(dataP);
3082 UInt count = getNumIOPL(_memoryEntries, dataP);
3083 if (count == 1) {
3084 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3085 }
3086 }
3087 }
3088
3089 return kIOReturnSuccess;
3090 } else if (kIOMDDMAActive == op) {
3091 if (params) {
3092 int16_t prior;
3093 prior = OSAddAtomic16(1, &md->_dmaReferences);
3094 if (!prior) {
3095 md->_mapName = NULL;
3096 }
3097 } else {
3098 if (md->_dmaReferences) {
3099 OSAddAtomic16(-1, &md->_dmaReferences);
3100 } else {
3101 panic("_dmaReferences underflow");
3102 }
3103 }
3104 } else if (kIOMDWalkSegments != op) {
3105 return kIOReturnBadArgument;
3106 }
3107
3108 // Get the next segment
3109 struct InternalState {
3110 IOMDDMAWalkSegmentArgs fIO;
3111 mach_vm_size_t fOffset2Index;
3112 mach_vm_size_t fNextOffset;
3113 UInt fIndex;
3114 } *isP;
3115
3116 // Find the next segment
3117 if (dataSize < sizeof(*isP)) {
3118 return kIOReturnUnderrun;
3119 }
3120
3121 isP = (InternalState *) vData;
3122 uint64_t offset = isP->fIO.fOffset;
3123 uint8_t mapped = isP->fIO.fMapped;
3124 uint64_t mappedBase;
3125
3126 if (mapped && (kIOMemoryRemote & _flags)) {
3127 return kIOReturnNotAttached;
3128 }
3129
3130 if (IOMapper::gSystem && mapped
3131 && (!(kIOMemoryHostOnly & _flags))
3132 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3133// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3134 if (!_memoryEntries
3135 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3136 return kIOReturnNoMemory;
3137 }
3138
3139 dataP = getDataP(_memoryEntries);
3140 if (dataP->fMapper) {
3141 IODMAMapSpecification mapSpec;
3142 bzero(&mapSpec, sizeof(mapSpec));
3143 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3144 mapSpec.alignment = dataP->fDMAMapAlignment;
3145 err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3146 if (kIOReturnSuccess != err) {
3147 return err;
3148 }
3149 dataP->fMappedBaseValid = true;
3150 }
3151 }
3152
3153 if (mapped) {
3154 if (IOMapper::gSystem
3155 && (!(kIOMemoryHostOnly & _flags))
3156 && _memoryEntries
3157 && (dataP = getDataP(_memoryEntries))
3158 && dataP->fMappedBaseValid) {
3159 mappedBase = dataP->fMappedBase;
3160 } else {
3161 mapped = 0;
3162 }
3163 }
3164
3165 if (offset >= _length) {
3166 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3167 }
3168
3169 // Validate the previous offset
3170 UInt ind;
3171 mach_vm_size_t off2Ind = isP->fOffset2Index;
3172 if (!params
3173 && offset
3174 && (offset == isP->fNextOffset || off2Ind <= offset)) {
3175 ind = isP->fIndex;
3176 } else {
3177 ind = off2Ind = 0; // Start from beginning
3178 }
3179 mach_vm_size_t length;
3180 UInt64 address;
3181
3182 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3183 // Physical address based memory descriptor
3184 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3185
3186 // Find the range after the one that contains the offset
3187 mach_vm_size_t len;
3188 for (len = 0; off2Ind <= offset; ind++) {
3189 len = physP[ind].length;
3190 off2Ind += len;
3191 }
3192
3193 // Calculate length within range and starting address
3194 length = off2Ind - offset;
3195 address = physP[ind - 1].address + len - length;
3196
3197 if (true && mapped) {
3198 address = mappedBase + offset;
3199 } else {
3200 // see how far we can coalesce ranges
3201 while (ind < _rangesCount && address + length == physP[ind].address) {
3202 len = physP[ind].length;
3203 length += len;
3204 off2Ind += len;
3205 ind++;
3206 }
3207 }
3208
3209 // correct contiguous check overshoot
3210 ind--;
3211 off2Ind -= len;
3212 }
3213#ifndef __LP64__
3214 else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3215 // Physical address based memory descriptor
3216 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3217
3218 // Find the range after the one that contains the offset
3219 mach_vm_size_t len;
3220 for (len = 0; off2Ind <= offset; ind++) {
3221 len = physP[ind].length;
3222 off2Ind += len;
3223 }
3224
3225 // Calculate length within range and starting address
3226 length = off2Ind - offset;
3227 address = physP[ind - 1].address + len - length;
3228
3229 if (true && mapped) {
3230 address = mappedBase + offset;
3231 } else {
3232 // see how far we can coalesce ranges
3233 while (ind < _rangesCount && address + length == physP[ind].address) {
3234 len = physP[ind].length;
3235 length += len;
3236 off2Ind += len;
3237 ind++;
3238 }
3239 }
3240 // correct contiguous check overshoot
3241 ind--;
3242 off2Ind -= len;
3243 }
3244#endif /* !__LP64__ */
3245 else {
3246 do {
3247 if (!_wireCount) {
3248 panic("IOGMD: not wired for the IODMACommand");
3249 }
3250
3251 assert(_memoryEntries);
3252
3253 dataP = getDataP(_memoryEntries);
3254 const ioPLBlock *ioplList = getIOPLList(dataP);
3255 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3256 upl_page_info_t *pageList = getPageList(dataP);
3257
3258 assert(numIOPLs > 0);
3259
3260 // Scan through iopl info blocks looking for block containing offset
3261 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3262 ind++;
3263 }
3264
3265 // Go back to actual range as search goes past it
3266 ioPLBlock ioplInfo = ioplList[ind - 1];
3267 off2Ind = ioplInfo.fIOMDOffset;
3268
3269 if (ind < numIOPLs) {
3270 length = ioplList[ind].fIOMDOffset;
3271 } else {
3272 length = _length;
3273 }
3274 length -= offset; // Remainder within iopl
3275
3276 // Rebase the offset to be relative to the start of this iopl
3277 offset -= off2Ind;
3278
3279 // If a mapped address is requested and this is a pre-mapped IOPL
3280 // then we just need to compute an offset relative to the mapped base.
3281 if (mapped) {
3282 offset += (ioplInfo.fPageOffset & PAGE_MASK);
3283 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3284 continue; // Done leave do/while(false) now
3285 }
3286
3287 // The offset is rebased into the current iopl.
3288 // Now add the iopl 1st page offset.
3289 offset += ioplInfo.fPageOffset;
3290
3291 // For external UPLs the fPageInfo field points directly to
3292 // the upl's upl_page_info_t array.
3293 if (ioplInfo.fFlags & kIOPLExternUPL) {
3294 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3295 } else {
3296 pageList = &pageList[ioplInfo.fPageInfo];
3297 }
3298
3299 // Check for direct device non-paged memory
3300 if (ioplInfo.fFlags & kIOPLOnDevice) {
3301 address = ptoa_64(pageList->phys_addr) + offset;
3302 continue; // Done leave do/while(false) now
3303 }
3304
3305 // Now we need to compute the index into the pageList
3306 UInt pageInd = atop_32(offset);
3307 offset &= PAGE_MASK;
3308
3309 // Compute the starting address of this segment
3310 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3311 if (!pageAddr) {
3312 panic("!pageList phys_addr");
3313 }
3314
3315 address = ptoa_64(pageAddr) + offset;
3316
3317 // length is currently set to the length of the remainder of the iopl.
3318 // We need to check that the remainder of the iopl is contiguous.
3319 // This is indicated by pageList[ind].phys_addr being sequential.
3320 IOByteCount contigLength = PAGE_SIZE - offset;
3321 while (contigLength < length
3322 && ++pageAddr == pageList[++pageInd].phys_addr) {
3323 contigLength += PAGE_SIZE;
3324 }
3325
3326 if (contigLength < length) {
3327 length = contigLength;
3328 }
3329
3330
3331 assert(address);
3332 assert(length);
3333 } while (false);
3334 }
3335
3336 // Update return values and state
3337 isP->fIO.fIOVMAddr = address;
3338 isP->fIO.fLength = length;
3339 isP->fIndex = ind;
3340 isP->fOffset2Index = off2Ind;
3341 isP->fNextOffset = isP->fIO.fOffset + length;
3342
3343 return kIOReturnSuccess;
3344}
3345
3346addr64_t
3347IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3348{
3349 IOReturn ret;
3350 mach_vm_address_t address = 0;
3351 mach_vm_size_t length = 0;
3352 IOMapper * mapper = gIOSystemMapper;
3353 IOOptionBits type = _flags & kIOMemoryTypeMask;
3354
3355 if (lengthOfSegment) {
3356 *lengthOfSegment = 0;
3357 }
3358
3359 if (offset >= _length) {
3360 return 0;
3361 }
3362
3363 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3364 // support the unwired memory case in IOGeneralMemoryDescriptor. Likewise, hibernate_write_image() cannot use
3365 // map()->getVirtualAddress() to obtain the kernel pointer, since it must avoid the memory allocation an
3366 // IOMemoryMap would require. So _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up.
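	// For illustration, a hedged summary of the option flags handled below:
	//   getPhysicalSegment(off, &len, 0)                       -> DMA/mapper address when a system mapper is active
	//   getPhysicalSegment(off, &len, kIOMemoryMapperNone)     -> raw physical address
	//   getPhysicalSegment(off, &len, _kIOMemorySourceSegment) -> source range address, usable without wiring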
3367
3368 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
3369 unsigned rangesIndex = 0;
3370 Ranges vec = _ranges;
3371 mach_vm_address_t addr;
3372
3373 // Find starting address within the vector of ranges
3374 for (;;) {
3375 getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
3376 if (offset < length) {
3377 break;
3378 }
3379 offset -= length; // (make offset relative)
3380 rangesIndex++;
3381 }
3382
3383 // Now that we have the starting range,
3384 // let's find the last contiguous range
3385 addr += offset;
3386 length -= offset;
3387
3388 for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
3389 mach_vm_address_t newAddr;
3390 mach_vm_size_t newLen;
3391
3392 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
3393 if (addr + length != newAddr) {
3394 break;
3395 }
3396 length += newLen;
3397 }
3398 if (addr) {
3399 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
3400 }
3401 } else {
3402 IOMDDMAWalkSegmentState _state;
3403 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
3404
3405 state->fOffset = offset;
3406 state->fLength = _length - offset;
3407 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
3408
3409 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
3410
3411 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
3412 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3413 ret, this, state->fOffset,
3414 state->fIOVMAddr, state->fLength);
3415 }
3416 if (kIOReturnSuccess == ret) {
3417 address = state->fIOVMAddr;
3418 length = state->fLength;
3419 }
3420
3421 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3422 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3423
3424 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
3425 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
3426 addr64_t origAddr = address;
3427 IOByteCount origLen = length;
3428
3429 address = mapper->mapToPhysicalAddress(origAddr);
3430 length = page_size - (address & (page_size - 1));
3431 while ((length < origLen)
3432 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
3433 length += page_size;
3434 }
3435 if (length > origLen) {
3436 length = origLen;
3437 }
3438 }
3439 }
3440 }
3441
3442 if (!address) {
3443 length = 0;
3444 }
3445
3446 if (lengthOfSegment) {
3447 *lengthOfSegment = length;
3448 }
3449
3450 return address;
3451}
3452
3453#ifndef __LP64__
3454#pragma clang diagnostic push
3455#pragma clang diagnostic ignored "-Wdeprecated-declarations"
3456
3457addr64_t
3458IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3459{
3460 addr64_t address = 0;
3461
3462 if (options & _kIOMemorySourceSegment) {
3463 address = getSourceSegment(offset, lengthOfSegment);
3464 } else if (options & kIOMemoryMapperNone) {
3465 address = getPhysicalSegment64(offset, lengthOfSegment);
3466 } else {
3467 address = getPhysicalSegment(offset, lengthOfSegment);
3468 }
3469
3470 return address;
3471}
3472#pragma clang diagnostic pop
3473
3474addr64_t
3475IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3476{
3477 return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3478}
3479
3480IOPhysicalAddress
3481IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3482{
3483 addr64_t address = 0;
3484 IOByteCount length = 0;
3485
3486 address = getPhysicalSegment(offset, lengthOfSegment, 0);
3487
3488 if (lengthOfSegment) {
3489 length = *lengthOfSegment;
3490 }
3491
3492 if ((address + length) > 0x100000000ULL) {
3493 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3494 address, (long) length, (getMetaClass())->getClassName());
3495 }
3496
3497 return (IOPhysicalAddress) address;
3498}
3499
3500addr64_t
3501IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3502{
3503 IOPhysicalAddress phys32;
3504 IOByteCount length;
3505 addr64_t phys64;
3506 IOMapper * mapper = NULL;
3507
3508 phys32 = getPhysicalSegment(offset, lengthOfSegment);
3509 if (!phys32) {
3510 return 0;
3511 }
3512
3513 if (gIOSystemMapper) {
3514 mapper = gIOSystemMapper;
3515 }
3516
3517 if (mapper) {
3518 IOByteCount origLen;
3519
3520 phys64 = mapper->mapToPhysicalAddress(phys32);
3521 origLen = *lengthOfSegment;
3522 length = page_size - (phys64 & (page_size - 1));
3523 while ((length < origLen)
3524 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3525 length += page_size;
3526 }
3527 if (length > origLen) {
3528 length = origLen;
3529 }
3530
3531 *lengthOfSegment = length;
3532 } else {
3533 phys64 = (addr64_t) phys32;
3534 }
3535
3536 return phys64;
3537}
3538
3539IOPhysicalAddress
3540IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3541{
3542 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3543}
3544
3545IOPhysicalAddress
3546IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3547{
3548 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3549}
3550
3551#pragma clang diagnostic push
3552#pragma clang diagnostic ignored "-Wdeprecated-declarations"
3553
3554void *
3555IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3556 IOByteCount * lengthOfSegment)
3557{
3558 if (_task == kernel_task) {
3559 return (void *) getSourceSegment(offset, lengthOfSegment);
3560 } else {
3561 panic("IOGMD::getVirtualSegment deprecated");
3562 }
3563
3564 return NULL;
3565}
3566#pragma clang diagnostic pop
3567#endif /* !__LP64__ */
3568
3569IOReturn
3570IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3571{
3572 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
3573 DMACommandOps params;
3574 IOReturn err;
3575
3576 params = (op & ~kIOMDDMACommandOperationMask & op);
3577 op &= kIOMDDMACommandOperationMask;
3578
3579 if (kIOMDGetCharacteristics == op) {
3580 if (dataSize < sizeof(IOMDDMACharacteristics)) {
3581 return kIOReturnUnderrun;
3582 }
3583
3584 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3585 data->fLength = getLength();
3586 data->fSGCount = 0;
3587 data->fDirection = getDirection();
3588 data->fIsPrepared = true; // Assume prepared - fails safe
3589 } else if (kIOMDWalkSegments == op) {
3590 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
3591 return kIOReturnUnderrun;
3592 }
3593
3594 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
3595 IOByteCount offset = (IOByteCount) data->fOffset;
3596 IOPhysicalLength length, nextLength;
3597 addr64_t addr, nextAddr;
3598
3599 if (data->fMapped) {
3600 panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
3601 }
3602 addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
3603 offset += length;
3604 while (offset < getLength()) {
3605 nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
3606 if ((addr + length) != nextAddr) {
3607 break;
3608 }
3609 length += nextLength;
3610 offset += nextLength;
3611 }
3612 data->fIOVMAddr = addr;
3613 data->fLength = length;
3614 } else if (kIOMDAddDMAMapSpec == op) {
3615 return kIOReturnUnsupported;
3616 } else if (kIOMDDMAMap == op) {
3617 if (dataSize < sizeof(IOMDDMAMapArgs)) {
3618 return kIOReturnUnderrun;
3619 }
3620 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3621
3622 err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3623
3624 return err;
3625 } else if (kIOMDDMAUnmap == op) {
3626 if (dataSize < sizeof(IOMDDMAMapArgs)) {
3627 return kIOReturnUnderrun;
3628 }
3629 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3630
3631 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3632
3633 return kIOReturnSuccess;
3634 } else {
3635 return kIOReturnBadArgument;
3636 }
3637
3638 return kIOReturnSuccess;
3639}
3640
3641IOReturn
3642IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
3643 IOOptionBits * oldState )
3644{
3645 IOReturn err = kIOReturnSuccess;
3646
3647 vm_purgable_t control;
3648 int state;
3649
3650 assert(!(kIOMemoryRemote & _flags));
3651 if (kIOMemoryRemote & _flags) {
3652 return kIOReturnNotAttached;
3653 }
3654
3655 if (_memRef) {
3656 err = super::setPurgeable(newState, oldState);
3657 } else {
3658 if (kIOMemoryThreadSafe & _flags) {
3659 LOCK;
3660 }
3661 do{
3662 // Find the appropriate vm_map for the given task
3663 vm_map_t curMap;
3664 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
3665 err = kIOReturnNotReady;
3666 break;
3667 } else if (!_task) {
3668 err = kIOReturnUnsupported;
3669 break;
3670 } else {
3671 curMap = get_task_map(_task);
3672 if (NULL == curMap) {
3673 err = KERN_INVALID_ARGUMENT;
3674 break;
3675 }
3676 }
3677
3678 // can only do one range
3679 Ranges vec = _ranges;
3680 IOOptionBits type = _flags & kIOMemoryTypeMask;
3681 mach_vm_address_t addr;
3682 mach_vm_size_t len;
3683 getAddrLenForInd(addr, len, type, vec, 0, _task);
3684
3685 err = purgeableControlBits(newState, &control, &state);
3686 if (kIOReturnSuccess != err) {
3687 break;
3688 }
3689 err = vm_map_purgable_control(curMap, addr, control, &state);
3690 if (oldState) {
3691 if (kIOReturnSuccess == err) {
3692 err = purgeableStateBits(&state);
3693 *oldState = state;
3694 }
3695 }
3696 }while (false);
3697 if (kIOMemoryThreadSafe & _flags) {
3698 UNLOCK;
3699 }
3700 }
3701
3702 return err;
3703}
3704
3705IOReturn
3706IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3707 IOOptionBits * oldState )
3708{
3709 IOReturn err = kIOReturnNotReady;
3710
3711 if (kIOMemoryThreadSafe & _flags) {
3712 LOCK;
3713 }
3714 if (_memRef) {
3715 err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3716 }
3717 if (kIOMemoryThreadSafe & _flags) {
3718 UNLOCK;
3719 }
3720
3721 return err;
3722}
3723
3724IOReturn
3725IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3726 int newLedgerTag,
3727 IOOptionBits newLedgerOptions )
3728{
3729 IOReturn err = kIOReturnSuccess;
3730
3731 assert(!(kIOMemoryRemote & _flags));
3732 if (kIOMemoryRemote & _flags) {
3733 return kIOReturnNotAttached;
3734 }
3735
3736 if (iokit_iomd_setownership_enabled == FALSE) {
3737 return kIOReturnUnsupported;
3738 }
3739
3740 if (_memRef) {
3741 err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3742 } else {
3743 err = kIOReturnUnsupported;
3744 }
3745
3746 return err;
3747}
3748
3749IOReturn
3750IOMemoryDescriptor::setOwnership( task_t newOwner,
3751 int newLedgerTag,
3752 IOOptionBits newLedgerOptions )
3753{
3754 IOReturn err = kIOReturnNotReady;
3755
3756 assert(!(kIOMemoryRemote & _flags));
3757 if (kIOMemoryRemote & _flags) {
3758 return kIOReturnNotAttached;
3759 }
3760
3761 if (iokit_iomd_setownership_enabled == FALSE) {
3762 return kIOReturnUnsupported;
3763 }
3764
3765 if (kIOMemoryThreadSafe & _flags) {
3766 LOCK;
3767 }
3768 if (_memRef) {
3769 err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3770 } else {
3771 IOMultiMemoryDescriptor * mmd;
3772 IOSubMemoryDescriptor * smd;
3773 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3774 err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3775 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3776 err = mmd->setOwnership(newOwner, newLedgerTag, newOptions: newLedgerOptions);
3777 }
3778 }
3779 if (kIOMemoryThreadSafe & _flags) {
3780 UNLOCK;
3781 }
3782
3783 return err;
3784}
3785
3786
3787uint64_t
3788IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3789{
3790 uint64_t length;
3791
3792 if (_memRef) {
3793 length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(ref: _memRef, offset);
3794 } else {
3795 IOByteCount iterate, segLen;
3796 IOPhysicalAddress sourceAddr, sourceAlign;
3797
3798 if (kIOMemoryThreadSafe & _flags) {
3799 LOCK;
3800 }
3801 length = 0;
3802 iterate = 0;
3803 while ((sourceAddr = getPhysicalSegment(offset: iterate, length: &segLen, options: _kIOMemorySourceSegment))) {
3804 sourceAlign = (sourceAddr & page_mask);
3805 if (offset && !iterate) {
3806 *offset = sourceAlign;
3807 }
3808 length += round_page(x: sourceAddr + segLen) - trunc_page(sourceAddr);
3809 iterate += segLen;
3810 }
3811 if (!iterate) {
3812 length = getLength();
3813 if (offset) {
3814 *offset = 0;
3815 }
3816 }
3817 if (kIOMemoryThreadSafe & _flags) {
3818 UNLOCK;
3819 }
3820 }
3821
3822 return length;
3823}
3824
3825
3826IOReturn
3827IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3828 IOByteCount * dirtyPageCount )
3829{
3830 IOReturn err = kIOReturnNotReady;
3831
3832 assert(!(kIOMemoryRemote & _flags));
3833 if (kIOMemoryRemote & _flags) {
3834 return kIOReturnNotAttached;
3835 }
3836
3837 if (kIOMemoryThreadSafe & _flags) {
3838 LOCK;
3839 }
3840 if (_memRef) {
3841 err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(ref: _memRef, residentPageCount, dirtyPageCount);
3842 } else {
3843 IOMultiMemoryDescriptor * mmd;
3844 IOSubMemoryDescriptor * smd;
3845 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3846 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3847 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3848 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3849 }
3850 }
3851 if (kIOMemoryThreadSafe & _flags) {
3852 UNLOCK;
3853 }
3854
3855 return err;
3856}
3857
3858
3859#if defined(__arm64__)
3860extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3861extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3862#else /* defined(__arm64__) */
3863extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3864extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3865#endif /* defined(__arm64__) */
3866
3867static void
3868SetEncryptOp(addr64_t pa, unsigned int count)
3869{
3870 ppnum_t page, end;
3871
3872 page = (ppnum_t) atop_64(round_page_64(pa));
3873 end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3874 for (; page < end; page++) {
3875 pmap_clear_noencrypt(pn: page);
3876 }
3877}
3878
3879static void
3880ClearEncryptOp(addr64_t pa, unsigned int count)
3881{
3882 ppnum_t page, end;
3883
3884 page = (ppnum_t) atop_64(round_page_64(pa));
3885 end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3886 for (; page < end; page++) {
3887 pmap_set_noencrypt(pn: page);
3888 }
3889}
3890
3891IOReturn
3892IOMemoryDescriptor::performOperation( IOOptionBits options,
3893 IOByteCount offset, IOByteCount length )
3894{
3895 IOByteCount remaining;
3896 unsigned int res;
3897 void (*func)(addr64_t pa, unsigned int count) = NULL;
3898#if defined(__arm64__)
3899 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
3900#endif
3901
3902 assert(!(kIOMemoryRemote & _flags));
3903 if (kIOMemoryRemote & _flags) {
3904 return kIOReturnNotAttached;
3905 }
3906
3907 switch (options) {
3908 case kIOMemoryIncoherentIOFlush:
3909#if defined(__arm64__)
3910 func_ext = &dcache_incoherent_io_flush64;
3911#if __ARM_COHERENT_IO__
3912 func_ext(0, 0, 0, &res);
3913 return kIOReturnSuccess;
3914#else /* __ARM_COHERENT_IO__ */
3915 break;
3916#endif /* __ARM_COHERENT_IO__ */
3917#else /* defined(__arm64__) */
3918 func = &dcache_incoherent_io_flush64;
3919 break;
3920#endif /* defined(__arm64__) */
3921 case kIOMemoryIncoherentIOStore:
3922#if defined(__arm64__)
3923 func_ext = &dcache_incoherent_io_store64;
3924#if __ARM_COHERENT_IO__
3925 func_ext(0, 0, 0, &res);
3926 return kIOReturnSuccess;
3927#else /* __ARM_COHERENT_IO__ */
3928 break;
3929#endif /* __ARM_COHERENT_IO__ */
3930#else /* defined(__arm64__) */
3931 func = &dcache_incoherent_io_store64;
3932 break;
3933#endif /* defined(__arm64__) */
3934
3935 case kIOMemorySetEncrypted:
3936 func = &SetEncryptOp;
3937 break;
3938 case kIOMemoryClearEncrypted:
3939 func = &ClearEncryptOp;
3940 break;
3941 }
3942
3943#if defined(__arm64__)
3944 if ((func == NULL) && (func_ext == NULL)) {
3945 return kIOReturnUnsupported;
3946 }
3947#else /* defined(__arm64__) */
3948 if (!func) {
3949 return kIOReturnUnsupported;
3950 }
3951#endif /* defined(__arm64__) */
3952
3953 if (kIOMemoryThreadSafe & _flags) {
3954 LOCK;
3955 }
3956
3957 res = 0x0UL;
3958 remaining = length = min(length, getLength() - offset);
3959 while (remaining) {
3960 // (process another target segment?)
3961 addr64_t dstAddr64;
3962 IOByteCount dstLen;
3963
3964 dstAddr64 = getPhysicalSegment(offset, length: &dstLen, options: kIOMemoryMapperNone);
3965 if (!dstAddr64) {
3966 break;
3967 }
3968
3969 // Clip segment length to remaining
3970 if (dstLen > remaining) {
3971 dstLen = remaining;
3972 }
3973 if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
3974 dstLen = (UINT_MAX - PAGE_SIZE + 1);
3975 }
3976 if (remaining > UINT_MAX) {
3977 remaining = UINT_MAX;
3978 }
3979
3980#if defined(__arm64__)
3981 if (func) {
3982 (*func)(dstAddr64, (unsigned int) dstLen);
3983 }
3984 if (func_ext) {
3985 (*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
3986 if (res != 0x0UL) {
3987 remaining = 0;
3988 break;
3989 }
3990 }
3991#else /* defined(__arm64__) */
3992 (*func)(dstAddr64, (unsigned int) dstLen);
3993#endif /* defined(__arm64__) */
3994
3995 offset += dstLen;
3996 remaining -= dstLen;
3997 }
3998
3999 if (kIOMemoryThreadSafe & _flags) {
4000 UNLOCK;
4001 }
4002
4003 return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
4004}
4005
4006/*
4007 *
4008 */
4009
4010#if defined(__i386__) || defined(__x86_64__)
4011
4012extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4013
4014/* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4015 * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4016 * kernel non-text data -- should we just add another range instead?
4017 */
4018#define io_kernel_static_start vm_kernel_stext
4019#define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4020
4021#elif defined(__arm64__)
4022
4023extern vm_offset_t static_memory_end;
4024
4025#if defined(__arm64__)
4026#define io_kernel_static_start vm_kext_base
4027#else /* defined(__arm64__) */
4028#define io_kernel_static_start vm_kernel_stext
4029#endif /* defined(__arm64__) */
4030
4031#define io_kernel_static_end static_memory_end
4032
4033#else
4034#error io_kernel_static_end is undefined for this architecture
4035#endif
4036
4037static kern_return_t
4038io_get_kernel_static_upl(
4039 vm_map_t /* map */,
4040 uintptr_t offset,
4041 upl_size_t *upl_size,
4042 unsigned int *page_offset,
4043 upl_t *upl,
4044 upl_page_info_array_t page_list,
4045 unsigned int *count,
4046 ppnum_t *highest_page)
4047{
4048 unsigned int pageCount, page;
4049 ppnum_t phys;
4050 ppnum_t highestPage = 0;
4051
4052 pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4053 if (pageCount > *count) {
4054 pageCount = *count;
4055 }
4056 *upl_size = (upl_size_t) ptoa_64(pageCount);
4057
4058 *upl = NULL;
4059 *page_offset = ((unsigned int) page_mask & offset);
4060
4061 for (page = 0; page < pageCount; page++) {
4062 phys = pmap_find_phys(pmap: kernel_pmap, va: ((addr64_t)offset) + ptoa_64(page));
4063 if (!phys) {
4064 break;
4065 }
4066 page_list[page].phys_addr = phys;
4067 page_list[page].free_when_done = 0;
4068 page_list[page].absent = 0;
4069 page_list[page].dirty = 0;
4070 page_list[page].precious = 0;
4071 page_list[page].device = 0;
4072 if (phys > highestPage) {
4073 highestPage = phys;
4074 }
4075 }
4076
4077 *highest_page = highestPage;
4078
4079 return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4080}
4081
4082IOReturn
4083IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4084{
4085 IOOptionBits type = _flags & kIOMemoryTypeMask;
4086 IOReturn error = kIOReturnSuccess;
4087 ioGMDData *dataP;
4088 upl_page_info_array_t pageInfo;
4089 ppnum_t mapBase;
4090 vm_tag_t tag = VM_KERN_MEMORY_NONE;
4091 mach_vm_size_t numBytesWired = 0;
4092
4093 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4094
4095 if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4096 forDirection = (IODirection) (forDirection | getDirection());
4097 }
4098
4099 dataP = getDataP(_memoryEntries);
4100 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4101 switch (kIODirectionOutIn & forDirection) {
4102 case kIODirectionOut:
4103 // Pages do not need to be marked as dirty on commit
4104 uplFlags = UPL_COPYOUT_FROM;
4105 dataP->fDMAAccess = kIODMAMapReadAccess;
4106 break;
4107
4108 case kIODirectionIn:
4109 dataP->fDMAAccess = kIODMAMapWriteAccess;
4110 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4111 break;
4112
4113 default:
4114 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4115 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4116 break;
4117 }
4118
4119 if (_wireCount) {
4120 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4121 OSReportWithBacktrace(str: "IOMemoryDescriptor 0x%zx prepared read only",
4122 (size_t)VM_KERNEL_ADDRPERM(this));
4123 error = kIOReturnNotWritable;
4124 }
4125 } else {
4126 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4127 IOMapper *mapper;
4128
4129 mapper = dataP->fMapper;
4130 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4131
4132 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4133 tag = _kernelTag;
4134 if (VM_KERN_MEMORY_NONE == tag) {
4135 tag = IOMemoryTag(map: kernel_map);
4136 }
4137
4138 if (kIODirectionPrepareToPhys32 & forDirection) {
4139 if (!mapper) {
4140 uplFlags |= UPL_NEED_32BIT_ADDR;
4141 }
4142 if (dataP->fDMAMapNumAddressBits > 32) {
4143 dataP->fDMAMapNumAddressBits = 32;
4144 }
4145 }
4146 if (kIODirectionPrepareNoFault & forDirection) {
4147 uplFlags |= UPL_REQUEST_NO_FAULT;
4148 }
4149 if (kIODirectionPrepareNoZeroFill & forDirection) {
4150 uplFlags |= UPL_NOZEROFILLIO;
4151 }
4152 if (kIODirectionPrepareNonCoherent & forDirection) {
4153 uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4154 }
4155
4156 mapBase = 0;
4157
4158 // Note that appendBytes(NULL) zeros the data up to the desired length
4159 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4160 if (uplPageSize > ((unsigned int)uplPageSize)) {
4161 error = kIOReturnNoMemory;
4162 traceInterval.setEndArg2(error);
4163 return error;
4164 }
4165 if (!_memoryEntries->appendBytes(NULL, length: uplPageSize)) {
4166 error = kIOReturnNoMemory;
4167 traceInterval.setEndArg2(error);
4168 return error;
4169 }
4170 dataP = NULL;
4171
4172 // Find the appropriate vm_map for the given task
4173 vm_map_t curMap;
4174 if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4175 curMap = NULL;
4176 } else {
4177 curMap = get_task_map(_task);
4178 }
4179
4180 // Iterate over the vector of virtual ranges
4181 Ranges vec = _ranges;
4182 unsigned int pageIndex = 0;
4183 IOByteCount mdOffset = 0;
4184 ppnum_t highestPage = 0;
4185 bool byteAlignUPL;
4186
4187 IOMemoryEntry * memRefEntry = NULL;
4188 if (_memRef) {
4189 memRefEntry = &_memRef->entries[0];
4190 byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4191 } else {
4192 byteAlignUPL = true;
4193 }
4194
4195 for (UInt range = 0; mdOffset < _length; range++) {
4196 ioPLBlock iopl;
4197 mach_vm_address_t startPage, startPageOffset;
4198 mach_vm_size_t numBytes;
4199 ppnum_t highPage = 0;
4200
4201 if (_memRef) {
4202 if (range >= _memRef->count) {
4203 panic("memRefEntry");
4204 }
4205 memRefEntry = &_memRef->entries[range];
4206 numBytes = memRefEntry->size;
4207 startPage = -1ULL;
4208 if (byteAlignUPL) {
4209 startPageOffset = 0;
4210 } else {
4211 startPageOffset = (memRefEntry->start & PAGE_MASK);
4212 }
4213 } else {
4214 // Get the startPage address and length of vec[range]
4215 getAddrLenForInd(addr&: startPage, len&: numBytes, type, r: vec, ind: range, task: _task);
4216 if (byteAlignUPL) {
4217 startPageOffset = 0;
4218 } else {
4219 startPageOffset = startPage & PAGE_MASK;
4220 startPage = trunc_page_64(startPage);
4221 }
4222 }
4223 iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4224 numBytes += startPageOffset;
4225
4226 if (mapper) {
4227 iopl.fMappedPage = mapBase + pageIndex;
4228 } else {
4229 iopl.fMappedPage = 0;
4230 }
4231
4232 // Iterate over the current range, creating UPLs
4233 while (numBytes) {
4234 vm_address_t kernelStart = (vm_address_t) startPage;
4235 vm_map_t theMap;
4236 if (curMap) {
4237 theMap = curMap;
4238 } else if (_memRef) {
4239 theMap = NULL;
4240 } else {
4241 assert(_task == kernel_task);
4242 theMap = IOPageableMapForAddress(address: kernelStart);
4243 }
4244
4245 // ioplFlags is an in/out parameter
4246 upl_control_flags_t ioplFlags = uplFlags;
4247 dataP = getDataP(_memoryEntries);
4248 pageInfo = getPageList(dataP);
4249 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4250
4251 mach_vm_size_t ioplPhysSize;
4252 upl_size_t ioplSize;
4253 unsigned int numPageInfo;
4254
4255 if (_memRef) {
4256 error = mach_memory_entry_map_size(entry_port: memRefEntry->entry, NULL /*physical*/, offset: 0, size: memRefEntry->size, map_size: &ioplPhysSize);
4257 DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4258 } else {
4259 error = vm_map_range_physical_size(map: theMap, start: startPage, size: numBytes, phys_size: &ioplPhysSize);
4260 DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4261 }
4262 if (error != KERN_SUCCESS) {
4263 if (_memRef) {
4264 DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4265 } else {
4266 DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4267 }
4268 printf("entry size error %d\n", error);
4269 goto abortExit;
4270 }
4271 ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4272 numPageInfo = atop_32(ioplPhysSize);
4273 if (byteAlignUPL) {
4274 if (numBytes > ioplPhysSize) {
4275 ioplSize = ((typeof(ioplSize))ioplPhysSize);
4276 } else {
4277 ioplSize = ((typeof(ioplSize))numBytes);
4278 }
4279 } else {
4280 ioplSize = ((typeof(ioplSize))ioplPhysSize);
4281 }
4282
4283 if (_memRef) {
4284 memory_object_offset_t entryOffset;
4285
4286 entryOffset = mdOffset;
4287 if (byteAlignUPL) {
4288 entryOffset = (entryOffset - memRefEntry->offset);
4289 } else {
4290 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4291 }
4292 if (ioplSize > (memRefEntry->size - entryOffset)) {
4293 ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4294 }
4295 error = memory_object_iopl_request(port: memRefEntry->entry,
4296 offset: entryOffset,
4297 upl_size: &ioplSize,
4298 upl_ptr: &iopl.fIOPL,
4299 user_page_list: baseInfo,
4300 page_list_count: &numPageInfo,
4301 flags: &ioplFlags,
4302 tag);
4303 } else if ((theMap == kernel_map)
4304 && (kernelStart >= io_kernel_static_start)
4305 && (kernelStart < io_kernel_static_end)) {
4306 error = io_get_kernel_static_upl(theMap,
4307 offset: kernelStart,
4308 upl_size: &ioplSize,
4309 page_offset: &iopl.fPageOffset,
4310 upl: &iopl.fIOPL,
4311 page_list: baseInfo,
4312 count: &numPageInfo,
4313 highest_page: &highPage);
4314 } else {
4315 assert(theMap);
4316 error = vm_map_create_upl(map: theMap,
4317 offset: startPage,
4318 upl_size: (upl_size_t*)&ioplSize,
4319 upl: &iopl.fIOPL,
4320 page_list: baseInfo,
4321 count: &numPageInfo,
4322 flags: &ioplFlags,
4323 tag);
4324 }
4325
4326 if (error != KERN_SUCCESS) {
4327 traceInterval.setEndArg2(error);
4328 DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4329 goto abortExit;
4330 }
4331
4332 assert(ioplSize);
4333
4334 if (iopl.fIOPL) {
4335 highPage = upl_get_highest_page(upl: iopl.fIOPL);
4336 }
4337 if (highPage > highestPage) {
4338 highestPage = highPage;
4339 }
4340
4341 if (baseInfo->device) {
4342 numPageInfo = 1;
4343 iopl.fFlags = kIOPLOnDevice;
4344 } else {
4345 iopl.fFlags = 0;
4346 }
4347
4348 if (byteAlignUPL) {
4349 if (iopl.fIOPL) {
4350 DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4351 iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(upl: iopl.fIOPL);
4352 }
4353 if (startPage != (mach_vm_address_t)-1) {
4354 // assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4355 startPage -= iopl.fPageOffset;
4356 }
4357 ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4358 numBytes += iopl.fPageOffset;
4359 }
4360
4361 iopl.fIOMDOffset = mdOffset;
4362 iopl.fPageInfo = pageIndex;
4363
4364 if (!_memoryEntries->appendBytes(bytes: &iopl, length: sizeof(iopl))) {
4365 // Clean up partially created and unsaved iopl
4366 if (iopl.fIOPL) {
4367 upl_abort(upl_object: iopl.fIOPL, abort_cond: 0);
4368 upl_deallocate(upl: iopl.fIOPL);
4369 }
4370 error = kIOReturnNoMemory;
4371 traceInterval.setEndArg2(error);
4372 goto abortExit;
4373 }
4374 dataP = NULL;
4375
4376 // Check for multiple iopls in one virtual range
4377 pageIndex += numPageInfo;
4378 mdOffset -= iopl.fPageOffset;
4379 numBytesWired += ioplSize;
4380 if (ioplSize < numBytes) {
4381 numBytes -= ioplSize;
4382 if (startPage != (mach_vm_address_t)-1) {
4383 startPage += ioplSize;
4384 }
4385 mdOffset += ioplSize;
4386 iopl.fPageOffset = 0;
4387 if (mapper) {
4388 iopl.fMappedPage = mapBase + pageIndex;
4389 }
4390 } else {
4391 mdOffset += numBytes;
4392 break;
4393 }
4394 }
4395 }
4396
4397 _highestPage = highestPage;
4398 DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4399
4400 if (UPL_COPYOUT_FROM & uplFlags) {
4401 _flags |= kIOMemoryPreparedReadOnly;
4402 }
4403 traceInterval.setEndCodes(arg1: numBytesWired, arg2: error);
4404 }
4405
4406#if IOTRACKING
4407 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4408 dataP = getDataP(_memoryEntries);
4409 if (!dataP->fWireTracking.link.next) {
4410 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4411 }
4412 }
4413#endif /* IOTRACKING */
4414
4415 return error;
4416
4417abortExit:
4418 {
4419 dataP = getDataP(_memoryEntries);
4420 UInt done = getNumIOPL(_memoryEntries, dataP);
4421 ioPLBlock *ioplList = getIOPLList(dataP);
4422
4423 for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4424 if (ioplList[ioplIdx].fIOPL) {
4425 upl_abort(upl_object: ioplList[ioplIdx].fIOPL, abort_cond: 0);
4426 upl_deallocate(upl: ioplList[ioplIdx].fIOPL);
4427 }
4428 }
4429 _memoryEntries->setLength(computeDataSize(0, 0));
4430 }
4431
4432 if (error == KERN_FAILURE) {
4433 error = kIOReturnCannotWire;
4434 } else if (error == KERN_MEMORY_ERROR) {
4435 error = kIOReturnNoResources;
4436 }
4437
4438 return error;
4439}
4440
4441bool
4442IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4443{
4444 ioGMDData * dataP;
4445
4446 if (size > UINT_MAX) {
4447 return false;
4448 }
4449 if (!_memoryEntries) {
4450 _memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(capacity: size);
4451 if (!_memoryEntries) {
4452 return false;
4453 }
4454 } else if (!_memoryEntries->initWithCapacity(capacity: size)) {
4455 return false;
4456 }
4457
4458 _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4459 dataP = getDataP(_memoryEntries);
4460
4461 if (mapper == kIOMapperWaitSystem) {
4462 IOMapper::checkForSystemMapper();
4463 mapper = IOMapper::gSystem;
4464 }
4465 dataP->fMapper = mapper;
4466 dataP->fPageCnt = 0;
4467 dataP->fMappedBase = 0;
4468 dataP->fDMAMapNumAddressBits = 64;
4469 dataP->fDMAMapAlignment = 0;
4470 dataP->fPreparationID = kIOPreparationIDUnprepared;
4471 dataP->fCompletionError = false;
4472 dataP->fMappedBaseValid = false;
4473
4474 return true;
4475}
4476
4477IOReturn
4478IOMemoryDescriptor::dmaMap(
4479 IOMapper * mapper,
4480 IOMemoryDescriptor * memory,
4481 IODMACommand * command,
4482 const IODMAMapSpecification * mapSpec,
4483 uint64_t offset,
4484 uint64_t length,
4485 uint64_t * mapAddress,
4486 uint64_t * mapLength)
4487{
4488 IOReturn err;
4489 uint32_t mapOptions;
4490
4491 mapOptions = 0;
4492 mapOptions |= kIODMAMapReadAccess;
4493 if (!(kIOMemoryPreparedReadOnly & _flags)) {
4494 mapOptions |= kIODMAMapWriteAccess;
4495 }
4496
4497 err = mapper->iovmMapMemory(memory, descriptorOffset: offset, length, mapOptions,
4498 mapSpecification: mapSpec, dmaCommand: command, NULL, mapAddress, mapLength);
4499
4500 if (kIOReturnSuccess == err) {
4501 dmaMapRecord(mapper, command, mapLength: *mapLength);
4502 }
4503
4504 return err;
4505}
4506
4507void
4508IOMemoryDescriptor::dmaMapRecord(
4509 IOMapper * mapper,
4510 IODMACommand * command,
4511 uint64_t mapLength)
4512{
4513 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4514 kern_allocation_name_t alloc;
4515 int16_t prior;
4516
4517 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4518 kern_allocation_update_size(allocation: mapper->fAllocName, delta: mapLength, NULL);
4519 }
4520
4521 if (!command) {
4522 return;
4523 }
4524 prior = OSAddAtomic16(amount: 1, address: &_dmaReferences);
4525 if (!prior) {
4526 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4527 _mapName = alloc;
4528 mapLength = _length;
4529 kern_allocation_update_subtotal(allocation: alloc, subtag: _kernelTag, delta: mapLength);
4530 } else {
4531 _mapName = NULL;
4532 }
4533 }
4534}
4535
4536IOReturn
4537IOMemoryDescriptor::dmaUnmap(
4538 IOMapper * mapper,
4539 IODMACommand * command,
4540 uint64_t offset,
4541 uint64_t mapAddress,
4542 uint64_t mapLength)
4543{
4544 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4545 IOReturn ret;
4546 kern_allocation_name_t alloc;
4547 kern_allocation_name_t mapName;
4548 int16_t prior;
4549
4550 mapName = NULL;
4551 prior = 0;
4552 if (command) {
4553 mapName = _mapName;
4554 if (_dmaReferences) {
4555 prior = OSAddAtomic16(amount: -1, address: &_dmaReferences);
4556 } else {
4557 panic("_dmaReferences underflow");
4558 }
4559 }
4560
4561 if (!mapLength) {
4562 traceInterval.setEndArg1(kIOReturnSuccess);
4563 return kIOReturnSuccess;
4564 }
4565
4566 ret = mapper->iovmUnmapMemory(memory: this, dmaCommand: command, mapAddress, mapLength);
4567
4568 if ((alloc = mapper->fAllocName)) {
4569 kern_allocation_update_size(allocation: alloc, delta: -mapLength, NULL);
4570 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4571 mapLength = _length;
4572 kern_allocation_update_subtotal(allocation: mapName, subtag: _kernelTag, delta: -mapLength);
4573 }
4574 }
4575
4576 traceInterval.setEndArg1(ret);
4577 return ret;
4578}
4579
4580IOReturn
4581IOGeneralMemoryDescriptor::dmaMap(
4582 IOMapper * mapper,
4583 IOMemoryDescriptor * memory,
4584 IODMACommand * command,
4585 const IODMAMapSpecification * mapSpec,
4586 uint64_t offset,
4587 uint64_t length,
4588 uint64_t * mapAddress,
4589 uint64_t * mapLength)
4590{
4591 IOReturn err = kIOReturnSuccess;
4592 ioGMDData * dataP;
4593 IOOptionBits type = _flags & kIOMemoryTypeMask;
4594
4595 *mapAddress = 0;
4596 if (kIOMemoryHostOnly & _flags) {
4597 return kIOReturnSuccess;
4598 }
4599 if (kIOMemoryRemote & _flags) {
4600 return kIOReturnNotAttached;
4601 }
4602
4603 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4604 || offset || (length != _length)) {
4605 err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4606 } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4607 const ioPLBlock * ioplList = getIOPLList(dataP);
4608 upl_page_info_t * pageList;
4609 uint32_t mapOptions = 0;
4610
4611 IODMAMapSpecification mapSpec;
4612 bzero(s: &mapSpec, n: sizeof(mapSpec));
4613 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4614 mapSpec.alignment = dataP->fDMAMapAlignment;
4615
4616 // For external UPLs the fPageInfo field points directly to
4617 // the upl's upl_page_info_t array.
4618 if (ioplList->fFlags & kIOPLExternUPL) {
4619 pageList = (upl_page_info_t *) ioplList->fPageInfo;
4620 mapOptions |= kIODMAMapPagingPath;
4621 } else {
4622 pageList = getPageList(dataP);
4623 }
4624
4625 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4626 mapOptions |= kIODMAMapPageListFullyOccupied;
4627 }
4628
4629 assert(dataP->fDMAAccess);
4630 mapOptions |= dataP->fDMAAccess;
4631
4632 // Check for direct device non-paged memory
4633 if (ioplList->fFlags & kIOPLOnDevice) {
4634 mapOptions |= kIODMAMapPhysicallyContiguous;
4635 }
4636
4637 IODMAMapPageList dmaPageList =
4638 {
4639 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
4640 .pageListCount = _pages,
4641 .pageList = &pageList[0]
4642 };
4643 err = mapper->iovmMapMemory(memory, descriptorOffset: offset, length, mapOptions, mapSpecification: &mapSpec,
4644 dmaCommand: command, pageList: &dmaPageList, mapAddress, mapLength);
4645
4646 if (kIOReturnSuccess == err) {
4647 dmaMapRecord(mapper, command, mapLength: *mapLength);
4648 }
4649 }
4650
4651 return err;
4652}
4653
4654/*
4655 * prepare
4656 *
4657 * Prepare the memory for an I/O transfer. This involves paging in
4658 * the memory, if necessary, and wiring it down for the duration of
4659 * the transfer. The complete() method completes the processing of
4660 * the memory after the I/O transfer finishes. This method need not be
4661 * called for non-pageable memory.
4662 */
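/*
 * Illustrative usage (a minimal sketch, not part of this file; the client
 * address, length and task below are hypothetical placeholders):
 *
 *   OSSharedPtr<IOMemoryDescriptor> md = IOMemoryDescriptor::withAddressRange(
 *       clientVA, clientLen, kIODirectionOut, clientTask);
 *   if (md && (kIOReturnSuccess == md->prepare(kIODirectionOut))) {
 *       // pages are wired here; safe to hand the descriptor to a DMA engine
 *       md->complete(kIODirectionOut);   // always pair complete() with prepare()
 *   }
 */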
4663
4664IOReturn
4665IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4666{
4667 IOReturn error = kIOReturnSuccess;
4668 IOOptionBits type = _flags & kIOMemoryTypeMask;
4669 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4670
4671 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4672 traceInterval.setEndArg1(kIOReturnSuccess);
4673 return kIOReturnSuccess;
4674 }
4675
4676 assert(!(kIOMemoryRemote & _flags));
4677 if (kIOMemoryRemote & _flags) {
4678 traceInterval.setEndArg1(kIOReturnNotAttached);
4679 return kIOReturnNotAttached;
4680 }
4681
4682 if (_prepareLock) {
4683 IOLockLock(_prepareLock);
4684 }
4685
4686 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4687 if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4688 error = kIOReturnNotReady;
4689 goto finish;
4690 }
4691 error = wireVirtual(forDirection);
4692 }
4693
4694 if (kIOReturnSuccess == error) {
4695 if (1 == ++_wireCount) {
4696 if (kIOMemoryClearEncrypt & _flags) {
4697 performOperation(options: kIOMemoryClearEncrypted, offset: 0, length: _length);
4698 }
4699
4700 ktraceEmitPhysicalSegments();
4701 }
4702 }
4703
4704finish:
4705
4706 if (_prepareLock) {
4707 IOLockUnlock(_prepareLock);
4708 }
4709 traceInterval.setEndArg1(error);
4710
4711 return error;
4712}
4713
4714/*
4715 * complete
4716 *
4717 * Complete processing of the memory after an I/O transfer finishes.
4718 * This method should not be called unless a prepare was previously
4719 * issued; prepare() and complete() must occur in pairs, before and
4720 * after an I/O transfer involving pageable memory.
4721 */
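/*
 * Error-path sketch (illustrative only; 'dmaFailed' is a hypothetical flag):
 * kIODirectionCompleteWithError records the failure so the wired pages are
 * aborted rather than committed once the wire count drops to zero.
 *
 *   md->complete(dmaFailed ? kIODirectionCompleteWithError : kIODirectionNone);
 */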
4722
4723IOReturn
4724IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4725{
4726 IOOptionBits type = _flags & kIOMemoryTypeMask;
4727 ioGMDData * dataP;
4728 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4729
4730 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4731 traceInterval.setEndArg1(kIOReturnSuccess);
4732 return kIOReturnSuccess;
4733 }
4734
4735 assert(!(kIOMemoryRemote & _flags));
4736 if (kIOMemoryRemote & _flags) {
4737 traceInterval.setEndArg1(kIOReturnNotAttached);
4738 return kIOReturnNotAttached;
4739 }
4740
4741 if (_prepareLock) {
4742 IOLockLock(_prepareLock);
4743 }
4744 do{
4745 assert(_wireCount);
4746 if (!_wireCount) {
4747 break;
4748 }
4749 dataP = getDataP(_memoryEntries);
4750 if (!dataP) {
4751 break;
4752 }
4753
4754 if (kIODirectionCompleteWithError & forDirection) {
4755 dataP->fCompletionError = true;
4756 }
4757
4758 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4759 performOperation(options: kIOMemorySetEncrypted, offset: 0, length: _length);
4760 }
4761
4762 _wireCount--;
4763 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4764 ioPLBlock *ioplList = getIOPLList(dataP);
4765 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4766
4767 if (_wireCount) {
4768 // kIODirectionCompleteWithDataValid & forDirection
4769 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4770 vm_tag_t tag;
4771 tag = (typeof(tag))getVMTag(map: kernel_map);
4772 for (ind = 0; ind < count; ind++) {
4773 if (ioplList[ind].fIOPL) {
4774 iopl_valid_data(upl_ptr: ioplList[ind].fIOPL, tag);
4775 }
4776 }
4777 }
4778 } else {
4779 if (_dmaReferences) {
4780 panic("complete() while dma active");
4781 }
4782
4783 if (dataP->fMappedBaseValid) {
4784 dmaUnmap(mapper: dataP->fMapper, NULL, offset: 0, mapAddress: dataP->fMappedBase, mapLength: dataP->fMappedLength);
4785 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4786 }
4787#if IOTRACKING
4788 if (dataP->fWireTracking.link.next) {
4789 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4790 }
4791#endif /* IOTRACKING */
4792 // Only complete iopls that we created, i.e. those for the virtual types
4793 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4794 for (ind = 0; ind < count; ind++) {
4795 if (ioplList[ind].fIOPL) {
4796 if (dataP->fCompletionError) {
4797 upl_abort(upl_object: ioplList[ind].fIOPL, abort_cond: 0 /*!UPL_ABORT_DUMP_PAGES*/);
4798 } else {
4799 upl_commit(upl_object: ioplList[ind].fIOPL, NULL, page_listCnt: 0);
4800 }
4801 upl_deallocate(upl: ioplList[ind].fIOPL);
4802 }
4803 }
4804 } else if (kIOMemoryTypeUPL == type) {
4805 upl_set_referenced(upl: ioplList[0].fIOPL, value: false);
4806 }
4807
4808 _memoryEntries->setLength(computeDataSize(0, 0));
4809
4810 dataP->fPreparationID = kIOPreparationIDUnprepared;
4811 _flags &= ~kIOMemoryPreparedReadOnly;
4812
4813 if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4814 IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), a: getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4815 }
4816 }
4817 }
4818 }while (false);
4819
4820 if (_prepareLock) {
4821 IOLockUnlock(_prepareLock);
4822 }
4823
4824 traceInterval.setEndArg1(kIOReturnSuccess);
4825 return kIOReturnSuccess;
4826}
4827
4828IOOptionBits
4829IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
4830{
4831 IOOptionBits createOptions = 0;
4832
4833 if (!(kIOMap64Bit & options)) {
4834 panic("IOMemoryDescriptor::makeMapping !64bit");
4835 }
4836 if (!(kIOMapReadOnly & options)) {
4837 createOptions |= kIOMemoryReferenceWrite;
4838#if DEVELOPMENT || DEBUG
4839 if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4840 && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4841 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4842 }
4843#endif
4844 }
4845 return createOptions;
4846}
4847
4848/*
4849 * Attempt to create any kIOMemoryMapCopyOnWrite named entry needed ahead of the global
4850 * lock taken in IOMemoryDescriptor::makeMapping() since it may allocate real pages on
4851 * creation.
4852 */
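/*
 * Illustrative only (hedged sketch, not from this file): a task-backed
 * descriptor whose creation options included kIOMemoryMapCopyOnWrite reaches
 * this override on its first mapping, so the backing named entry is built
 * here before IOMemoryDescriptor::makeMapping() takes the global lock.
 *
 *   // 'md' is assumed to carry kIOMemoryMapCopyOnWrite in its creation options
 *   OSSharedPtr<IOMemoryMap> cowMap =
 *       md->createMappingInTask(userTask, 0, kIOMapAnywhere | kIOMapReadOnly);
 */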
4853
4854IOMemoryMap *
4855IOGeneralMemoryDescriptor::makeMapping(
4856 IOMemoryDescriptor * owner,
4857 task_t __intoTask,
4858 IOVirtualAddress __address,
4859 IOOptionBits options,
4860 IOByteCount __offset,
4861 IOByteCount __length )
4862{
4863 IOReturn err = kIOReturnSuccess;
4864
4865 if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
4866 if (!_memRef) {
4867 struct IOMemoryReference * newRef;
4868 err = memoryReferenceCreate(options: memoryReferenceCreateOptions(options, mapping: (IOMemoryMap *) __address), reference: &newRef);
4869 if (kIOReturnSuccess == err) {
4870 if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
4871 memoryReferenceFree(ref: newRef);
4872 }
4873 }
4874 }
4875 }
4876 if (kIOReturnSuccess != err) {
4877 return NULL;
4878 }
4879 return IOMemoryDescriptor::makeMapping(
4880 owner, intoTask: __intoTask, atAddress: __address, options, offset: __offset, length: __length);
4881}
4882
4883IOReturn
4884IOGeneralMemoryDescriptor::doMap(
4885 vm_map_t __addressMap,
4886 IOVirtualAddress * __address,
4887 IOOptionBits options,
4888 IOByteCount __offset,
4889 IOByteCount __length )
4890{
4891 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4892 traceInterval.setEndArg1(kIOReturnSuccess);
4893#ifndef __LP64__
4894 if (!(kIOMap64Bit & options)) {
4895 panic("IOGeneralMemoryDescriptor::doMap !64bit");
4896 }
4897#endif /* !__LP64__ */
4898
4899 kern_return_t err;
4900
4901 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
4902 mach_vm_size_t offset = mapping->fOffset + __offset;
4903 mach_vm_size_t length = mapping->fLength;
4904
4905 IOOptionBits type = _flags & kIOMemoryTypeMask;
4906 Ranges vec = _ranges;
4907
4908 mach_vm_address_t range0Addr = 0;
4909 mach_vm_size_t range0Len = 0;
4910
4911 if ((offset >= _length) || ((offset + length) > _length)) {
4912 traceInterval.setEndArg1(kIOReturnBadArgument);
4913 DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4914 // assert(offset == 0 && _length == 0 && length == 0);
4915 return kIOReturnBadArgument;
4916 }
4917
4918 assert(!(kIOMemoryRemote & _flags));
4919 if (kIOMemoryRemote & _flags) {
4920 return 0;
4921 }
4922
4923 if (vec.v) {
4924 getAddrLenForInd(addr&: range0Addr, len&: range0Len, type, r: vec, ind: 0, task: _task);
4925 }
4926
4927 // mapping source == dest? (could be much better)
4928 if (_task
4929 && (mapping->fAddressTask == _task)
4930 && (mapping->fAddressMap == get_task_map(_task))
4931 && (options & kIOMapAnywhere)
4932 && (!(kIOMapUnique & options))
4933 && (!(kIOMapGuardedMask & options))
4934 && (1 == _rangesCount)
4935 && (0 == offset)
4936 && range0Addr
4937 && (length <= range0Len)) {
4938 mapping->fAddress = range0Addr;
4939 mapping->fOptions |= kIOMapStatic;
4940
4941 return kIOReturnSuccess;
4942 }
4943
4944 if (!_memRef) {
4945 err = memoryReferenceCreate(options: memoryReferenceCreateOptions(options, mapping), reference: &_memRef);
4946 if (kIOReturnSuccess != err) {
4947 traceInterval.setEndArg1(err);
4948 DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4949 return err;
4950 }
4951 }
4952
4953 memory_object_t pager;
4954 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4955
4956 // <upl_transpose //
4957 if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4958 do{
4959 upl_t redirUPL2;
4960 upl_size_t size;
4961 upl_control_flags_t flags;
4962 unsigned int lock_count;
4963
4964 if (!_memRef || (1 != _memRef->count)) {
4965 err = kIOReturnNotReadable;
4966 DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4967 break;
4968 }
4969
4970 size = (upl_size_t) round_page(x: mapping->fLength);
4971 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4972 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4973
4974 if (KERN_SUCCESS != memory_object_iopl_request(port: _memRef->entries[0].entry, offset: 0, upl_size: &size, upl_ptr: &redirUPL2,
4975 NULL, NULL,
4976 flags: &flags, tag: (vm_tag_t) getVMTag(map: kernel_map))) {
4977 redirUPL2 = NULL;
4978 }
4979
4980 for (lock_count = 0;
4981 IORecursiveLockHaveLock(lock: gIOMemoryLock);
4982 lock_count++) {
4983 UNLOCK;
4984 }
4985 err = upl_transpose(upl1: redirUPL2, upl2: mapping->fRedirUPL);
4986 for (;
4987 lock_count;
4988 lock_count--) {
4989 LOCK;
4990 }
4991
4992 if (kIOReturnSuccess != err) {
4993 IOLog(format: "upl_transpose(%x)\n", err);
4994 err = kIOReturnSuccess;
4995 }
4996
4997 if (redirUPL2) {
4998 upl_commit(upl_object: redirUPL2, NULL, page_listCnt: 0);
4999 upl_deallocate(upl: redirUPL2);
5000 redirUPL2 = NULL;
5001 }
5002 {
5003 // swap the memEntries since they now refer to different vm_objects
5004 IOMemoryReference * me = _memRef;
5005 _memRef = mapping->fMemory->_memRef;
5006 mapping->fMemory->_memRef = me;
5007 }
5008 if (pager) {
5009 err = populateDevicePager( pager, addressMap: mapping->fAddressMap, address: mapping->fAddress, sourceOffset: offset, length, options );
5010 }
5011 }while (false);
5012 }
5013 // upl_transpose> //
5014 else {
5015 err = memoryReferenceMap(ref: _memRef, map: mapping->fAddressMap, inoffset: offset, size: length, options, inaddr: &mapping->fAddress);
5016 if (err) {
5017 DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
5018 }
5019#if IOTRACKING
5020 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
5021 // only dram maps in the default-on development case
5022 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
5023 }
5024#endif /* IOTRACKING */
5025 if ((err == KERN_SUCCESS) && pager) {
5026 err = populateDevicePager(pager, addressMap: mapping->fAddressMap, address: mapping->fAddress, sourceOffset: offset, length, options);
5027
5028 if (err != KERN_SUCCESS) {
5029 doUnmap(addressMap: mapping->fAddressMap, logical: (IOVirtualAddress) mapping, length: 0);
5030 } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
5031 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
5032 }
5033 }
5034 }
5035
5036 traceInterval.setEndArg1(err);
5037 if (err) {
5038 DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
5039 }
5040 return err;
5041}
5042
5043#if IOTRACKING
5044IOReturn
5045IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
5046 mach_vm_address_t * address, mach_vm_size_t * size)
5047{
5048#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
5049
5050 IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
5051
5052 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
5053 return kIOReturnNotReady;
5054 }
5055
5056 *task = map->fAddressTask;
5057 *address = map->fAddress;
5058 *size = map->fLength;
5059
5060 return kIOReturnSuccess;
5061}
5062#endif /* IOTRACKING */
5063
5064IOReturn
5065IOGeneralMemoryDescriptor::doUnmap(
5066 vm_map_t addressMap,
5067 IOVirtualAddress __address,
5068 IOByteCount __length )
5069{
5070 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5071 IOReturn ret;
5072 ret = super::doUnmap(addressMap, logical: __address, length: __length);
5073 traceInterval.setEndArg1(ret);
5074 return ret;
5075}
5076
5077/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5078
5079#undef super
5080#define super OSObject
5081
5082OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )
5083
5084OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
5085OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
5086OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
5087OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
5088OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
5089OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
5090OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
5091OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5092
5093/* ex-inline function implementation */
5094IOPhysicalAddress
5095IOMemoryMap::getPhysicalAddress()
5096{
5097 return getPhysicalSegment( offset: 0, NULL );
5098}
5099
5100/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5101
5102bool
5103IOMemoryMap::init(
5104 task_t intoTask,
5105 mach_vm_address_t toAddress,
5106 IOOptionBits _options,
5107 mach_vm_size_t _offset,
5108 mach_vm_size_t _length )
5109{
5110 if (!intoTask) {
5111 return false;
5112 }
5113
5114 if (!super::init()) {
5115 return false;
5116 }
5117
5118 fAddressMap = get_task_map(intoTask);
5119 if (!fAddressMap) {
5120 return false;
5121 }
5122 vm_map_reference(map: fAddressMap);
5123
5124 fAddressTask = intoTask;
5125 fOptions = _options;
5126 fLength = _length;
5127 fOffset = _offset;
5128 fAddress = toAddress;
5129
5130 return true;
5131}
5132
5133bool
5134IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5135{
5136 if (!_memory) {
5137 return false;
5138 }
5139
5140 if (!fSuperMap) {
5141 if ((_offset + fLength) > _memory->getLength()) {
5142 return false;
5143 }
5144 fOffset = _offset;
5145 }
5146
5147
5148 OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5149 if (fMemory) {
5150 if (fMemory != _memory) {
5151 fMemory->removeMapping(mapping: this);
5152 }
5153 }
5154 fMemory = os::move(t&: tempval);
5155
5156 return true;
5157}
5158
5159IOReturn
5160IOMemoryDescriptor::doMap(
5161 vm_map_t __addressMap,
5162 IOVirtualAddress * __address,
5163 IOOptionBits options,
5164 IOByteCount __offset,
5165 IOByteCount __length )
5166{
5167 return kIOReturnUnsupported;
5168}
5169
5170IOReturn
5171IOMemoryDescriptor::handleFault(
5172 void * _pager,
5173 mach_vm_size_t sourceOffset,
5174 mach_vm_size_t length)
5175{
5176 if (kIOMemoryRedirected & _flags) {
5177#if DEBUG
5178 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
5179#endif
5180 do {
5181 SLEEP;
5182 } while (kIOMemoryRedirected & _flags);
5183 }
5184 return kIOReturnSuccess;
5185}
5186
5187IOReturn
5188IOMemoryDescriptor::populateDevicePager(
5189 void * _pager,
5190 vm_map_t addressMap,
5191 mach_vm_address_t address,
5192 mach_vm_size_t sourceOffset,
5193 mach_vm_size_t length,
5194 IOOptionBits options )
5195{
5196 IOReturn err = kIOReturnSuccess;
5197 memory_object_t pager = (memory_object_t) _pager;
5198 mach_vm_size_t size;
5199 mach_vm_size_t bytes;
5200 mach_vm_size_t page;
5201 mach_vm_size_t pageOffset;
5202 mach_vm_size_t pagerOffset;
5203 IOPhysicalLength segLen, chunk;
5204 addr64_t physAddr;
5205 IOOptionBits type;
5206
5207 type = _flags & kIOMemoryTypeMask;
5208
5209 if (reserved->dp.pagerContig) {
5210 sourceOffset = 0;
5211 pagerOffset = 0;
5212 }
5213
5214 physAddr = getPhysicalSegment( offset: sourceOffset, length: &segLen, options: kIOMemoryMapperNone );
5215 assert( physAddr );
5216 pageOffset = physAddr - trunc_page_64( physAddr );
5217 pagerOffset = sourceOffset;
5218
5219 size = length + pageOffset;
5220 physAddr -= pageOffset;
5221
5222 segLen += pageOffset;
5223 bytes = size;
5224 do{
5225 // in the middle of the loop only map whole pages
5226 if (segLen >= bytes) {
5227 segLen = bytes;
5228 } else if (segLen != trunc_page_64(segLen)) {
5229 err = kIOReturnVMError;
5230 }
5231 if (physAddr != trunc_page_64(physAddr)) {
5232 err = kIOReturnBadArgument;
5233 }
5234
5235 if (kIOReturnSuccess != err) {
5236 break;
5237 }
5238
5239#if DEBUG || DEVELOPMENT
5240 if ((kIOMemoryTypeUPL != type)
5241 && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
5242 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
5243 physAddr, (uint64_t)segLen);
5244 }
5245#endif /* DEBUG || DEVELOPMENT */
5246
5247 chunk = (reserved->dp.pagerContig ? round_page(x: segLen) : page_size);
5248 for (page = 0;
5249 (page < segLen) && (KERN_SUCCESS == err);
5250 page += chunk) {
5251 err = device_pager_populate_object(device: pager, offset: pagerOffset,
5252 page_num: (ppnum_t)(atop_64(physAddr + page)), size: chunk);
5253 pagerOffset += chunk;
5254 }
5255
5256 assert(KERN_SUCCESS == err);
5257 if (err) {
5258 break;
5259 }
5260
5261 // This call to vm_fault causes an early pmap level resolution
5262 // of the mappings created above for kernel mappings, since
5263 // faulting in later can't take place from interrupt level.
5264 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
5265 err = vm_fault(map: addressMap,
5266 vaddr: (vm_map_offset_t)trunc_page_64(address),
5267 fault_type: options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
5268 FALSE, VM_KERN_MEMORY_NONE,
5269 THREAD_UNINT, NULL,
5270 pmap_addr: (vm_map_offset_t)0);
5271
5272 if (KERN_SUCCESS != err) {
5273 break;
5274 }
5275 }
5276
5277 sourceOffset += segLen - pageOffset;
5278 address += segLen;
5279 bytes -= segLen;
5280 pageOffset = 0;
5281 }while (bytes && (physAddr = getPhysicalSegment( offset: sourceOffset, length: &segLen, options: kIOMemoryMapperNone )));
5282
5283 if (bytes) {
5284 err = kIOReturnBadArgument;
5285 }
5286
5287 return err;
5288}
5289
5290IOReturn
5291IOMemoryDescriptor::doUnmap(
5292 vm_map_t addressMap,
5293 IOVirtualAddress __address,
5294 IOByteCount __length )
5295{
5296 IOReturn err;
5297 IOMemoryMap * mapping;
5298 mach_vm_address_t address;
5299 mach_vm_size_t length;
5300
5301 if (__length) {
5302 panic("doUnmap");
5303 }
5304
5305 mapping = (IOMemoryMap *) __address;
5306 addressMap = mapping->fAddressMap;
5307 address = mapping->fAddress;
5308 length = mapping->fLength;
5309
5310 if (kIOMapOverwrite & mapping->fOptions) {
5311 err = KERN_SUCCESS;
5312 } else {
5313 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
5314 addressMap = IOPageableMapForAddress( address );
5315 }
5316#if DEBUG
5317 if (kIOLogMapping & gIOKitDebug) {
5318 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
5319 addressMap, address, length );
5320 }
5321#endif
5322 err = IOMemoryDescriptorMapDealloc(options: mapping->fOptions, map: addressMap, addr: address, size: length );
5323 if (vm_map_page_mask(map: addressMap) < PAGE_MASK) {
5324 DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
5325 }
5326 }
5327
5328#if IOTRACKING
5329 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
5330#endif /* IOTRACKING */
5331
5332 return err;
5333}
5334
5335IOReturn
5336IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
5337{
5338 IOReturn err = kIOReturnSuccess;
5339 IOMemoryMap * mapping = NULL;
5340 OSSharedPtr<OSIterator> iter;
5341
5342 LOCK;
5343
5344 if (doRedirect) {
5345 _flags |= kIOMemoryRedirected;
5346 } else {
5347 _flags &= ~kIOMemoryRedirected;
5348 }
5349
5350 do {
5351 if ((iter = OSCollectionIterator::withCollection( inColl: _mappings.get()))) {
5352 memory_object_t pager;
5353
5354 if (reserved) {
5355 pager = (memory_object_t) reserved->dp.devicePager;
5356 } else {
5357 pager = MACH_PORT_NULL;
5358 }
5359
5360 while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
5361 mapping->redirect( intoTask: safeTask, redirect: doRedirect );
5362 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
5363 err = populateDevicePager(pager: pager, addressMap: mapping->fAddressMap, address: mapping->fAddress, sourceOffset: mapping->fOffset, length: mapping->fLength, options: kIOMapDefaultCache );
5364 }
5365 }
5366
5367 iter.reset();
5368 }
5369 } while (false);
5370
5371 if (!doRedirect) {
5372 WAKEUP;
5373 }
5374
5375 UNLOCK;
5376
5377#ifndef __LP64__
5378 // temporary binary compatibility
5379 IOSubMemoryDescriptor * subMem;
5380 if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
5381 err = subMem->redirect( safeTask, doRedirect );
5382 } else {
5383 err = kIOReturnSuccess;
5384 }
5385#endif /* !__LP64__ */
5386
5387 return err;
5388}
5389
5390IOReturn
5391IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
5392{
5393 IOReturn err = kIOReturnSuccess;
5394
5395 if (fSuperMap) {
5396// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
5397 } else {
5398 LOCK;
5399
5400 do{
5401 if (!fAddress) {
5402 break;
5403 }
5404 if (!fAddressMap) {
5405 break;
5406 }
5407
5408 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
5409 && (0 == (fOptions & kIOMapStatic))) {
5410 IOUnmapPages( map: fAddressMap, va: fAddress, length: fLength );
5411 err = kIOReturnSuccess;
5412#if DEBUG
5413 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
5414#endif
5415 } else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
5416 IOOptionBits newMode;
5417 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
5418 IOProtectCacheMode(map: fAddressMap, va: fAddress, length: fLength, options: newMode);
5419 }
5420 }while (false);
5421 UNLOCK;
5422 }
5423
5424 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5425 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
5426 && safeTask
5427 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
5428 fMemory->redirect(safeTask, doRedirect);
5429 }
5430
5431 return err;
5432}
5433
5434IOReturn
5435IOMemoryMap::unmap( void )
5436{
5437 IOReturn err;
5438
5439 LOCK;
5440
5441 if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5442 && (0 == (kIOMapStatic & fOptions))) {
5443 err = fMemory->doUnmap(addressMap: fAddressMap, address: (IOVirtualAddress) this, length: 0);
5444 } else {
5445 err = kIOReturnSuccess;
5446 }
5447
5448 if (fAddressMap) {
5449 vm_map_deallocate(map: fAddressMap);
5450 fAddressMap = NULL;
5451 }
5452
5453 fAddress = 0;
5454
5455 UNLOCK;
5456
5457 return err;
5458}
5459
5460void
5461IOMemoryMap::taskDied( void )
5462{
5463 LOCK;
5464 if (fUserClientUnmap) {
5465 unmap();
5466 }
5467#if IOTRACKING
5468 else {
5469 IOTrackingRemoveUser(gIOMapTracking, &fTracking);
5470 }
5471#endif /* IOTRACKING */
5472
5473 if (fAddressMap) {
5474 vm_map_deallocate(map: fAddressMap);
5475 fAddressMap = NULL;
5476 }
5477 fAddressTask = NULL;
5478 fAddress = 0;
5479 UNLOCK;
5480}
5481
5482IOReturn
5483IOMemoryMap::userClientUnmap( void )
5484{
5485 fUserClientUnmap = true;
5486 return kIOReturnSuccess;
5487}
5488
5489// Overload the release mechanism. All mappings must be a member
5490// of a memory descriptor's _mappings set, which means that there
5491// are always 2 references on a mapping. When either of these
5492// references is released we need to free ourselves.
5493void
5494IOMemoryMap::taggedRelease(const void *tag) const
5495{
5496 LOCK;
5497 super::taggedRelease(tag, freeWhen: 2);
5498 UNLOCK;
5499}
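/*
 * Reference-accounting sketch (illustrative only, not from this file):
 *
 *   OSSharedPtr<IOMemoryMap> map = md->map();  // two references now exist: the
 *                                              // returned one and the one held
 *                                              // in md's _mappings set
 *   map.reset();                               // dropping either reference hits
 *                                              // the freeWhen count of 2, so
 *                                              // free() runs, unmaps, and
 *                                              // removes the map from the set
 */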
5500
5501void
5502IOMemoryMap::free()
5503{
5504 unmap();
5505
5506 if (fMemory) {
5507 LOCK;
5508 fMemory->removeMapping(mapping: this);
5509 UNLOCK;
5510 fMemory.reset();
5511 }
5512
5513 if (fSuperMap) {
5514 fSuperMap.reset();
5515 }
5516
5517 if (fRedirUPL) {
5518 upl_commit(upl_object: fRedirUPL, NULL, page_listCnt: 0);
5519 upl_deallocate(upl: fRedirUPL);
5520 }
5521
5522 super::free();
5523}
5524
5525IOByteCount
5526IOMemoryMap::getLength()
5527{
5528 return fLength;
5529}
5530
5531IOVirtualAddress
5532IOMemoryMap::getVirtualAddress()
5533{
5534#ifndef __LP64__
5535 if (fSuperMap) {
5536 fSuperMap->getVirtualAddress();
5537 } else if (fAddressMap
5538 && vm_map_is_64bit(fAddressMap)
5539 && (sizeof(IOVirtualAddress) < 8)) {
5540 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
5541 }
5542#endif /* !__LP64__ */
5543
5544 return fAddress;
5545}
5546
5547#ifndef __LP64__
5548mach_vm_address_t
5549IOMemoryMap::getAddress()
5550{
5551 return fAddress;
5552}
5553
5554mach_vm_size_t
5555IOMemoryMap::getSize()
5556{
5557 return fLength;
5558}
5559#endif /* !__LP64__ */
5560
5561
5562task_t
5563IOMemoryMap::getAddressTask()
5564{
5565 if (fSuperMap) {
5566 return fSuperMap->getAddressTask();
5567 } else {
5568 return fAddressTask;
5569 }
5570}
5571
5572IOOptionBits
5573IOMemoryMap::getMapOptions()
5574{
5575 return fOptions;
5576}
5577
5578IOMemoryDescriptor *
5579IOMemoryMap::getMemoryDescriptor()
5580{
5581 return fMemory.get();
5582}
5583
5584IOMemoryMap *
5585IOMemoryMap::copyCompatible(
5586 IOMemoryMap * newMapping )
5587{
5588 task_t task = newMapping->getAddressTask();
5589 mach_vm_address_t toAddress = newMapping->fAddress;
5590 IOOptionBits _options = newMapping->fOptions;
5591 mach_vm_size_t _offset = newMapping->fOffset;
5592 mach_vm_size_t _length = newMapping->fLength;
5593
5594 if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5595 return NULL;
5596 }
5597 if ((fOptions ^ _options) & kIOMapReadOnly) {
5598 return NULL;
5599 }
5600 if ((fOptions ^ _options) & kIOMapGuardedMask) {
5601 return NULL;
5602 }
5603 if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5604 && ((fOptions ^ _options) & kIOMapCacheMask)) {
5605 return NULL;
5606 }
5607
5608 if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5609 return NULL;
5610 }
5611
5612 if (_offset < fOffset) {
5613 return NULL;
5614 }
5615
5616 _offset -= fOffset;
5617
5618 if ((_offset + _length) > fLength) {
5619 return NULL;
5620 }
5621
5622 if ((fLength == _length) && (!_offset)) {
5623 retain();
5624 newMapping = this;
5625 } else {
5626 newMapping->fSuperMap.reset(p: this, OSRetain);
5627 newMapping->fOffset = fOffset + _offset;
5628 newMapping->fAddress = fAddress + _offset;
5629 }
5630
5631 return newMapping;
5632}
5633
5634IOReturn
5635IOMemoryMap::wireRange(
5636 uint32_t options,
5637 mach_vm_size_t offset,
5638 mach_vm_size_t length)
5639{
5640 IOReturn kr;
5641 mach_vm_address_t start = trunc_page_64(fAddress + offset);
5642 mach_vm_address_t end = round_page_64(x: fAddress + offset + length);
5643 vm_prot_t prot;
5644
5645 prot = (kIODirectionOutIn & options);
5646 if (prot) {
5647 kr = vm_map_wire_kernel(map: fAddressMap, start, end, access_type: prot, tag: (vm_tag_t) fMemory->getVMTag(map: kernel_map), FALSE);
5648 } else {
5649 kr = vm_map_unwire(map: fAddressMap, start, end, FALSE);
5650 }
5651
5652 return kr;
5653}
5654
5655
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOMemoryDescriptor::initialize( void )
{
	if (NULL == gIOMemoryLock) {
		gIOMemoryLock = IORecursiveLockAlloc();
	}

	gIOLastPage = IOGetLastPageNumber();
}

void
IOMemoryDescriptor::free( void )
{
	if (_mappings) {
		_mappings.reset();
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		IOFreeType(reserved, IOMemoryDescriptorReserved);
		reserved = NULL;
	}
	super::free();
}

OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::setMapping(
	task_t intoTask,
	IOVirtualAddress mapAddress,
	IOOptionBits options )
{
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}

OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	IOOptionBits options )
{
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}

#ifndef __LP64__
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t intoTask,
	IOVirtualAddress atAddress,
	IOOptionBits options,
	IOByteCount offset,
	IOByteCount length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */

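/*
 * Create a mapping of this descriptor in the given task.  A length of 0
 * maps the whole descriptor.  A transient IOMemoryMap carries the request
 * parameters into makeMapping(), which either reuses a compatible existing
 * mapping or performs the actual map operation.
 */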
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t intoTask,
	mach_vm_address_t atAddress,
	IOOptionBits options,
	mach_vm_size_t offset,
	mach_vm_size_t length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	if (mapping) {
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = nullptr;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}

#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    IOByteCount offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif

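/*
 * Retarget an existing mapping at a new backing memory descriptor.  The
 * current pages may be blocked behind a UPL and unmapped, then the new
 * descriptor is mapped over the same address range via makeMapping() with
 * kIOMapUnique | kIOMapReference.
 */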
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    mach_vm_size_t offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		do{
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					if ((false)) {
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) {
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}

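/*
 * Central mapping factory.  __address carries a transient IOMemoryMap that
 * describes the request (kIOMap64Bit is mandatory).  Static mappings are
 * recorded as-is, kIOMapUnique requests against physical descriptors are
 * rerouted through a temporary physical-range descriptor, and otherwise a
 * compatible existing mapping is reused before doMap() is asked to create
 * a new one.
 */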
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor * owner,
	task_t __intoTask,
	IOVirtualAddress __address,
	IOOptionBits options,
	IOByteCount __offset,
	IOByteCount __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result = NULL;

	IOMemoryMap * mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset = mapping->fOffset + __offset;
	mach_vm_size_t length = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount physLen;

//	    if (owner != this) continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
						addMapping(result);
						result->setMemoryDescriptor(this, offset);
						return true;
					}
					return false;
				});
			}
			if (result || (options & kIOMapReference)) {
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}

void
IOMemoryDescriptor::addMapping(
	IOMemoryMap * mapping )
{
	if (mapping) {
		if (NULL == _mappings) {
			_mappings = OSSet::withCapacity(1);
		}
		if (_mappings) {
			_mappings->setObject( mapping );
		}
	}
}

void
IOMemoryDescriptor::removeMapping(
	IOMemoryMap * mapping )
{
	if (_mappings) {
		_mappings->removeObject( mapping);
	}
}

void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	_iomapperOptions = options;
}

uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	return _iomapperOptions;
}

#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void * address,
    IOByteCount length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount length,
    IODirection direction,
    task_t task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount length,
	IODirection direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32 withCount,
	IODirection direction,
	task_t task,
	bool asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
    UInt32 withCount,
    IODirection direction,
    bool asReference)
{
	return false;
}

void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

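/*
 * Serialize the descriptor's ranges as an OSArray of dictionaries, each
 * holding an "address" and "length" OSNumber.  The ranges are copied out
 * under the lock so that no allocation happens while it is held.
 */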
bool
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
	OSSharedPtr<OSSymbol const> keys[2] = {NULL};
	OSSharedPtr<OSObject> values[2] = {NULL};
	OSSharedPtr<OSArray> array;

	struct SerData {
		user_addr_t address;
		user_size_t length;
	};

	unsigned int index;

	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (s == NULL) {
		return false;
	}

	array = OSArray::withCapacity(4);
	if (!array) {
		return false;
	}

	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
	if (!vcopy) {
		return false;
	}

	keys[0] = OSSymbol::withCString("address");
	keys[1] = OSSymbol::withCString("length");

	// Copy the volatile data so we don't have to allocate memory
	// while the lock is held.
	LOCK;
	if (vcopy.size() == _rangesCount) {
		Ranges vec = _ranges;
		for (index = 0; index < vcopy.size(); index++) {
			mach_vm_address_t addr; mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, index, _task);
			vcopy[index].address = addr;
			vcopy[index].length = len;
		}
	} else {
		// The descriptor changed out from under us. Give up.
		UNLOCK;
		return false;
	}
	UNLOCK;

	for (index = 0; index < vcopy.size(); index++) {
		user_addr_t addr = vcopy[index].address;
		IOByteCount len = (IOByteCount) vcopy[index].length;
		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
		if (values[0] == NULL) {
			return false;
		}
		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
		if (values[1] == NULL) {
			return false;
		}
		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
		if (dict == NULL) {
			return false;
		}
		array->setObject(dict.get());
		dict.reset();
		values[0].reset();
		values[1].reset();
	}

	return array->serialize(s);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* in reality this is an ioGMDData + upl_page_info_t + ioPLBlock */
KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
    struct ioGMDData, struct ioPLBlock, KT_DEFAULT);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}

OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
	if (me && !me->initWithCapacity(capacity)) {
		return nullptr;
	}
	return me;
}

bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	if (!_data && capacity) {
		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}

void
_IOMemoryDescriptorMixedData::free()
{
	freeMemory();
	OSObject::free();
}

void
_IOMemoryDescriptorMixedData::freeMemory()
{
	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}

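/*
 * Append length bytes to the buffer, growing it via setLength() if needed.
 * A NULL bytes pointer extends the length without copying any data.
 */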
bool
_IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
{
	const auto oldLength = getLength();
	size_t newLength;
	if (os_add_overflow(oldLength, length, &newLength)) {
		return false;
	}

	if (!setLength(newLength)) {
		return false;
	}

	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
	if (bytes) {
		bcopy(bytes, dest, length);
	}

	return true;
}

bool
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	if (!_data || (length > _capacity)) {
		void *newData;

		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
		    NULL);
		if (!newData) {
			return false;
		}

		_data = newData;
		_capacity = length;
	}

	_length = length;
	return true;
}

const void *
_IOMemoryDescriptorMixedData::getBytes() const
{
	return _length ? _data : nullptr;
}

size_t
_IOMemoryDescriptorMixedData::getLength() const
{
	return _data ? _length : 0;
}