1 | /* |
2 | * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #define IOKIT_ENABLE_SHARED_PTR |
30 | |
31 | #include <IOKit/assert.h> |
32 | |
33 | #include <libkern/OSTypes.h> |
34 | #include <libkern/OSByteOrder.h> |
35 | #include <libkern/OSDebug.h> |
36 | |
37 | #include <IOKit/IOReturn.h> |
38 | #include <IOKit/IOLib.h> |
39 | #include <IOKit/IODMACommand.h> |
40 | #include <IOKit/IOMapper.h> |
41 | #include <IOKit/IOMemoryDescriptor.h> |
42 | #include <IOKit/IOBufferMemoryDescriptor.h> |
43 | |
44 | #include "IOKitKernelInternal.h" |
45 | |
46 | #define MAPTYPE(type) ((UInt) (type) & kTypeMask) |
47 | #define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent) |
48 | |
49 | enum{ |
50 | kWalkSyncIn = 0x01,// bounce -> md |
51 | kWalkSyncOut = 0x02,// bounce <- md |
52 | kWalkSyncAlways = 0x04, |
53 | kWalkPreflight = 0x08, |
54 | kWalkDoubleBuffer = 0x10, |
55 | kWalkPrepare = 0x20, |
56 | kWalkComplete = 0x40, |
57 | kWalkClient = 0x80 |
58 | }; |
59 | |
60 | |
61 | #define fInternalState reserved |
62 | #define fState reserved->fState |
63 | #define fMDSummary reserved->fMDSummary |
64 | |
65 | |
66 | #if 1 |
67 | // no direction => OutIn |
68 | #define SHOULD_COPY_DIR(op, direction) \ |
69 | ((kIODirectionNone == (direction)) \ |
70 | || (kWalkSyncAlways & (op)) \ |
71 | || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut) \ |
72 | & (direction))) |
73 | |
74 | #else |
75 | #define SHOULD_COPY_DIR(state, direction) (true) |
76 | #endif |
77 | |
78 | #if 0 |
79 | #define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); } |
80 | #else |
81 | #define DEBG(fmt, args...) {} |
82 | #endif |
83 | |
84 | #if 0 |
85 | #define LOGTAG 0x87654321 |
86 | #endif |
87 | |
88 | /**************************** class IODMACommand ***************************/ |
89 | |
90 | #undef super |
91 | #define super IOCommand |
92 | OSDefineMetaClassAndStructorsWithZone(IODMACommand, IOCommand, ZC_NONE); |
93 | |
94 | OSMetaClassDefineReservedUsedX86(IODMACommand, 0); |
95 | OSMetaClassDefineReservedUsedX86(IODMACommand, 1); |
96 | OSMetaClassDefineReservedUsedX86(IODMACommand, 2); |
97 | OSMetaClassDefineReservedUsedX86(IODMACommand, 3); |
98 | OSMetaClassDefineReservedUsedX86(IODMACommand, 4); |
99 | OSMetaClassDefineReservedUsedX86(IODMACommand, 5); |
100 | OSMetaClassDefineReservedUsedX86(IODMACommand, 6); |
101 | OSMetaClassDefineReservedUnused(IODMACommand, 7); |
102 | OSMetaClassDefineReservedUnused(IODMACommand, 8); |
103 | OSMetaClassDefineReservedUnused(IODMACommand, 9); |
104 | OSMetaClassDefineReservedUnused(IODMACommand, 10); |
105 | OSMetaClassDefineReservedUnused(IODMACommand, 11); |
106 | OSMetaClassDefineReservedUnused(IODMACommand, 12); |
107 | OSMetaClassDefineReservedUnused(IODMACommand, 13); |
108 | OSMetaClassDefineReservedUnused(IODMACommand, 14); |
109 | OSMetaClassDefineReservedUnused(IODMACommand, 15); |
110 | |
111 | |
112 | OSSharedPtr<IODMACommand> |
113 | IODMACommand::withRefCon(void * refCon) |
114 | { |
115 | OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>(); |
116 | |
117 | if (me && !me->initWithRefCon(refCon)) { |
118 | return nullptr; |
119 | } |
120 | |
121 | return me; |
122 | } |
123 | |
124 | OSSharedPtr<IODMACommand> |
125 | IODMACommand::withSpecification(SegmentFunction outSegFunc, |
126 | const SegmentOptions * segmentOptions, |
127 | uint32_t mappingOptions, |
128 | IOMapper * mapper, |
129 | void * refCon) |
130 | { |
131 | OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>(); |
132 | |
133 | if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions, |
134 | mapper, refCon)) { |
135 | return nullptr; |
136 | } |
137 | |
138 | return me; |
139 | } |
140 | |
141 | OSSharedPtr<IODMACommand> |
142 | IODMACommand::withSpecification(SegmentFunction outSegFunc, |
143 | UInt8 numAddressBits, |
144 | UInt64 maxSegmentSize, |
145 | MappingOptions mappingOptions, |
146 | UInt64 maxTransferSize, |
147 | UInt32 alignment, |
148 | IOMapper *mapper, |
149 | void *refCon) |
150 | { |
151 | OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>(); |
152 | |
153 | if (me && !me->initWithSpecification(outSegFunc, |
154 | numAddressBits, maxSegmentSize, |
155 | mappingOptions, maxTransferSize, |
156 | alignment, mapper, refCon)) { |
157 | return nullptr; |
158 | } |
159 | |
160 | return me; |
161 | } |
162 | |
163 | OSSharedPtr<IODMACommand> |
164 | IODMACommand::cloneCommand(void *refCon) |
165 | { |
166 | SegmentOptions segmentOptions = |
167 | { |
168 | .fStructSize = sizeof(segmentOptions), |
169 | .fNumAddressBits = (uint8_t)fNumAddressBits, |
170 | .fMaxSegmentSize = fMaxSegmentSize, |
171 | .fMaxTransferSize = fMaxTransferSize, |
172 | .fAlignment = fAlignMask + 1, |
		.fAlignmentLength = fAlignMaskLength + 1,
		.fAlignmentInternalSegments = fAlignMaskInternalSegments + 1
175 | }; |
176 | |
	return IODMACommand::withSpecification(fOutSeg, &segmentOptions,
	           fMappingOptions, fMapper.get(), refCon);
179 | } |
180 | |
181 | #define kLastOutputFunction ((SegmentFunction) kLastOutputFunction) |
182 | |
183 | bool |
184 | IODMACommand::initWithRefCon(void * refCon) |
185 | { |
186 | if (!super::init()) { |
187 | return false; |
188 | } |
189 | |
190 | if (!reserved) { |
191 | reserved = IOMallocType(IODMACommandInternal); |
192 | } |
193 | fRefCon = refCon; |
194 | |
195 | return true; |
196 | } |
197 | |
198 | bool |
199 | IODMACommand::initWithSpecification(SegmentFunction outSegFunc, |
200 | const SegmentOptions * segmentOptions, |
201 | uint32_t mappingOptions, |
202 | IOMapper * mapper, |
203 | void * refCon) |
204 | { |
205 | if (!initWithRefCon(refCon)) { |
206 | return false; |
207 | } |
208 | |
209 | if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions, |
210 | mappingOptions, mapper)) { |
211 | return false; |
212 | } |
213 | |
214 | return true; |
215 | } |
216 | |
217 | bool |
218 | IODMACommand::initWithSpecification(SegmentFunction outSegFunc, |
219 | UInt8 numAddressBits, |
220 | UInt64 maxSegmentSize, |
221 | MappingOptions mappingOptions, |
222 | UInt64 maxTransferSize, |
223 | UInt32 alignment, |
224 | IOMapper *mapper, |
225 | void *refCon) |
226 | { |
227 | SegmentOptions segmentOptions = |
228 | { |
229 | .fStructSize = sizeof(segmentOptions), |
230 | .fNumAddressBits = numAddressBits, |
231 | .fMaxSegmentSize = maxSegmentSize, |
232 | .fMaxTransferSize = maxTransferSize, |
233 | .fAlignment = alignment, |
234 | .fAlignmentLength = 1, |
235 | .fAlignmentInternalSegments = alignment |
236 | }; |
237 | |
	return initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon);
239 | } |
240 | |
241 | IOReturn |
242 | IODMACommand::setSpecification(SegmentFunction outSegFunc, |
243 | const SegmentOptions * segmentOptions, |
244 | uint32_t mappingOptions, |
245 | IOMapper * mapper) |
246 | { |
247 | IOService * device = NULL; |
248 | UInt8 numAddressBits; |
249 | UInt64 maxSegmentSize; |
250 | UInt64 maxTransferSize; |
251 | UInt32 alignment; |
252 | |
253 | bool is32Bit; |
254 | |
255 | if (!outSegFunc || !segmentOptions) { |
256 | return kIOReturnBadArgument; |
257 | } |
258 | |
259 | is32Bit = ((OutputHost32 == outSegFunc) |
260 | || (OutputBig32 == outSegFunc) |
261 | || (OutputLittle32 == outSegFunc)); |
262 | |
263 | numAddressBits = segmentOptions->fNumAddressBits; |
264 | maxSegmentSize = segmentOptions->fMaxSegmentSize; |
265 | maxTransferSize = segmentOptions->fMaxTransferSize; |
266 | alignment = segmentOptions->fAlignment; |
267 | if (is32Bit) { |
268 | if (!numAddressBits) { |
269 | numAddressBits = 32; |
270 | } else if (numAddressBits > 32) { |
271 | return kIOReturnBadArgument; // Wrong output function for bits |
272 | } |
273 | } |
274 | |
275 | if (numAddressBits && (numAddressBits < PAGE_SHIFT)) { |
276 | return kIOReturnBadArgument; |
277 | } |
278 | |
279 | if (!maxSegmentSize) { |
280 | maxSegmentSize--; // Set Max segment to -1 |
281 | } |
282 | if (!maxTransferSize) { |
283 | maxTransferSize--; // Set Max transfer to -1 |
284 | } |
285 | if (mapper && !OSDynamicCast(IOMapper, mapper)) { |
286 | device = mapper; |
287 | mapper = NULL; |
288 | } |
289 | if (!mapper && (kUnmapped != MAPTYPE(mappingOptions))) { |
290 | IOMapper::checkForSystemMapper(); |
291 | mapper = IOMapper::gSystem; |
292 | } |
293 | |
294 | fNumSegments = 0; |
295 | fOutSeg = outSegFunc; |
296 | fNumAddressBits = numAddressBits; |
297 | fMaxSegmentSize = maxSegmentSize; |
298 | fMappingOptions = mappingOptions; |
299 | fMaxTransferSize = maxTransferSize; |
300 | if (!alignment) { |
301 | alignment = 1; |
302 | } |
303 | fAlignMask = alignment - 1; |
304 | |
305 | alignment = segmentOptions->fAlignmentLength; |
306 | if (!alignment) { |
307 | alignment = 1; |
308 | } |
309 | fAlignMaskLength = alignment - 1; |
310 | |
311 | alignment = segmentOptions->fAlignmentInternalSegments; |
312 | if (!alignment) { |
313 | alignment = (fAlignMask + 1); |
314 | } |
315 | fAlignMaskInternalSegments = alignment - 1; |
316 | |
317 | switch (MAPTYPE(mappingOptions)) { |
318 | case kMapped: break; |
319 | case kUnmapped: break; |
320 | case kNonCoherent: break; |
321 | |
322 | case kBypassed: |
323 | if (!mapper) { |
324 | break; |
325 | } |
326 | return kIOReturnBadArgument; |
327 | |
328 | default: |
329 | return kIOReturnBadArgument; |
330 | } |
331 | |
332 | if (mapper != fMapper) { |
		fMapper.reset(mapper, OSRetain);
334 | } |
335 | |
336 | fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions)); |
337 | if (0 != (kIODMAMapOptionDextOwner & mappingOptions)) { |
338 | fInternalState->fDextLock = IOLockAlloc(); |
339 | } |
340 | fInternalState->fDevice = device; |
341 | |
342 | |
343 | return kIOReturnSuccess; |
344 | } |
345 | |
346 | void |
347 | IODMACommand::free() |
348 | { |
349 | if (reserved) { |
350 | if (fInternalState->fDextLock) { |
351 | if (fActive) { |
				CompleteDMA(kIODMACommandCompleteDMANoOptions);
353 | } |
354 | IOLockFree(fInternalState->fDextLock); |
355 | } |
356 | IOFreeType(reserved, IODMACommandInternal); |
357 | } |
358 | |
359 | fMapper.reset(); |
360 | |
361 | // Correct use of this class when setting an IOMemoryDescriptor |
362 | // in fMemory via setMemoryDescriptor(desc) is, for the caller, to |
363 | // have a matching call to clearMemoryDescriptor() before releasing |
364 | // the object. The matching call has also the effect of releasing |
365 | // the ref taken on the IOMemoryDescriptor in setMemoryDescriptor(). |
366 | // |
367 | // A number of "misbehaving" drivers has been found during testing, |
368 | // whereby a matching call to clearMemoryDescriptor() is missing: |
369 | // |
370 | // rdar://59947343 |
371 | // rdar://59946968 |
372 | // |
373 | // Both the approaches taken in said drivers are wrong, but have gone |
374 | // basically silent with fMemory being a regular pointer. With fMemory |
375 | // becoming a OSSharedPtr, the IODMACommand destructor expects to find |
376 | // either fMemory reset (through the call to clearMemoryDescriptor()) or |
377 | // a reference hold for the release. |
378 | // |
379 | // For this reason, this workaround of detaching fMemory is put in |
380 | // place here, choosing the leak over the panic for misbehaving |
381 | // drivers. Once all instances are fixed, this workaround will be |
382 | // removed. |
383 | // |
384 | // Note: all well behaving drivers that have matching calls for |
385 | // setMemoryDescriptor() and clearMemoryDescriptor() are unaffected |
386 | // since fMemory will be null at this point. |
387 | fMemory.detach(); |
388 | |
389 | super::free(); |
390 | } |
391 | |
392 | IOReturn |
393 | IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare) |
394 | { |
395 | IOReturn err = kIOReturnSuccess; |
396 | |
397 | if (mem == fMemory) { |
398 | if (!autoPrepare) { |
399 | while (fActive) { |
400 | complete(); |
401 | } |
402 | } |
403 | return kIOReturnSuccess; |
404 | } |
405 | |
406 | if (fMemory) { |
407 | // As we are almost certainly being called from a work loop thread |
408 | // if fActive is true it is probably not a good time to potentially |
409 | // block. Just test for it and return an error |
410 | if (fActive) { |
411 | return kIOReturnBusy; |
412 | } |
413 | clearMemoryDescriptor(); |
414 | } |
415 | |
416 | if (mem) { |
		bzero(&fMDSummary, sizeof(fMDSummary));
		err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
		    &fMDSummary, sizeof(fMDSummary));
420 | if (err) { |
421 | return err; |
422 | } |
423 | |
424 | ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage; |
425 | |
426 | if ((kMapped == MAPTYPE(fMappingOptions)) |
427 | && fMapper) { |
428 | fInternalState->fCheckAddressing = false; |
429 | } else { |
430 | fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT)))); |
431 | } |
432 | |
433 | fInternalState->fNewMD = true; |
		fMemory.reset(const_cast<IOMemoryDescriptor *>(mem), OSRetain);
435 | fInternalState->fSetActiveNoMapper = (!fMapper); |
436 | if (fInternalState->fSetActiveNoMapper) { |
			mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
438 | } |
439 | if (autoPrepare) { |
440 | err = prepare(); |
441 | if (err) { |
442 | clearMemoryDescriptor(); |
443 | } |
444 | } |
445 | } |
446 | |
447 | return err; |
448 | } |
449 | |
450 | IOReturn |
451 | IODMACommand::clearMemoryDescriptor(bool autoComplete) |
452 | { |
453 | if (fActive && !autoComplete) { |
454 | return kIOReturnNotReady; |
455 | } |
456 | |
457 | if (fMemory) { |
458 | while (fActive) { |
459 | complete(); |
460 | } |
461 | if (fInternalState->fSetActiveNoMapper) { |
			fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
463 | } |
464 | fMemory.reset(); |
465 | } |
466 | |
467 | return kIOReturnSuccess; |
468 | } |
469 | |
470 | const IOMemoryDescriptor * |
471 | IODMACommand::getMemoryDescriptor() const |
472 | { |
473 | return fMemory.get(); |
474 | } |
475 | |
476 | IOMemoryDescriptor * |
477 | IODMACommand::getIOMemoryDescriptor() const |
478 | { |
479 | OSSharedPtr<IOMemoryDescriptor> mem; |
480 | |
481 | mem = reserved->fCopyMD; |
482 | if (!mem) { |
483 | mem = fMemory; |
484 | } |
485 | |
486 | return mem.get(); |
487 | } |
488 | |
489 | IOReturn |
490 | IODMACommand::segmentOp( |
491 | void *reference, |
492 | IODMACommand *target, |
493 | Segment64 segment, |
494 | void *segments, |
495 | UInt32 segmentIndex) |
496 | { |
497 | IOOptionBits op = (IOOptionBits)(uintptr_t) reference; |
498 | addr64_t maxPhys, address; |
499 | uint64_t length; |
500 | uint32_t numPages; |
501 | uint32_t mask; |
502 | |
503 | IODMACommandInternal * state = target->reserved; |
504 | |
505 | if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper)) { |
506 | maxPhys = (1ULL << target->fNumAddressBits); |
507 | } else { |
508 | maxPhys = 0; |
509 | } |
510 | maxPhys--; |
511 | |
512 | address = segment.fIOVMAddr; |
513 | length = segment.fLength; |
514 | |
515 | assert(length); |
516 | |
517 | if (!state->fMisaligned) { |
518 | mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask); |
519 | state->fMisaligned |= (0 != (mask & address)); |
520 | if (state->fMisaligned) { |
521 | DEBG("misaligned address %qx:%qx, %x\n" , address, length, mask); |
522 | } |
523 | } |
524 | if (!state->fMisaligned) { |
525 | mask = target->fAlignMaskLength; |
526 | state->fMisaligned |= (0 != (mask & length)); |
527 | if (state->fMisaligned) { |
528 | DEBG("misaligned length %qx:%qx, %x\n" , address, length, mask); |
529 | } |
530 | } |
531 | |
532 | if (state->fMisaligned && (kWalkPreflight & op)) { |
533 | return kIOReturnNotAligned; |
534 | } |
535 | |
536 | if (!state->fDoubleBuffer) { |
537 | if ((address + length - 1) <= maxPhys) { |
538 | length = 0; |
539 | } else if (address <= maxPhys) { |
540 | DEBG("tail %qx, %qx" , address, length); |
541 | length = (address + length - maxPhys - 1); |
542 | address = maxPhys + 1; |
543 | DEBG("-> %qx, %qx\n" , address, length); |
544 | } |
545 | } |
546 | |
547 | if (!length) { |
548 | return kIOReturnSuccess; |
549 | } |
550 | |
551 | uint64_t numPages64 = atop_64(round_page_64((address & PAGE_MASK) + length)); |
552 | if (numPages64 > UINT_MAX) { |
553 | return kIOReturnVMError; |
554 | } |
555 | numPages = (typeof(numPages))numPages64; |
556 | |
557 | if (kWalkPreflight & op) { |
558 | state->fCopyPageCount += numPages; |
559 | } else { |
560 | vm_page_t lastPage; |
561 | lastPage = NULL; |
562 | if (kWalkPrepare & op) { |
563 | lastPage = state->fCopyNext; |
564 | for (IOItemCount idx = 0; idx < numPages; idx++) { |
				vm_page_set_offset(lastPage, atop_64(address) + idx);
				lastPage = vm_page_get_next(lastPage);
567 | } |
568 | } |
569 | |
570 | if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) { |
571 | lastPage = state->fCopyNext; |
572 | for (IOItemCount idx = 0; idx < numPages; idx++) { |
573 | if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) { |
574 | addr64_t cpuAddr = address; |
575 | addr64_t remapAddr; |
576 | uint64_t chunk; |
577 | |
578 | if ((kMapped == MAPTYPE(target->fMappingOptions)) |
579 | && target->fMapper) { |
						cpuAddr = target->fMapper->mapToPhysicalAddress(address);
581 | } |
582 | |
583 | remapAddr = ptoa_64(vm_page_get_phys_page(lastPage)); |
584 | if (!state->fDoubleBuffer) { |
585 | remapAddr += (address & PAGE_MASK); |
586 | } |
587 | chunk = PAGE_SIZE - (address & PAGE_MASK); |
588 | if (chunk > length) { |
589 | chunk = length; |
590 | } |
591 | if (chunk > (UINT_MAX - PAGE_SIZE + 1)) { |
592 | chunk = (UINT_MAX - PAGE_SIZE + 1); |
593 | } |
594 | |
595 | DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n" , remapAddr, |
596 | (kWalkSyncIn & op) ? "->" : "<-" , |
597 | address, chunk, op); |
598 | |
					if (kWalkSyncIn & op) { // cppvNoModSnk
						copypv(remapAddr, cpuAddr, (unsigned int) chunk,
						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
					} else {
						copypv(cpuAddr, remapAddr, (unsigned int) chunk,
						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc);
					}
606 | address += chunk; |
607 | length -= chunk; |
608 | } |
				lastPage = vm_page_get_next(lastPage);
610 | } |
611 | } |
612 | state->fCopyNext = lastPage; |
613 | } |
614 | |
615 | return kIOReturnSuccess; |
616 | } |
617 | |
618 | OSSharedPtr<IOBufferMemoryDescriptor> |
619 | IODMACommand::createCopyBuffer(IODirection direction, UInt64 length) |
620 | { |
621 | mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask |
	return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
	           direction, length, mask);
624 | } |
625 | |
626 | IOReturn |
627 | IODMACommand::walkAll(uint32_t op) |
628 | { |
629 | IODMACommandInternal * state = fInternalState; |
630 | |
631 | IOReturn ret = kIOReturnSuccess; |
632 | UInt32 numSegments; |
633 | UInt64 offset; |
634 | |
635 | if (kWalkPreflight & op) { |
636 | state->fMisaligned = false; |
637 | state->fDoubleBuffer = false; |
638 | state->fPrepared = false; |
639 | state->fCopyNext = NULL; |
640 | state->fCopyPageAlloc = NULL; |
641 | state->fCopyPageCount = 0; |
642 | state->fNextRemapPage = NULL; |
643 | state->fCopyMD = NULL; |
644 | |
645 | if (!(kWalkDoubleBuffer & op)) { |
646 | offset = 0; |
647 | numSegments = 0 - 1; |
			ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
649 | } |
650 | |
651 | op &= ~kWalkPreflight; |
652 | |
653 | state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer); |
654 | state->fForceDoubleBuffer = false; |
655 | if (state->fDoubleBuffer) { |
656 | state->fCopyPageCount = (typeof(state->fCopyPageCount))(atop_64(round_page(state->fPreparedLength))); |
657 | } |
658 | |
659 | if (state->fCopyPageCount) { |
660 | vm_page_t mapBase = NULL; |
661 | |
662 | DEBG("preflight fCopyPageCount %d\n" , state->fCopyPageCount); |
663 | |
664 | if (!fMapper && !state->fDoubleBuffer) { |
665 | kern_return_t kr; |
666 | |
667 | if (fMapper) { |
668 | panic("fMapper copying" ); |
669 | } |
670 | |
				kr = vm_page_alloc_list(state->fCopyPageCount,
				    (kma_flags_t)(KMA_LOMEM | KMA_NOPAGEWAIT), &mapBase);
673 | if (KERN_SUCCESS != kr) { |
674 | DEBG("vm_page_alloc_list(%d) failed (%d)\n" , state->fCopyPageCount, kr); |
675 | mapBase = NULL; |
676 | } |
677 | } |
678 | |
679 | if (mapBase) { |
680 | state->fCopyPageAlloc = mapBase; |
681 | state->fCopyNext = state->fCopyPageAlloc; |
682 | offset = 0; |
683 | numSegments = 0 - 1; |
				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
685 | state->fPrepared = true; |
686 | op &= ~(kWalkSyncIn | kWalkSyncOut); |
687 | } else { |
688 | DEBG("alloc IOBMD\n" ); |
689 | state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, length: state->fPreparedLength); |
690 | |
691 | if (state->fCopyMD) { |
692 | ret = kIOReturnSuccess; |
693 | state->fPrepared = true; |
694 | } else { |
695 | DEBG("IODMACommand !alloc IOBMD" ); |
696 | return kIOReturnNoResources; |
697 | } |
698 | } |
699 | } |
700 | } |
701 | |
702 | if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op)) { |
703 | if (state->fCopyPageCount) { |
704 | DEBG("sync fCopyPageCount %d\n" , state->fCopyPageCount); |
705 | |
706 | if (state->fCopyPageAlloc) { |
707 | state->fCopyNext = state->fCopyPageAlloc; |
708 | offset = 0; |
709 | numSegments = 0 - 1; |
				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
711 | } else if (state->fCopyMD) { |
712 | DEBG("sync IOBMD\n" ); |
713 | |
714 | if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) { |
715 | OSSharedPtr<IOMemoryDescriptor> poMD = fMemory; |
716 | |
717 | IOByteCount bytes; |
718 | |
					if (kWalkSyncIn & op) {
						bytes = poMD->writeBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
					} else {
						bytes = poMD->readBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
					}
728 | DEBG("fCopyMD %s %lx bytes\n" , (kWalkSyncIn & op) ? "wrote" : "read" , bytes); |
729 | ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun; |
730 | } else { |
731 | ret = kIOReturnSuccess; |
732 | } |
733 | } |
734 | } |
735 | } |
736 | |
737 | if (kWalkComplete & op) { |
738 | if (state->fCopyPageAlloc) { |
			vm_page_free_list(state->fCopyPageAlloc, FALSE);
740 | state->fCopyPageAlloc = NULL; |
741 | state->fCopyPageCount = 0; |
742 | } |
743 | if (state->fCopyMD) { |
744 | state->fCopyMD.reset(); |
745 | } |
746 | |
747 | state->fPrepared = false; |
748 | } |
749 | return ret; |
750 | } |
751 | |
752 | UInt8 |
753 | IODMACommand::getNumAddressBits(void) |
754 | { |
755 | return (UInt8) fNumAddressBits; |
756 | } |
757 | |
758 | UInt32 |
759 | IODMACommand::getAlignment(void) |
760 | { |
761 | return fAlignMask + 1; |
762 | } |
763 | |
764 | uint32_t |
765 | IODMACommand::getAlignmentLength(void) |
766 | { |
767 | return fAlignMaskLength + 1; |
768 | } |
769 | |
770 | uint32_t |
771 | IODMACommand::getAlignmentInternalSegments(void) |
772 | { |
773 | return fAlignMaskInternalSegments + 1; |
774 | } |
775 | |
776 | IOReturn |
777 | IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc, |
778 | const SegmentOptions * segmentOptions, |
779 | uint32_t mappingOptions, |
780 | IOMapper * mapper, |
781 | UInt64 offset, |
782 | UInt64 length, |
783 | bool flushCache, |
784 | bool synchronize) |
785 | { |
786 | IOReturn ret; |
787 | |
788 | if (fActive) { |
789 | return kIOReturnNotPermitted; |
790 | } |
791 | |
792 | ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper); |
793 | if (kIOReturnSuccess != ret) { |
794 | return ret; |
795 | } |
796 | |
797 | ret = prepare(offset, length, flushCache, synchronize); |
798 | |
799 | return ret; |
800 | } |
801 | |
802 | IOReturn |
803 | IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc, |
804 | UInt8 numAddressBits, |
805 | UInt64 maxSegmentSize, |
806 | MappingOptions mappingOptions, |
807 | UInt64 maxTransferSize, |
808 | UInt32 alignment, |
809 | IOMapper *mapper, |
810 | UInt64 offset, |
811 | UInt64 length, |
812 | bool flushCache, |
813 | bool synchronize) |
814 | { |
815 | SegmentOptions segmentOptions = |
816 | { |
817 | .fStructSize = sizeof(segmentOptions), |
818 | .fNumAddressBits = numAddressBits, |
819 | .fMaxSegmentSize = maxSegmentSize, |
820 | .fMaxTransferSize = maxTransferSize, |
821 | .fAlignment = alignment, |
822 | .fAlignmentLength = 1, |
823 | .fAlignmentInternalSegments = alignment |
824 | }; |
825 | |
826 | return prepareWithSpecification(outSegFunc, segmentOptions: &segmentOptions, mappingOptions, mapper, |
827 | offset, length, flushCache, synchronize); |
828 | } |
829 | |
830 | |
831 | IOReturn |
832 | IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize) |
833 | { |
834 | IODMACommandInternal * state = fInternalState; |
835 | IOReturn ret = kIOReturnSuccess; |
836 | uint32_t mappingOptions = fMappingOptions; |
837 | |
838 | // check specification has been set |
839 | if (!fOutSeg) { |
840 | return kIOReturnNotReady; |
841 | } |
842 | |
843 | if (!length) { |
844 | length = fMDSummary.fLength; |
845 | } |
846 | |
847 | if (length > fMaxTransferSize) { |
848 | return kIOReturnNoSpace; |
849 | } |
850 | |
851 | if (fActive++) { |
852 | if ((state->fPreparedOffset != offset) |
853 | || (state->fPreparedLength != length)) { |
854 | ret = kIOReturnNotReady; |
855 | } |
856 | } else { |
857 | if (fAlignMaskLength & length) { |
858 | return kIOReturnNotAligned; |
859 | } |
860 | |
		if (atop_64(length) > UINT_MAX) {
862 | return kIOReturnVMError; |
863 | } |
864 | state->fPreparedOffset = offset; |
865 | state->fPreparedLength = length; |
866 | |
867 | state->fMisaligned = false; |
868 | state->fDoubleBuffer = false; |
869 | state->fPrepared = false; |
870 | state->fCopyNext = NULL; |
871 | state->fCopyPageAlloc = NULL; |
872 | state->fCopyPageCount = 0; |
873 | state->fNextRemapPage = NULL; |
874 | state->fCopyMD = NULL; |
875 | state->fLocalMapperAlloc = 0; |
876 | state->fLocalMapperAllocValid = false; |
877 | state->fLocalMapperAllocLength = 0; |
878 | |
879 | state->fSourceAlignMask = fAlignMask; |
880 | if (fMapper) { |
881 | state->fSourceAlignMask &= page_mask; |
882 | } |
883 | |
884 | state->fCursor = state->fIterateOnly |
885 | || (!state->fCheckAddressing |
886 | && (!state->fSourceAlignMask |
887 | || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask))))); |
888 | |
889 | if (!state->fCursor) { |
890 | IOOptionBits op = kWalkPrepare | kWalkPreflight; |
891 | if (synchronize) { |
892 | op |= kWalkSyncOut; |
893 | } |
894 | ret = walkAll(op); |
895 | } |
896 | |
897 | if (IS_NONCOHERENT(mappingOptions) && flushCache) { |
898 | if (state->fCopyMD) { |
				state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
900 | } else { |
				fMemory->performOperation(kIOMemoryIncoherentIOStore, offset, length);
902 | } |
903 | } |
904 | |
905 | if (fMapper) { |
906 | IOMDDMAMapArgs mapArgs; |
			bzero(&mapArgs, sizeof(mapArgs));
908 | mapArgs.fMapper = fMapper.get(); |
909 | mapArgs.fCommand = this; |
910 | mapArgs.fMapSpec.device = state->fDevice; |
911 | mapArgs.fMapSpec.alignment = fAlignMask + 1; |
912 | mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? ((UInt8) fNumAddressBits) : 64; |
913 | mapArgs.fLength = state->fPreparedLength; |
914 | OSSharedPtr<IOMemoryDescriptor> md = state->fCopyMD; |
915 | if (md) { |
916 | mapArgs.fOffset = 0; |
917 | } else { |
918 | md = fMemory; |
919 | mapArgs.fOffset = state->fPreparedOffset; |
920 | } |
921 | |
			ret = md->dmaCommandOperation(kIOMDDMAMap, &mapArgs, sizeof(mapArgs));
923 | |
924 | if ((kIOReturnSuccess == ret) |
925 | && mapArgs.fAllocLength |
926 | && (mapArgs.fAllocLength != mapArgs.fLength)) { |
927 | do { |
928 | // multisegment case |
929 | IOMDDMAWalkSegmentState walkState; |
930 | IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState; |
931 | IOOptionBits mdOp; |
932 | uint64_t index; |
933 | IOPhysicalLength segLen; |
934 | uint32_t segCount; |
935 | uint64_t phys, align; |
936 | uint64_t mapperPageMask; |
937 | uint64_t mapperPageShift; |
938 | uint64_t insertOffset; |
939 | uint32_t mapOptions; |
940 | uint64_t length; |
941 | |
942 | assert(mapArgs.fAllocLength > mapArgs.fLength); |
943 | |
944 | mapperPageMask = fMapper->getPageSize(); |
945 | assert(mapperPageMask); |
946 | mapperPageMask -= 1; |
947 | mapperPageShift = (64 - __builtin_clzll(mapperPageMask)); |
948 | walkArgs->fMapped = false; |
949 | length = state->fPreparedLength; |
950 | mdOp = kIOMDFirstSegment; |
951 | segCount = 0; |
952 | for (index = 0; index < length; segCount++) { |
953 | walkArgs->fOffset = state->fPreparedOffset + index; |
954 | |
						ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
956 | mdOp = kIOMDWalkSegments; |
957 | assert(kIOReturnSuccess == ret); |
958 | if (ret != kIOReturnSuccess) { |
959 | panic("dmaCommandOperation" ); |
960 | } |
961 | segLen = walkArgs->fLength; |
962 | index += segLen; |
963 | } |
964 | if (ret != kIOReturnSuccess) { |
965 | break; |
966 | } |
967 | |
968 | #if defined(LOGTAG) |
969 | if (LOGTAG == fMemory->getTag()) { |
970 | IOLog("DMA[%p] alloc 0x%qx, 0x%qx\n" , this, mapArgs.fAlloc, mapArgs.fAllocLength); |
971 | } |
972 | #endif /* defined(LOGTAG) */ |
973 | |
974 | state->fMapSegments = IONewZeroData(IODMACommandMapSegment, segCount); |
975 | if (!state->fMapSegments) { |
976 | ret = kIOReturnNoMemory; |
977 | break; |
978 | } |
979 | state->fMapSegmentsCount = segCount; |
980 | |
981 | switch (kIODirectionOutIn & fMDSummary.fDirection) { |
982 | case kIODirectionOut: |
983 | mapOptions = kIODMAMapReadAccess; |
984 | break; |
985 | case kIODirectionIn: |
986 | mapOptions = kIODMAMapWriteAccess; |
987 | break; |
988 | default: |
989 | mapOptions = kIODMAMapReadAccess | kIODMAMapWriteAccess; |
990 | break; |
991 | } |
992 | |
993 | mdOp = kIOMDFirstSegment; |
994 | segCount = 0; |
995 | for (insertOffset = 0, index = 0; index < length; segCount++) { |
996 | walkArgs->fOffset = state->fPreparedOffset + index; |
						ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
998 | mdOp = kIOMDWalkSegments; |
999 | if (ret != kIOReturnSuccess) { |
1000 | panic("dmaCommandOperation 0x%x" , ret); |
1001 | } |
1002 | phys = walkArgs->fIOVMAddr; |
1003 | segLen = walkArgs->fLength; |
1004 | |
1005 | #if defined(LOGTAG) |
1006 | if (LOGTAG == fMemory->getTag()) { |
1007 | IOLog("DMA[%p] phys[%d] 0x%qx, 0x%qx\n" , this, segCount, (uint64_t) phys, (uint64_t) segLen); |
1008 | } |
1009 | #endif /* defined(LOGTAG) */ |
1010 | |
1011 | align = (phys & mapperPageMask); |
1012 | |
1013 | #if defined(LOGTAG) |
1014 | if (LOGTAG == fMemory->getTag()) { |
1015 | IOLog("DMA[%p] runs[%d] dmaoff 0x%qx, mapoff 0x%qx, align 0x%qx\n" , this, segCount, index, insertOffset, align); |
1016 | } |
1017 | #endif /* defined(LOGTAG) */ |
1018 | |
1019 | assert(segCount < state->fMapSegmentsCount); |
1020 | state->fMapSegments[segCount].fDMAOffset = state->fPreparedOffset + index; |
1021 | state->fMapSegments[segCount].fMapOffset = insertOffset; |
1022 | state->fMapSegments[segCount].fPageOffset = align; |
1023 | index += segLen; |
1024 | |
1025 | // segment page align |
1026 | segLen = ((phys + segLen + mapperPageMask) & ~mapperPageMask); |
1027 | phys -= align; |
1028 | segLen -= phys; |
1029 | insertOffset += segLen; |
1030 | } |
1031 | state->fLocalMapperAllocBase = (mapArgs.fAlloc & ~mapperPageMask); |
1032 | #if defined(LOGTAG) |
1033 | if (LOGTAG == fMemory->getTag()) { |
1034 | IOLog("IODMACommand fMapSegmentsCount %d\n" , state->fMapSegmentsCount); |
1035 | } |
1036 | #endif /* defined(LOGTAG) */ |
1037 | } while (false); |
1038 | } |
1039 | if (kIOReturnSuccess == ret) { |
1040 | state->fLocalMapperAlloc = mapArgs.fAlloc; |
1041 | state->fLocalMapperAllocValid = true; |
1042 | state->fLocalMapperAllocLength = mapArgs.fAllocLength; |
1043 | } |
1044 | } |
1045 | if (kIOReturnSuccess == ret) { |
1046 | state->fPrepared = true; |
1047 | } |
1048 | } |
1049 | return ret; |
1050 | } |
1051 | |
1052 | IOReturn |
1053 | IODMACommand::complete(bool invalidateCache, bool synchronize) |
1054 | { |
1055 | IODMACommandInternal * state = fInternalState; |
1056 | IOReturn ret = kIOReturnSuccess; |
1057 | OSSharedPtr<IOMemoryDescriptor> copyMD; |
1058 | |
1059 | if (fActive < 1) { |
1060 | return kIOReturnNotReady; |
1061 | } |
1062 | |
1063 | if (!--fActive) { |
1064 | copyMD = state->fCopyMD; |
1065 | |
1066 | if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) { |
1067 | if (copyMD) { |
				copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
1069 | } else { |
1070 | OSSharedPtr<IOMemoryDescriptor> md = fMemory; |
				md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
1072 | } |
1073 | } |
1074 | |
1075 | if (!state->fCursor) { |
1076 | IOOptionBits op = kWalkComplete; |
1077 | if (synchronize) { |
1078 | op |= kWalkSyncIn; |
1079 | } |
1080 | ret = walkAll(op); |
1081 | } |
1082 | |
1083 | if (state->fLocalMapperAllocValid) { |
1084 | IOMDDMAMapArgs mapArgs; |
			bzero(&mapArgs, sizeof(mapArgs));
1086 | mapArgs.fMapper = fMapper.get(); |
1087 | mapArgs.fCommand = this; |
1088 | mapArgs.fAlloc = state->fLocalMapperAlloc; |
1089 | mapArgs.fAllocLength = state->fLocalMapperAllocLength; |
1090 | OSSharedPtr<IOMemoryDescriptor> md = copyMD; |
1091 | if (md) { |
1092 | mapArgs.fOffset = 0; |
1093 | } else { |
1094 | md = fMemory; |
1095 | mapArgs.fOffset = state->fPreparedOffset; |
1096 | } |
1097 | |
			ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));
1099 | |
1100 | state->fLocalMapperAlloc = 0; |
1101 | state->fLocalMapperAllocValid = false; |
1102 | state->fLocalMapperAllocLength = 0; |
1103 | if (state->fMapSegments) { |
1104 | IODeleteData(state->fMapSegments, IODMACommandMapSegment, state->fMapSegmentsCount); |
1105 | state->fMapSegments = NULL; |
1106 | state->fMapSegmentsCount = 0; |
1107 | } |
1108 | } |
1109 | |
1110 | state->fPrepared = false; |
1111 | } |
1112 | |
1113 | return ret; |
1114 | } |
1115 | |
1116 | IOReturn |
1117 | IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length) |
1118 | { |
1119 | IODMACommandInternal * state = fInternalState; |
1120 | if (fActive < 1) { |
1121 | return kIOReturnNotReady; |
1122 | } |
1123 | |
1124 | if (offset) { |
1125 | *offset = state->fPreparedOffset; |
1126 | } |
1127 | if (length) { |
1128 | *length = state->fPreparedLength; |
1129 | } |
1130 | |
1131 | return kIOReturnSuccess; |
1132 | } |
1133 | |
1134 | IOReturn |
1135 | IODMACommand::synchronize(IOOptionBits options) |
1136 | { |
1137 | IODMACommandInternal * state = fInternalState; |
1138 | IOReturn ret = kIOReturnSuccess; |
1139 | IOOptionBits op; |
1140 | |
1141 | if (kIODirectionOutIn == (kIODirectionOutIn & options)) { |
1142 | return kIOReturnBadArgument; |
1143 | } |
1144 | |
1145 | if (fActive < 1) { |
1146 | return kIOReturnNotReady; |
1147 | } |
1148 | |
1149 | op = 0; |
1150 | if (kForceDoubleBuffer & options) { |
1151 | if (state->fDoubleBuffer) { |
1152 | return kIOReturnSuccess; |
1153 | } |
		ret = complete(false /* invalidateCache */, true /* synchronize */);
1155 | state->fCursor = false; |
1156 | state->fForceDoubleBuffer = true; |
		ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);
1158 | |
1159 | return ret; |
1160 | } else if (state->fCursor) { |
1161 | return kIOReturnSuccess; |
1162 | } |
1163 | |
1164 | if (kIODirectionIn & options) { |
1165 | op |= kWalkSyncIn | kWalkSyncAlways; |
1166 | } else if (kIODirectionOut & options) { |
1167 | op |= kWalkSyncOut | kWalkSyncAlways; |
1168 | } |
1169 | |
1170 | ret = walkAll(op); |
1171 | |
1172 | return ret; |
1173 | } |
1174 | |
1175 | struct IODMACommandTransferContext { |
1176 | void * buffer; |
1177 | UInt64 bufferOffset; |
1178 | UInt64 remaining; |
1179 | UInt32 op; |
1180 | }; |
1181 | enum{ |
1182 | kIODMACommandTransferOpReadBytes = 1, |
1183 | kIODMACommandTransferOpWriteBytes = 2 |
1184 | }; |
1185 | |
1186 | IOReturn |
1187 | IODMACommand::transferSegment(void *reference, |
1188 | IODMACommand *target, |
1189 | Segment64 segment, |
1190 | void *segments, |
1191 | UInt32 segmentIndex) |
1192 | { |
1193 | IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference; |
1194 | UInt64 length = min(segment.fLength, context->remaining); |
1195 | addr64_t ioAddr = segment.fIOVMAddr; |
1196 | addr64_t cpuAddr = ioAddr; |
1197 | |
1198 | context->remaining -= length; |
1199 | |
1200 | while (length) { |
1201 | UInt64 copyLen = length; |
1202 | if ((kMapped == MAPTYPE(target->fMappingOptions)) |
1203 | && target->fMapper) { |
			cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
1205 | copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1))); |
1206 | ioAddr += copyLen; |
1207 | } |
1208 | if (copyLen > (UINT_MAX - PAGE_SIZE + 1)) { |
1209 | copyLen = (UINT_MAX - PAGE_SIZE + 1); |
1210 | } |
1211 | |
1212 | switch (context->op) { |
1213 | case kIODMACommandTransferOpReadBytes: |
			copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, (unsigned int) copyLen,
1215 | cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); |
1216 | break; |
1217 | case kIODMACommandTransferOpWriteBytes: |
			copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, (unsigned int) copyLen,
1219 | cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); |
1220 | break; |
1221 | } |
1222 | length -= copyLen; |
1223 | context->bufferOffset += copyLen; |
1224 | } |
1225 | |
1226 | return context->remaining ? kIOReturnSuccess : kIOReturnOverrun; |
1227 | } |
1228 | |
1229 | UInt64 |
1230 | IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length) |
1231 | { |
1232 | IODMACommandInternal * state = fInternalState; |
1233 | IODMACommandTransferContext context; |
1234 | Segment64 segments[1]; |
1235 | UInt32 numSegments = 0 - 1; |
1236 | |
1237 | if (fActive < 1) { |
1238 | return 0; |
1239 | } |
1240 | |
1241 | if (offset >= state->fPreparedLength) { |
1242 | return 0; |
1243 | } |
1244 | length = min(length, state->fPreparedLength - offset); |
1245 | |
1246 | context.buffer = buffer; |
1247 | context.bufferOffset = 0; |
1248 | context.remaining = length; |
1249 | context.op = transferOp; |
	(void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);
1251 | |
1252 | return length - context.remaining; |
1253 | } |
1254 | |
1255 | UInt64 |
1256 | IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length) |
1257 | { |
	return transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length);
1259 | } |
1260 | |
1261 | UInt64 |
1262 | IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length) |
1263 | { |
	return transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length);
1265 | } |
1266 | |
1267 | IOReturn |
1268 | IODMACommand::genIOVMSegments(UInt64 *offsetP, |
1269 | void *segmentsP, |
1270 | UInt32 *numSegmentsP) |
1271 | { |
	return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
	           offsetP, segmentsP, numSegmentsP);
1274 | } |
1275 | |
1276 | IOReturn |
1277 | IODMACommand::genIOVMSegments(uint32_t op, |
1278 | InternalSegmentFunction outSegFunc, |
1279 | void *reference, |
1280 | UInt64 *offsetP, |
1281 | void *segmentsP, |
1282 | UInt32 *numSegmentsP) |
1283 | { |
1284 | IODMACommandInternal * internalState = fInternalState; |
1285 | IOOptionBits mdOp = kIOMDWalkSegments; |
1286 | IOReturn ret = kIOReturnSuccess; |
1287 | |
1288 | if (!(kWalkComplete & op) && !fActive) { |
1289 | return kIOReturnNotReady; |
1290 | } |
1291 | |
1292 | if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP) { |
1293 | return kIOReturnBadArgument; |
1294 | } |
1295 | |
1296 | IOMDDMAWalkSegmentArgs *state = |
1297 | (IOMDDMAWalkSegmentArgs *)(void *) fState; |
1298 | |
1299 | UInt64 offset = *offsetP + internalState->fPreparedOffset; |
1300 | UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength; |
1301 | |
1302 | if (offset >= memLength) { |
1303 | return kIOReturnOverrun; |
1304 | } |
1305 | |
1306 | if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) { |
1307 | state->fOffset = 0; |
1308 | internalState->fIOVMAddrValid = state->fIOVMAddr = 0; |
1309 | internalState->fNextRemapPage = NULL; |
1310 | internalState->fNewMD = false; |
1311 | mdOp = kIOMDFirstSegment; |
1312 | if (fMapper) { |
1313 | if (internalState->fLocalMapperAllocValid) { |
1314 | state->fMapped = true; |
1315 | state->fMappedBase = internalState->fLocalMapperAlloc; |
1316 | } else { |
1317 | state->fMapped = false; |
1318 | } |
1319 | } |
1320 | } |
1321 | |
1322 | UInt32 segIndex = 0; |
1323 | UInt32 numSegments = *numSegmentsP; |
	Segment64 curSeg = { 0, 0 };
1325 | bool curSegValid = false; |
1326 | addr64_t maxPhys; |
1327 | |
1328 | if (fNumAddressBits && (fNumAddressBits < 64)) { |
1329 | maxPhys = (1ULL << fNumAddressBits); |
1330 | } else { |
1331 | maxPhys = 0; |
1332 | } |
1333 | maxPhys--; |
1334 | |
1335 | while (internalState->fIOVMAddrValid || (state->fOffset < memLength)) { |
1336 | // state = next seg |
1337 | if (!internalState->fIOVMAddrValid) { |
1338 | IOReturn rtn; |
1339 | |
1340 | state->fOffset = offset; |
1341 | state->fLength = memLength - offset; |
1342 | |
1343 | bool done = false; |
1344 | bool check = false; |
1345 | |
1346 | if (internalState->fLocalMapperAllocValid) { |
1347 | if (!internalState->fMapSegmentsCount) { |
1348 | state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset; |
1349 | rtn = kIOReturnSuccess; |
1350 | done = true; |
1351 | check = true; |
1352 | } else { |
1353 | uint64_t address; |
1354 | uint64_t length; |
1355 | uint64_t runOffset; |
1356 | uint64_t ind; |
1357 | uint64_t off2Ind = internalState->fOffset2Index; |
1358 | |
1359 | // Validate the previous offset |
1360 | if (offset |
1361 | && (offset == internalState->fNextOffset || off2Ind <= offset)) { |
1362 | ind = internalState->fIndex; |
1363 | } else { |
1364 | ind = off2Ind = 0; // Start from beginning |
1365 | } |
1366 | #if defined(LOGTAG) |
1367 | if (LOGTAG == fMemory->getTag()) { |
1368 | IOLog("DMA[%p] offsets 0x%qx, 0x%qx, 0x%qx ind %qd\n" , this, offset, internalState->fPreparedOffset, internalState->fNextOffset, ind); |
1369 | } |
1370 | #endif /* defined(LOGTAG) */ |
1371 | |
1372 | // Scan through iopl info blocks looking for block containing offset |
1373 | while (ind < internalState->fMapSegmentsCount && offset >= internalState->fMapSegments[ind].fDMAOffset) { |
1374 | ind++; |
1375 | } |
1376 | if (ind < internalState->fMapSegmentsCount) { |
1377 | length = internalState->fMapSegments[ind].fDMAOffset; |
1378 | } else { |
1379 | length = memLength; |
1380 | } |
1381 | length -= offset; // Remainder within iopl |
1382 | |
1383 | // Go back to actual range as search goes past it |
1384 | ind--; |
1385 | off2Ind = internalState->fMapSegments[ind].fDMAOffset; |
1386 | |
1387 | // Subtract offset till this iopl in total list |
1388 | runOffset = offset - off2Ind; |
1389 | |
1390 | // Compute an offset relative to the mapped base |
1391 | |
1392 | runOffset += internalState->fMapSegments[ind].fPageOffset; |
1393 | address = internalState->fLocalMapperAllocBase + internalState->fMapSegments[ind].fMapOffset + runOffset; |
1394 | #if defined(LOGTAG) |
1395 | if (LOGTAG == fMemory->getTag()) { |
1396 | IOLog("DMA[%p] addrlen 0x%qx, 0x%qx\n" , this, address, length); |
1397 | } |
1398 | #endif /* defined(LOGTAG) */ |
1399 | |
1400 | state->fIOVMAddr = address; |
1401 | state->fLength = length; |
1402 | |
1403 | internalState->fIndex = ind; |
1404 | internalState->fOffset2Index = off2Ind; |
1405 | internalState->fNextOffset = state->fOffset + length; |
1406 | |
1407 | rtn = kIOReturnSuccess; |
1408 | done = true; |
1409 | check = true; |
1410 | } |
1411 | } |
1412 | |
1413 | if (!done) { |
1414 | IOMemoryDescriptor * memory = |
1415 | internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get(); |
				rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
1417 | mdOp = kIOMDWalkSegments; |
1418 | } |
1419 | #if 0 |
1420 | if (check |
1421 | && !ml_at_interrupt_context() |
1422 | && (rtn == kIOReturnSuccess) |
1423 | && fMapper |
1424 | && strcmp("AppleNVMeMMU" , fMapper->getName())) { |
1425 | uint64_t checkOffset; |
1426 | IOPhysicalLength segLen; |
1427 | IOMemoryDescriptor * memory = |
1428 | internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get(); |
1429 | for (checkOffset = 0; checkOffset < state->fLength;) { |
1430 | addr64_t phys = memory->getPhysicalSegment(offset + checkOffset, &segLen, kIOMemoryMapperNone); |
1431 | addr64_t mapperPhys; |
1432 | |
1433 | mapperPhys = fMapper->mapToPhysicalAddress(state->fIOVMAddr + checkOffset); |
1434 | mapperPhys |= (phys & (fMapper->getPageSize() - 1)); |
1435 | if (mapperPhys != phys) { |
1436 | panic("DMA[%p] mismatch at offset %llx + %llx, dma %llx mapperPhys %llx != %llx, len %llx" , |
1437 | this, offset, checkOffset, |
1438 | state->fIOVMAddr + checkOffset, mapperPhys, phys, state->fLength); |
1439 | } |
1440 | checkOffset += page_size - (phys & page_mask); |
1441 | } |
1442 | } |
1443 | #endif |
1444 | if (rtn == kIOReturnSuccess) { |
1445 | internalState->fIOVMAddrValid = true; |
1446 | assert(state->fLength); |
1447 | if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) { |
1448 | UInt64 length = state->fLength; |
1449 | offset += length; |
1450 | curSeg.fLength += length; |
1451 | internalState->fIOVMAddrValid = state->fIOVMAddr = 0; |
1452 | } |
1453 | } else if (rtn == kIOReturnOverrun) { |
1454 | internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end |
1455 | } else { |
1456 | return rtn; |
1457 | } |
1458 | } |
1459 | |
1460 | // seg = state, offset = end of seg |
1461 | if (!curSegValid) { |
1462 | UInt64 length = state->fLength; |
1463 | offset += length; |
1464 | curSeg.fIOVMAddr = state->fIOVMAddr; |
1465 | curSeg.fLength = length; |
1466 | curSegValid = true; |
1467 | internalState->fIOVMAddrValid = state->fIOVMAddr = 0; |
1468 | } |
1469 | |
1470 | if (!internalState->fIOVMAddrValid) { |
1471 | // maxPhys |
1472 | if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) { |
1473 | if (internalState->fCursor) { |
1474 | curSegValid = curSeg.fIOVMAddr = 0; |
1475 | ret = kIOReturnMessageTooLarge; |
1476 | break; |
1477 | } else if (curSeg.fIOVMAddr <= maxPhys) { |
1478 | UInt64 remain, newLength; |
1479 | |
1480 | newLength = (maxPhys + 1 - curSeg.fIOVMAddr); |
1481 | DEBG("trunc %qx, %qx-> %qx\n" , curSeg.fIOVMAddr, curSeg.fLength, newLength); |
1482 | remain = curSeg.fLength - newLength; |
1483 | state->fIOVMAddr = newLength + curSeg.fIOVMAddr; |
1484 | internalState->fIOVMAddrValid = true; |
1485 | curSeg.fLength = newLength; |
1486 | state->fLength = remain; |
1487 | offset -= remain; |
1488 | } else { |
1489 | UInt64 addr = curSeg.fIOVMAddr; |
1490 | ppnum_t addrPage = (ppnum_t) atop_64(addr); |
1491 | vm_page_t remap = NULL; |
1492 | UInt64 remain, newLength; |
1493 | |
1494 | DEBG("sparse switch %qx, %qx " , addr, curSeg.fLength); |
1495 | |
					remap = internalState->fNextRemapPage;
					if (!remap || (addrPage != vm_page_get_offset(remap))) {
						for (remap = internalState->fCopyPageAlloc;
						    remap && (addrPage != vm_page_get_offset(remap));
						    remap = vm_page_get_next(remap)) {
						}
					}
1504 | |
1505 | if (!remap) { |
1506 | panic("no remap page found" ); |
1507 | } |
1508 | |
1509 | curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap)) |
1510 | + (addr & PAGE_MASK); |
1511 | curSegValid = true; |
					internalState->fNextRemapPage = vm_page_get_next(remap);
1513 | |
1514 | newLength = PAGE_SIZE - (addr & PAGE_MASK); |
1515 | if (newLength < curSeg.fLength) { |
1516 | remain = curSeg.fLength - newLength; |
1517 | state->fIOVMAddr = addr + newLength; |
1518 | internalState->fIOVMAddrValid = true; |
1519 | curSeg.fLength = newLength; |
1520 | state->fLength = remain; |
1521 | offset -= remain; |
1522 | } |
1523 | DEBG("-> %qx, %qx offset %qx\n" , curSeg.fIOVMAddr, curSeg.fLength, offset); |
1524 | } |
1525 | } |
1526 | |
1527 | // reduce size of output segment |
1528 | uint64_t reduce, leftover = 0; |
1529 | |
1530 | // fMaxSegmentSize |
1531 | if (curSeg.fLength > fMaxSegmentSize) { |
1532 | leftover += curSeg.fLength - fMaxSegmentSize; |
1533 | curSeg.fLength = fMaxSegmentSize; |
1534 | state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; |
1535 | internalState->fIOVMAddrValid = true; |
1536 | } |
1537 | |
1538 | // alignment current length |
1539 | |
1540 | reduce = (curSeg.fLength & fAlignMaskLength); |
1541 | if (reduce && (curSeg.fLength > reduce)) { |
1542 | leftover += reduce; |
1543 | curSeg.fLength -= reduce; |
1544 | state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; |
1545 | internalState->fIOVMAddrValid = true; |
1546 | } |
1547 | |
1548 | // alignment next address |
1549 | |
1550 | reduce = (state->fIOVMAddr & fAlignMaskInternalSegments); |
1551 | if (reduce && (curSeg.fLength > reduce)) { |
1552 | leftover += reduce; |
1553 | curSeg.fLength -= reduce; |
1554 | state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; |
1555 | internalState->fIOVMAddrValid = true; |
1556 | } |
1557 | |
1558 | if (leftover) { |
1559 | DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n" , |
1560 | leftover, offset, |
1561 | curSeg.fIOVMAddr, curSeg.fLength); |
1562 | state->fLength = leftover; |
1563 | offset -= leftover; |
1564 | } |
1565 | |
1566 | // |
1567 | |
1568 | if (internalState->fCursor) { |
1569 | bool misaligned; |
1570 | uint32_t mask; |
1571 | |
1572 | mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask); |
1573 | misaligned = (0 != (mask & curSeg.fIOVMAddr)); |
1574 | if (!misaligned) { |
1575 | mask = fAlignMaskLength; |
1576 | misaligned |= (0 != (mask & curSeg.fLength)); |
1577 | } |
1578 | if (misaligned) { |
					DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
1582 | curSegValid = curSeg.fIOVMAddr = 0; |
1583 | ret = kIOReturnNotAligned; |
1584 | break; |
1585 | } |
1586 | } |
1587 | |
1588 | if (offset >= memLength) { |
1589 | curSeg.fLength -= (offset - memLength); |
1590 | offset = memLength; |
1591 | internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end |
1592 | break; |
1593 | } |
1594 | } |
1595 | |
1596 | if (internalState->fIOVMAddrValid) { |
1597 | if ((segIndex + 1 == numSegments)) { |
1598 | break; |
1599 | } |
1600 | #if defined(LOGTAG) |
1601 | if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) { |
1602 | IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n" , this, curSeg.fIOVMAddr, curSeg.fLength); |
1603 | } |
1604 | #endif /* defined(LOGTAG) */ |
1605 | ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); |
1606 | curSegValid = curSeg.fIOVMAddr = 0; |
1607 | if (kIOReturnSuccess != ret) { |
1608 | break; |
1609 | } |
1610 | } |
1611 | } |
1612 | |
1613 | if (curSegValid) { |
1614 | #if defined(LOGTAG) |
1615 | if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) { |
1616 | IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n" , this, curSeg.fIOVMAddr, curSeg.fLength); |
1617 | } |
1618 | #endif /* defined(LOGTAG) */ |
1619 | ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); |
1620 | } |
1621 | |
1622 | if (kIOReturnSuccess == ret) { |
1623 | state->fOffset = offset; |
1624 | *offsetP = offset - internalState->fPreparedOffset; |
1625 | *numSegmentsP = segIndex; |
1626 | } |
1627 | return ret; |
1628 | } |
1629 | |
1630 | IOReturn |
1631 | IODMACommand::clientOutputSegment( |
1632 | void *reference, IODMACommand *target, |
1633 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
1634 | { |
1635 | SegmentFunction segmentFunction = (SegmentFunction) reference; |
1636 | IOReturn ret = kIOReturnSuccess; |
1637 | |
1638 | if (target->fNumAddressBits && (target->fNumAddressBits < 64) |
1639 | && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits) |
1640 | && (target->reserved->fLocalMapperAllocValid || !target->fMapper)) { |
1641 | DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n" , segment.fIOVMAddr, segment.fLength); |
1642 | ret = kIOReturnMessageTooLarge; |
1643 | } |
1644 | |
1645 | if (!(*segmentFunction)(target, segment, vSegList, outSegIndex)) { |
1646 | DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n" , segment.fIOVMAddr, segment.fLength); |
1647 | ret = kIOReturnMessageTooLarge; |
1648 | } |
1649 | |
1650 | return ret; |
1651 | } |
1652 | |
1653 | IOReturn |
1654 | IODMACommand::genIOVMSegments(SegmentFunction segmentFunction, |
1655 | UInt64 *offsetP, |
1656 | void *segmentsP, |
1657 | UInt32 *numSegmentsP) |
1658 | { |
	return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
	           offsetP, segmentsP, numSegmentsP);
1661 | } |
1662 | |
1663 | bool |
1664 | IODMACommand::OutputHost32(IODMACommand *, |
1665 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
1666 | { |
1667 | Segment32 *base = (Segment32 *) vSegList; |
1668 | base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr; |
1669 | base[outSegIndex].fLength = (UInt32) segment.fLength; |
1670 | return true; |
1671 | } |
1672 | |
1673 | bool |
1674 | IODMACommand::OutputBig32(IODMACommand *, |
1675 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
1676 | { |
1677 | const UInt offAddr = outSegIndex * sizeof(Segment32); |
1678 | const UInt offLen = offAddr + sizeof(UInt32); |
1679 | OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); |
1680 | OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength); |
1681 | return true; |
1682 | } |
1683 | |
1684 | bool |
1685 | IODMACommand::OutputLittle32(IODMACommand *, |
1686 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
1687 | { |
1688 | const UInt offAddr = outSegIndex * sizeof(Segment32); |
1689 | const UInt offLen = offAddr + sizeof(UInt32); |
1690 | OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); |
1691 | OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength); |
1692 | return true; |
1693 | } |
1694 | |
1695 | bool |
1696 | IODMACommand::OutputHost64(IODMACommand *, |
1697 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
1698 | { |
1699 | Segment64 *base = (Segment64 *) vSegList; |
1700 | base[outSegIndex] = segment; |
1701 | return true; |
1702 | } |
1703 | |
1704 | bool |
1705 | IODMACommand::OutputBig64(IODMACommand *, |
1706 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
1707 | { |
1708 | const UInt offAddr = outSegIndex * sizeof(Segment64); |
1709 | const UInt offLen = offAddr + sizeof(UInt64); |
1710 | OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); |
1711 | OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength); |
1712 | return true; |
1713 | } |
1714 | |
1715 | bool |
1716 | IODMACommand::OutputLittle64(IODMACommand *, |
1717 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
1718 | { |
1719 | const UInt offAddr = outSegIndex * sizeof(Segment64); |
1720 | const UInt offLen = offAddr + sizeof(UInt64); |
1721 | OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); |
1722 | OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength); |
1723 | return true; |
1724 | } |
1725 | |