1/*
2 * Copyright (c) 2014-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <sys/cdefs.h>
30
31#include <IOKit/assert.h>
32#include <IOKit/system.h>
33#include <IOKit/IOLib.h>
34#include <IOKit/IOMemoryDescriptor.h>
35#include <IOKit/IOMapper.h>
36#include <IOKit/IODMACommand.h>
37#include <IOKit/IOKitKeysPrivate.h>
38#include "Tests.h"
39
40#ifndef __LP64__
41#include <IOKit/IOSubMemoryDescriptor.h>
42#endif /* !__LP64__ */
43#include <IOKit/IOSubMemoryDescriptor.h>
44#include <IOKit/IOMultiMemoryDescriptor.h>
45#include <IOKit/IOBufferMemoryDescriptor.h>
46
47#include <IOKit/IOKitDebug.h>
48#include <libkern/OSDebug.h>
49#include <sys/uio.h>
50
51__BEGIN_DECLS
52#include <vm/pmap.h>
53#include <vm/vm_pageout.h>
54#include <mach/memory_object_types.h>
55#include <device/device_port.h>
56
57#include <mach/vm_prot.h>
58#include <mach/mach_vm.h>
59#include <vm/vm_fault.h>
60#include <vm/vm_protos.h>
61__END_DECLS
62
63
64/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
65
66#if DEVELOPMENT || DEBUG
67
68extern SInt32 gIOMemoryReferenceCount;
69
70static int IOMultMemoryDescriptorTest(int newValue)
71{
72 IOMemoryDescriptor * mds[3];
73 IOMultiMemoryDescriptor * mmd;
74 IOMemoryMap * map;
75 void * addr;
76 uint8_t * data;
77 uint32_t i;
78 IOAddressRange ranges[2];
79
80 data = (typeof(data)) IOMallocAligned(ptoa(8), page_size);
81 for (i = 0; i < ptoa(8); i++) data[i] = atop(i) | 0xD0;
82
83 ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
84 ranges[0].length = ptoa(4);
85 ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
86 ranges[1].length = ptoa(4);
87
88 mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);
89
90 mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
91 mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);
92
93 mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds)/sizeof(mds[0]), kIODirectionOutIn, false);
94 mds[2]->release();
95 mds[1]->release();
96 mds[0]->release();
97 map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, ptoa(7), mmd->getLength() - ptoa(7));
98 mmd->release();
99 assert(map);
100
101 addr = (void *) map->getVirtualAddress();
102 assert(ptoa(4) == map->getLength());
103 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
104 assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
105 assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
106 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);
107 map->release();
108 IOFreeAligned(data, ptoa(8));
109
110 return (0);
111}
112
113
114
// <rdar://problem/30102458>
// Force IODMACommand to use a bounce (double) buffer for a pageable buffer
// and verify data is copied in/out correctly for each transfer direction
// (In, Out, InOut).
static int
IODMACommandForceDoubleBufferTest(int newValue)
{
    IOReturn ret;
    IOBufferMemoryDescriptor * bmd;
    IODMACommand * dma;
    uint32_t dir, data;
    // 0x2000 max segment size splits the 8-page buffer into multiple segments.
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize = sizeof(segOptions),
        .fNumAddressBits = 64,
        .fMaxSegmentSize = 0x2000,
        .fMaxTransferSize = 128*1024,
        .fAlignment = 1,
        .fAlignmentLength = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32 numSegments;
    UInt64 dmaOffset;


    // Iterate dir over kIODirectionIn .. kIODirectionInOut (loop exits below).
    for (dir = kIODirectionIn; ; dir++)
    {
        bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
                dir | kIOMemoryPageable, ptoa(8));
        assert(bmd);

        // Tag word encodes the direction so copies can be verified per pass.
        ((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir;

        ret = bmd->prepare((IODirection) dir);
        assert(kIOReturnSuccess == ret);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
                                              kIODMAMapOptionMapped,
                                              NULL, NULL);
        assert(dma);
        ret = dma->setMemoryDescriptor(bmd, true);
        assert(kIOReturnSuccess == ret);

        // Request a forced double buffer regardless of mapping constraints.
        ret = dma->synchronize(IODMACommand::kForceDoubleBuffer | kIODirectionOut);
        assert(kIOReturnSuccess == ret);

        dmaOffset = 0;
        numSegments = 1;
        ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);

        if (kIODirectionOut & dir)
        {
            // Out direction: the original tag must survive the copy-out.
            data = ((uint32_t*) bmd->getBytesNoCopy())[0];
            assertf((0x53535300 | dir) == data, "mismatch 0x%x", data);
        }
        if (kIODirectionIn & dir)
        {
            // In direction: write via the DMA (bounce) address; it must be
            // copied back to the buffer when complete() runs below.
            IOMappedWrite32(segments[0].fIOVMAddr, 0x11223300 | dir);
        }

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);
        dma->release();

        bmd->complete((IODirection) dir);

        if (kIODirectionIn & dir)
        {
            // Verify the copy-back performed by complete().
            data = ((uint32_t*) bmd->getBytesNoCopy())[0];
            assertf((0x11223300 | dir) == data, "mismatch 0x%x", data);
        }

        bmd->release();

        if (dir == kIODirectionInOut) break;
    }

    return (0);
}
194
195// <rdar://problem/34322778>
196static int __unused
197IODMACommandLocalMappedNonContig(int newValue)
198{
199 IOReturn kr;
200 IOMemoryDescriptor * md;
201 IODMACommand * dma;
202 OSDictionary * matching;
203 IOService * device;
204 IOMapper * mapper;
205 IODMACommand::SegmentOptions segOptions =
206 {
207 .fStructSize = sizeof(segOptions),
208 .fNumAddressBits = 64,
209 .fMaxSegmentSize = 128*1024,
210 .fMaxTransferSize = 128*1024,
211 .fAlignment = 1,
212 .fAlignmentLength = 1,
213 .fAlignmentInternalSegments = 1
214 };
215 IODMACommand::Segment64 segments[1];
216 UInt32 numSegments;
217 UInt64 dmaOffset;
218 UInt64 segPhys;
219 vm_address_t buffer;
220 vm_size_t bufSize = ptoa(4);
221
222 if (!IOMapper::gSystem) return (0);
223
224 buffer = 0;
225 kr = vm_allocate_kernel(kernel_map, &buffer, bufSize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
226 assert(KERN_SUCCESS == kr);
227
228 // fragment the vmentries
229 kr = vm_inherit(kernel_map, buffer + ptoa(1), ptoa(1), VM_INHERIT_NONE);
230 assert(KERN_SUCCESS == kr);
231
232 md = IOMemoryDescriptor::withAddressRange(
233 buffer + 0xa00, 0x2000, kIODirectionOutIn, kernel_task);
234 assert(md);
235 kr = md->prepare(kIODirectionOutIn);
236 assert(kIOReturnSuccess == kr);
237
238 segPhys = md->getPhysicalSegment(0, NULL, 0);
239
240 matching = IOService::nameMatching("XHC1");
241 assert(matching);
242 device = IOService::copyMatchingService(matching);
243 matching->release();
244 mapper = device ? IOMapper::copyMapperForDeviceWithIndex(device, 0) : NULL;
245
246 dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
247 kIODMAMapOptionMapped,
248 mapper, NULL);
249 assert(dma);
250 kr = dma->setMemoryDescriptor(md, true);
251 assert(kIOReturnSuccess == kr);
252
253 dmaOffset = 0;
254 numSegments = 1;
255 kr = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
256 assert(kIOReturnSuccess == kr);
257 assert(1 == numSegments);
258
259 if (mapper) assertf(segments[0].fIOVMAddr != segPhys, "phys !local 0x%qx, 0x%qx, %p", segments[0].fIOVMAddr, segPhys, dma);
260
261 kr = dma->clearMemoryDescriptor(true);
262 assert(kIOReturnSuccess == kr);
263 dma->release();
264
265 kr = md->complete(kIODirectionOutIn);
266 assert(kIOReturnSuccess == kr);
267 md->release();
268
269 kr = vm_deallocate(kernel_map, buffer, bufSize);
270 assert(KERN_SUCCESS == kr);
271 OSSafeReleaseNULL(mapper);
272
273 return (0);
274}
275
// <rdar://problem/30102458>
// Test kIOMemoryRemote descriptors (TASK_NULL, addresses not backed by
// local memory): their segments must enumerate unchanged, both via
// getPhysicalSegment() and via an unmapped, iterate-only IODMACommand.
static int
IOMemoryRemoteTest(int newValue)
{
    IOReturn ret;
    IOMemoryDescriptor * md;
    IOByteCount offset, length;
    addr64_t addr;
    uint32_t idx;

    IODMACommand * dma;
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize = sizeof(segOptions),
        .fNumAddressBits = 64,
        .fMaxSegmentSize = 0x2000,
        .fMaxTransferSize = 128*1024,
        .fAlignment = 1,
        .fAlignmentLength = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32 numSegments;
    UInt64 dmaOffset;

    // Arbitrary remote addresses; never dereferenced by this test.
    IOAddressRange ranges[2] = {
        { 0x1234567890123456ULL, 0x1000 }, { 0x5432109876543210, 0x2000 },
    };

    md = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn|kIOMemoryRemote, TASK_NULL);
    assert(md);

// md->map();
// md->readBytes(0, &idx, sizeof(idx));

    ret = md->prepare(kIODirectionOutIn);
    assert(kIOReturnSuccess == ret);

    printf("remote md flags 0x%qx, r %d\n",
           md->getFlags(), (0 != (kIOMemoryRemote & md->getFlags())));

    // Each physical segment must be exactly one of the input ranges, in order.
    for (offset = 0, idx = 0; true; offset += length, idx++)
    {
        addr = md->getPhysicalSegment(offset, &length, 0);
        if (!length) break;
        assert(idx < 2);
        assert(addr == ranges[idx].address);
        assert(length == ranges[idx].length);
    }
    assert(offset == md->getLength());

    // Repeat the check through IODMACommand. Note fMaxSegmentSize (0x2000)
    // equals the larger range, so segments map 1:1 onto the input ranges.
    dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
                                          kIODMAMapOptionUnmapped | kIODMAMapOptionIterateOnly,
                                          NULL, NULL);
    assert(dma);
    ret = dma->setMemoryDescriptor(md, true);
    assert(kIOReturnSuccess == ret);

    for (dmaOffset = 0, idx = 0; dmaOffset < md->getLength(); idx++)
    {
        numSegments = 1;
        ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        assert(idx < 2);
        assert(segments[0].fIOVMAddr == ranges[idx].address);
        assert(segments[0].fLength == ranges[idx].length);
    }
    assert(dmaOffset == md->getLength());

    ret = dma->clearMemoryDescriptor(true);
    assert(kIOReturnSuccess == ret);
    dma->release();
    md->complete(kIODirectionOutIn);
    md->release();

    return (0);
}
354
// Verify kIOMapPrefault: after map() returns, the mapping's pages must be
// resident, so touching them cannot fault.
static IOReturn
IOMemoryPrefaultTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryMap * map;
    IOReturn kr;
    uint32_t data;
    uint32_t * p;
    IOSimpleLock * lock;

    lock = IOSimpleLockAlloc();
    assert(lock);

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
                    kIODirectionOutIn | kIOMemoryPageable, ptoa(8));
    assert(bmd);
    kr = bmd->prepare();
    assert(KERN_SUCCESS == kr);

    // Request the mapping be faulted in up front.
    map = bmd->map(kIOMapPrefault);
    assert(map);

    p = (typeof(p)) map->getVirtualAddress();
    // Read the mapping while holding a simple (spin) lock: blocking on a
    // page fault here would be illegal, so this only succeeds if the
    // prefault actually made the page resident.
    IOSimpleLockLock(lock);
    data = p[0];
    IOSimpleLockUnlock(lock);

    IOLog("IOMemoryPrefaultTest %d\n", data);

    map->release();
    bmd->release();
    IOSimpleLockFree(lock);

    return (kIOReturnSuccess);
}
390
391
392// <rdar://problem/26375234>
393static IOReturn
394ZeroLengthTest(int newValue)
395{
396 IOMemoryDescriptor * md;
397
398 md = IOMemoryDescriptor::withAddressRange(
399 0, 0, kIODirectionNone, current_task());
400 assert(md);
401 md->prepare();
402 md->complete();
403 md->release();
404 return (0);
405}
406
407// <rdar://problem/27002624>
408static IOReturn
409BadFixedAllocTest(int newValue)
410{
411 IOBufferMemoryDescriptor * bmd;
412 IOMemoryMap * map;
413
414 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
415 kIODirectionIn | kIOMemoryPageable, ptoa(1));
416 assert(bmd);
417 map = bmd->createMappingInTask(kernel_task, 0x2000, 0);
418 assert(!map);
419
420 bmd->release();
421 return (0);
422}
423
424// <rdar://problem/26466423>
425static IOReturn
426IODirectionPrepareNoZeroFillTest(int newValue)
427{
428 IOBufferMemoryDescriptor * bmd;
429
430 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
431 kIODirectionIn | kIOMemoryPageable, ptoa(24));
432 assert(bmd);
433 bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill));
434 bmd->prepare(kIODirectionIn);
435 bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid));
436 bmd->complete(kIODirectionIn);
437 bmd->release();
438 return (0);
439}
440
// <rdar://problem/28190483>
// Shared body for the map / copy-on-write map tests: write marker words a
// page apart into a user-visible pageable buffer via copyout(), map the
// range read-only, then check whether a later write through the original
// address is visible in the mapping, depending on kIOMemoryMapCopyOnWrite.
static IOReturn
IOMemoryMapTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryDescriptor * md;
    IOMemoryMap * map;
    uint32_t data;
    user_addr_t p;
    uint8_t * p2;
    int r;
    uint64_t time, nano;

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
                    kIODirectionOutIn | kIOMemoryPageable, 0x4018+0x800);
    assert(bmd);
    p = (typeof(p)) bmd->getBytesNoCopy();
    // Offset into the buffer so the tested range is deliberately unaligned.
    p += 0x800;
    data = 0x11111111;
    r = copyout(&data, p, sizeof(data));
    assert(r == 0);
    data = 0x22222222;
    r = copyout(&data, p + 0x1000, sizeof(data));
    assert(r == 0);
    data = 0x33333333;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(r == 0);
    data = 0x44444444;
    r = copyout(&data, p + 0x3000, sizeof(data));
    assert(r == 0);

    md = IOMemoryDescriptor::withAddressRange(p, 0x4018,
                                              kIODirectionOut | options,
                                              current_task());
    assert(md);
    // Time the map() call; reported at the end in nanoseconds.
    time = mach_absolute_time();
    map = md->map(kIOMapReadOnly);
    time = mach_absolute_time() - time;
    assert(map);
    absolutetime_to_nanoseconds(time, &nano);

    // All four markers must be visible through the read-only mapping.
    p2 = (typeof(p2)) map->getVirtualAddress();
    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x33 == p2[0x2000]);
    assert(0x44 == p2[0x3000]);

    // Overwrite one marker via the original (user) address...
    data = 0x99999999;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(r == 0);

    // ...a copy-on-write mapping must still show the old value; a plain
    // mapping must show the new one.
    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x44 == p2[0x3000]);
    if (kIOMemoryMapCopyOnWrite & options) assert(0x33 == p2[0x2000]);
    else assert(0x99 == p2[0x2000]);

    IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
          kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "",
          nano);

    map->release();
    md->release();
    bmd->release();

    return (kIOReturnSuccess);
}
508
509static int
510IOMemoryMapCopyOnWriteTest(int newValue)
511{
512 IOMemoryMapTest(0);
513 IOMemoryMapTest(kIOMemoryMapCopyOnWrite);
514 return (0);
515}
516
// Tag allocations made by this thread with a named accounting bucket,
// allocate and wire a shared buffer under that name, then restore the
// previous name.
static int
AllocationNameTest(int newValue)
{
    IOMemoryDescriptor * bmd;
    kern_allocation_name_t name, prior;

    name = kern_allocation_name_allocate("com.apple.iokit.test", 0);
    assert(name);

    // All allocations on this thread are now attributed to 'name'.
    prior = thread_set_allocation_name(name);

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
                ptoa(13));
    assert(bmd);
    // prepare() wires the pageable buffer; note there is no matching
    // complete() before release below.
    bmd->prepare();

    thread_set_allocation_name(prior);
    kern_allocation_name_release(name);

    // newValue == 7 deliberately leaks the buffer — presumably so the named
    // allocation remains visible in accounting for manual inspection
    // (NOTE(review): confirm intent with the test's callers).
    if (newValue != 7) bmd->release();

    return (0);
}
541
// Entry point for the memory-descriptor test suite (sysctl-driven on
// DEVELOPMENT/DEBUG kernels). Runs the individual tests above, then an
// exhaustive stress test of IOGeneralMemoryDescriptor mapping and
// readBytes() over many range shapes, offsets, and sizes. newValue is
// passed through to the sub-tests (and selects legacy sub-tests in the
// disabled #if 0 section).
int IOMemoryDescriptorTest(int newValue)
{
    int result;

    IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount);

#if 0
    // Disabled legacy sub-tests, kept for manual debugging.
    if (6 == newValue)
    {
        // Nested IOMultiMemoryDescriptor (a multi containing a multi).
        IOMemoryDescriptor * sbmds[3];
        IOMultiMemoryDescriptor * smmd;
        IOMemoryDescriptor * mds[2];
        IOMultiMemoryDescriptor * mmd;
        IOMemoryMap * map;

        sbmds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2));
        sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3));
        smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds)/sizeof(sbmds[0]), kIODirectionOutIn, false);

        mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        mds[1] = smmd;
        mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds)/sizeof(mds[0]), kIODirectionOutIn, false);
        map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
        assert(map);
        map->release();
        mmd->release();
        mds[0]->release();
        mds[1]->release();
        sbmds[0]->release();
        sbmds[1]->release();
        sbmds[2]->release();

        return (0);
    }
    else if (5 == newValue)
    {
        // DMA segment generation over kernel-text ranges with alignment
        // constraints; results are inspected via kprintf.
        IOReturn ret;
        IOMemoryDescriptor * md;
        IODMACommand * dma;
        IODMACommand::SegmentOptions segOptions =
        {
            .fStructSize = sizeof(segOptions),
            .fNumAddressBits = 64,
            .fMaxSegmentSize = 4096,
            .fMaxTransferSize = 128*1024,
            .fAlignment = 4,
            .fAlignmentLength = 4,
            .fAlignmentInternalSegments = 0x1000
        };

        IOAddressRange ranges[3][2] =
        {
            {
                { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
                { 0, 0 },
            },
            {
                { ranges[0][0].address, 0x10 },
                { 0x3000 + ranges[0][0].address, 0xff0 },
            },
            {
                { ranges[0][0].address, 0x2ffc },
                { trunc_page(ranges[0][0].address), 0x800 },
            },
        };
        static const uint32_t rangesCount[3] = { 1, 2, 2 };
        uint32_t test;

        for (test = 0; test < 3; test++)
        {
            kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
                    ranges[test][0].address, ranges[test][0].length,
                    ranges[test][1].address, ranges[test][1].length);

            md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
            assert(md);
            ret = md->prepare();
            assert(kIOReturnSuccess == ret);
            dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
                                                  IODMACommand::kMapped, NULL, NULL);
            assert(dma);
            ret = dma->setMemoryDescriptor(md, true);
            if (kIOReturnSuccess == ret)
            {
                IODMACommand::Segment64 segments[1];
                UInt32 numSegments;
                UInt64 offset;

                offset = 0;
                do
                {
                    numSegments = 1;
                    ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
                    assert(kIOReturnSuccess == ret);
                    assert(1 == numSegments);
                    kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
                }
                while (offset < md->getLength());

                ret = dma->clearMemoryDescriptor(true);
                assert(kIOReturnSuccess == ret);
                dma->release();
            }
            md->release();
        }

        return (kIOReturnSuccess);
    }
    else if (4 == newValue)
    {
        // Timing of dmaMap()/setMemoryDescriptor() against the "isp"
        // device's mapper for a large (1GB) pageable buffer.
        IOService * isp;
        IOMapper * mapper;
        IOBufferMemoryDescriptor * md1;
        IODMACommand * dma;
        IOReturn ret;
        size_t bufSize = 8192 * 8192 * sizeof(uint32_t);
        uint64_t start, time, nano;

        isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
        assert(isp);
        mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
        assert(mapper);

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                        kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
                        bufSize, page_size);

        ret = md1->prepare();
        assert(kIOReturnSuccess == ret);

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        uint64_t mapped;
        uint64_t mappedLength;

        start = mach_absolute_time();

        ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
        assert(kIOReturnSuccess == ret);

        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);
        kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);

        assert(md1);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
                                              32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);

        assert(dma);

        start = mach_absolute_time();
        ret = dma->setMemoryDescriptor(md1, true);
        assert(kIOReturnSuccess == ret);
        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);


        IODMACommand::Segment32 segments[1];
        UInt32 numSegments = 1;
        UInt64 offset;

        offset = 0;
        ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);

        md1->release();

        return (kIOReturnSuccess);
    }

    if (3 == newValue)
    {
        // IOMemoryMap::redirect() between two buffers: after redirecting
        // map1 to md2, map1 must show md2's contents, and vice versa.
        IOBufferMemoryDescriptor * md1;
        IOBufferMemoryDescriptor * md2;
        IOMemoryMap * map1;
        IOMemoryMap * map2;
        uint32_t * buf1;
        uint32_t * buf2;
        IOReturn err;

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                        kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
                        64*1024, page_size);
        assert(md1);
        map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map1);
        buf1 = (uint32_t *) map1->getVirtualAddress();

        md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                        kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
                        64*1024, page_size);
        assert(md2);
        map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map2);
        buf2 = (uint32_t *) map2->getVirtualAddress();

        memset(buf1, 0x11, 64*1024L);
        memset(buf2, 0x22, 64*1024L);

        kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);

        kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);
        err = map1->redirect(md2, 0, 0ULL);
        kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf2[0]);
        assert(0x22222222 == buf1[0]);
        err = map1->redirect(md1, 0, 0ULL);
        kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);
        map1->release();
        map2->release();
        md1->release();
        md2->release();
    }
#endif

// result = IODMACommandLocalMappedNonContig(newValue);
// if (result) return (result);

    result = IODMACommandForceDoubleBufferTest(newValue);
    if (result) return (result);

    result = AllocationNameTest(newValue);
    if (result) return (result);

    result = IOMemoryMapCopyOnWriteTest(newValue);
    if (result) return (result);

    result = IOMultMemoryDescriptorTest(newValue);
    if (result) return (result);

    result = ZeroLengthTest(newValue);
    if (result) return (result);

    result = IODirectionPrepareNoZeroFillTest(newValue);
    if (result) return (result);

    result = BadFixedAllocTest(newValue);
    if (result) return (result);

    result = IOMemoryRemoteTest(newValue);
    if (result) return (result);

    result = IOMemoryPrefaultTest(newValue);
    if (result) return (result);

    // Exhaustive stress test: fill a 16MB buffer with a positional pattern,
    // then for many (srcoffset, srcsize) shapes build a descriptor (single
    // range, or three deliberately odd ranges), map (mapoffset, size)
    // windows of it, and verify both the mapped contents and readBytes()
    // against the known pattern.
    IOGeneralMemoryDescriptor * md;
    vm_offset_t data[2];
    vm_size_t bsize = 16*1024*1024;
    vm_size_t srcsize, srcoffset, mapoffset, size;
    kern_return_t kr;

    data[0] = data[1] = 0;
    kr = vm_allocate_kernel(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
    assert(KERN_SUCCESS == kr);

    // Fragment the VM map entries so descriptors span entry boundaries.
    vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
    vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);

    IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

    uint32_t idx, offidx;
    // Each 32-bit word holds its own index, so contents are
    // position-verifiable after any remapping.
    for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++)
    {
        ((uint32_t*)data[0])[idx] = idx;
    }

    for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c))
    {
        for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc))
        {
            IOAddressRange ranges[3];
            uint32_t rangeCount = 1;

            bzero(&ranges[0], sizeof(ranges));
            ranges[0].address = data[0] + srcoffset;
            ranges[0].length = srcsize;
            ranges[1].address = ranges[2].address = data[0];

            if (srcsize > ptoa(5))
            {
                // Large source: split into three contiguous, oddly-sized
                // ranges to exercise multi-range handling.
                ranges[0].length = 7634;
                ranges[1].length = 9870;
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[1].address = ranges[0].address + ranges[0].length;
                ranges[2].address = ranges[1].address + ranges[1].length;
                rangeCount = 3;
            }
            else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset))
            {
                // Page-aligned medium source: swap the first two pages so
                // descriptor order differs from VA order (undone during
                // verification below).
                ranges[0].length = ptoa(1);
                ranges[1].length = ptoa(1);
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[0].address = data[0] + srcoffset + ptoa(1);
                ranges[1].address = data[0] + srcoffset;
                ranges[2].address = ranges[0].address + ranges[0].length;
                rangeCount = 3;
            }

            md = OSDynamicCast(IOGeneralMemoryDescriptor,
                IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
            assert(md);

            IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
                  (long) srcsize, (long) srcoffset,
                  (long long) ranges[0].address - data[0], (long long) ranges[0].length,
                  (long long) ranges[1].address - data[0], (long long) ranges[1].length,
                  (long long) ranges[2].address - data[0], (long long) ranges[2].length);

            if (kIOReturnSuccess == kr)
            {
                for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00))
                {
                    for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200))
                    {
                        IOMemoryMap * map;
                        mach_vm_address_t addr = 0;
                        uint32_t data;

// IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

                        map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
                        if (map) addr = map->getAddress();
                        else kr = kIOReturnError;

// IOLog(">mapRef 0x%x %llx\n", kr, addr);

                        if (kIOReturnSuccess != kr) break;
                        kr = md->prepare();
                        if (kIOReturnSuccess != kr)
                        {
                            panic("prepare() fail 0x%x\n", kr);
                            break;
                        }
                        for (idx = 0; idx < size; idx += sizeof(uint32_t))
                        {
                            // Compute the expected word index at this map
                            // position, undoing the page swap when it was
                            // applied to this source shape.
                            offidx = (idx + mapoffset + srcoffset);
                            if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset))
                            {
                                if (offidx < ptoa(2)) offidx ^= ptoa(1);
                            }
                            offidx /= sizeof(uint32_t);

                            if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)])
                            {
                                // Mapped (virtual) contents disagree with pattern.
                                panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                kr = kIOReturnBadMedia;
                            }
                            else
                            {
                                // Cross-check via readBytes() (physical path).
                                if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0;
                                if (offidx != data)
                                {
                                    panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                    kr = kIOReturnBadMedia;
                                }
                            }
                        }
                        md->complete();
                        map->release();
// IOLog("unmapRef %llx\n", addr);
                    }
                    if (kIOReturnSuccess != kr) break;
                }
            }
            md->release();
            if (kIOReturnSuccess != kr) break;
        }
        if (kIOReturnSuccess != kr) break;
    }

    if (kIOReturnSuccess != kr) IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
                                      (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);

    assert(kr == kIOReturnSuccess);

    vm_deallocate(kernel_map, data[0], bsize);
// vm_deallocate(kernel_map, data[1], size);

    IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount);

    return (0);
}
939
940#endif /* DEVELOPMENT || DEBUG */
941