/*
 * Copyright (c) 2014-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <Kernel/IOKitKernelInternal.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOService.h>
#include "Tests.h"

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOGuardPageMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>
#include <sys/uio.h>
#include <libkern/sysctl.h>
#include <sys/sysctl.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
__END_DECLS


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if DEVELOPMENT || DEBUG

extern SInt32 gIOMemoryReferenceCount;

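/*
 * Exercises IOMultiMemoryDescriptor: builds a multi-range descriptor plus
 * two sub-descriptors of it, checks getDMAMapLength() at each layer, then
 * maps the tail of the composite and verifies the per-page fill pattern
 * survives the range shuffling.
 */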
static int
IOMultMemoryDescriptorTest(int newValue)
{
	IOMemoryDescriptor * mds[3];
	IOMultiMemoryDescriptor * mmd;
	IOMemoryMap * map;
	void * addr;
	uint8_t * data;
	uint32_t i;
	IOAddressRange ranges[2];

	// Fill eight pages with a recognizable pattern: every byte of page n
	// holds 0xDn.
	data = (typeof(data))IOMallocAligned(ptoa(8), page_size);
	assert(data);
	for (i = 0; i < ptoa(8); i++) {
		data[i] = ((uint8_t) atop(i)) | 0xD0;
	}

	// Two ranges that swap the buffer halves: pages 4-7 first, then 0-3.
	ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
	ranges[0].length = ptoa(4);
	ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
	ranges[1].length = ptoa(4);

	// A 2-byte descriptor still occupies one page for DMA purposes.
	mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) data, 2, kIODirectionOutIn, kernel_task);
	assert(mds[0]);
	{
		uint64_t dmaLen, dmaOffset;
		dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
		assert(0 == dmaOffset);
		assert(ptoa(1) == dmaLen);
	}
	mds[0]->release();

	// Four bytes straddling a page boundary span two pages.
	mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) (data + page_size - 2), 4, kIODirectionOutIn, kernel_task);
	assert(mds[0]);
	{
		uint64_t dmaLen, dmaOffset;
		dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
		assert((page_size - 2) == dmaOffset);
		assert(ptoa(2) == dmaLen);
	}
	mds[0]->release();

	mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);
	assert(mds[0]);
	{
		uint64_t dmaLen, dmaOffset;
		dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
		assert(0 == dmaOffset);
		assert(ptoa(8) == dmaLen);
	}
	mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
	assert(mds[1]);
	{
		uint64_t dmaLen, dmaOffset;
		dmaLen = mds[1]->getDMAMapLength(&dmaOffset);
		assert(0 == dmaOffset);
		assert(ptoa(2) == dmaLen);
	}
	mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);
	assert(mds[2]);

	// Composite layout, 11 pages of pattern values:
	// D4-D7, D0-D3 from mds[0]; D7, D0 from mds[1]; D3 from mds[2].
	mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
	assert(mmd);
	{
		uint64_t dmaLen, dmaOffset;
		dmaLen = mmd->getDMAMapLength(&dmaOffset);
		assert(0 == dmaOffset);
		assert(ptoa(11) == dmaLen);
	}
	mds[2]->release();
	mds[1]->release();
	mds[0]->release();

	// Map the last four pages of the composite and verify the pattern.
	map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapGuardedSmall, ptoa(7), mmd->getLength() - ptoa(7));
	mmd->release();
	assert(map);

	addr = (void *) map->getVirtualAddress();
	assert(ptoa(4) == map->getLength());
	assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
	assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
	assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
	assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);
	map->release();
	IOFreeAligned(data, ptoa(8));

	return 0;
}



// <rdar://problem/30102458>
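/*
 * For each transfer direction, forces IODMACommand to synchronize through
 * a double (bounce) buffer and verifies the original buffer contents are
 * preserved on output and updated on input.
 */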
static int
IODMACommandForceDoubleBufferTest(int newValue)
{
	IOReturn ret;
	IOBufferMemoryDescriptor * bmd;
	IODMACommand * dma;
	uint32_t dir, data;
	IODMACommand::SegmentOptions segOptions =
	{
		.fStructSize = sizeof(segOptions),
		.fNumAddressBits = 64,
		.fMaxSegmentSize = 0x2000,
		.fMaxTransferSize = 128 * 1024,
		.fAlignment = 1,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = 1
	};
	IODMACommand::Segment64 segments[1];
	UInt32 numSegments;
	UInt64 dmaOffset;


	// Run once per direction: kIODirectionIn, kIODirectionOut, then
	// kIODirectionInOut, which terminates the loop.
	for (dir = kIODirectionIn;; dir++) {
		bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
		    dir | kIOMemoryPageable, ptoa(8));
		assert(bmd);
		{
			uint64_t dmaLen, dmaOffset;
			dmaLen = bmd->getDMAMapLength(&dmaOffset);
			assert(0 == dmaOffset);
			assert(ptoa(8) == dmaLen);
		}

		// Tag the buffer so the bounce-buffer copies can be verified.
		((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir;

		ret = bmd->prepare((IODirection) dir);
		assert(kIOReturnSuccess == ret);

		dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
		    kIODMAMapOptionMapped,
		    NULL, NULL);
		assert(dma);
		ret = dma->setMemoryDescriptor(bmd, true);
		assert(kIOReturnSuccess == ret);

		ret = dma->synchronize(IODMACommand::kForceDoubleBuffer | kIODirectionOut);
		assert(kIOReturnSuccess == ret);

		dmaOffset = 0;
		numSegments = 1;
		ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
		assert(kIOReturnSuccess == ret);
		assert(1 == numSegments);

		if (kIODirectionOut & dir) {
			// The source buffer must still hold the marker after the
			// copy out to the double buffer.
			data = ((uint32_t*) bmd->getBytesNoCopy())[0];
			assertf((0x53535300 | dir) == data, "mismatch 0x%x", data);
		}
		if (kIODirectionIn & dir) {
			// Write through the DMA address; complete() should copy it back.
			IOMappedWrite32(segments[0].fIOVMAddr, 0x11223300 | dir);
		}

		ret = dma->clearMemoryDescriptor(true);
		assert(kIOReturnSuccess == ret);
		dma->release();

		bmd->complete((IODirection) dir);

		if (kIODirectionIn & dir) {
			data = ((uint32_t*) bmd->getBytesNoCopy())[0];
			assertf((0x11223300 | dir) == data, "mismatch 0x%x", data);
		}

		bmd->release();

		if (dir == kIODirectionInOut) {
			break;
		}
	}

	return 0;
}

// <rdar://problem/34322778>
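/*
 * Allocates a buffer, fragments its VM entries, then checks that a DMA
 * command bound to a device mapper returns an IOVM address distinct from
 * the physical address, i.e. the range was really remapped.
 */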
static int __unused
IODMACommandLocalMappedNonContig(int newValue)
{
	IOReturn kr;
	IOMemoryDescriptor * md;
	IODMACommand * dma;
	OSDictionary * matching;
	IOService * device;
	IOMapper * mapper;
	IODMACommand::SegmentOptions segOptions =
	{
		.fStructSize = sizeof(segOptions),
		.fNumAddressBits = 64,
		.fMaxSegmentSize = 128 * 1024,
		.fMaxTransferSize = 128 * 1024,
		.fAlignment = 1,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = 1
	};
	IODMACommand::Segment64 segments[1];
	UInt32 numSegments;
	UInt64 dmaOffset;
	UInt64 segPhys;
	mach_vm_address_t buffer;
	vm_size_t bufSize = ptoa(4);

	if (!IOMapper::gSystem) {
		return 0;
	}

	buffer = 0;
	kr = mach_vm_allocate_kernel(kernel_map, &buffer, bufSize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
	assert(KERN_SUCCESS == kr);

	// fragment the vmentries
	kr = mach_vm_inherit(kernel_map, buffer + ptoa(1), ptoa(1), VM_INHERIT_NONE);
	assert(KERN_SUCCESS == kr);

	// An unaligned range spanning the fragmented entries.
	md = IOMemoryDescriptor::withAddressRange(
		buffer + 0xa00, 0x2000, kIODirectionOutIn, kernel_task);
	assert(md);
	kr = md->prepare(kIODirectionOutIn);
	assert(kIOReturnSuccess == kr);

	segPhys = md->getPhysicalSegment(0, NULL, 0);

	matching = IOService::nameMatching("XHC1");
	assert(matching);
	device = IOService::copyMatchingService(matching);
	matching->release();
	mapper = device ? IOMapper::copyMapperForDeviceWithIndex(device, 0) : NULL;
	OSSafeReleaseNULL(device);

	dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
	    kIODMAMapOptionMapped,
	    mapper, NULL);
	assert(dma);
	kr = dma->setMemoryDescriptor(md, true);
	assert(kIOReturnSuccess == kr);

	dmaOffset = 0;
	numSegments = 1;
	kr = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
	assert(kIOReturnSuccess == kr);
	assert(1 == numSegments);

	if (mapper) {
		// A mapped command must return an IOVM address, not the raw
		// physical address.
		assertf(segments[0].fIOVMAddr != segPhys, "phys !local 0x%qx, 0x%qx, %p", segments[0].fIOVMAddr, segPhys, dma);
	}

	kr = dma->clearMemoryDescriptor(true);
	assert(kIOReturnSuccess == kr);
	dma->release();

	kr = md->complete(kIODirectionOutIn);
	assert(kIOReturnSuccess == kr);
	md->release();

	kr = mach_vm_deallocate(kernel_map, buffer, bufSize);
	assert(KERN_SUCCESS == kr);
	OSSafeReleaseNULL(mapper);

	return 0;
}

// <rdar://problem/30102458>
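/*
 * Builds a descriptor over remote (device) address ranges with
 * kIOMemoryRemote and TASK_NULL, then verifies that getPhysicalSegment()
 * and gen64IOVMSegments() both hand back the original ranges unchanged.
 */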
static int
IOMemoryRemoteTest(int newValue)
{
	IOReturn ret;
	IOMemoryDescriptor * md;
	IOByteCount offset, length;
	addr64_t addr;
	uint32_t idx;

	IODMACommand * dma;
	IODMACommand::SegmentOptions segOptions =
	{
		.fStructSize = sizeof(segOptions),
		.fNumAddressBits = 64,
		.fMaxSegmentSize = 0x2000,
		.fMaxTransferSize = 128 * 1024,
		.fAlignment = 1,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = 1
	};
	IODMACommand::Segment64 segments[1];
	UInt32 numSegments;
	UInt64 dmaOffset;

	// Remote (device) addresses; nothing here is backed by host memory.
	IOAddressRange ranges[2] = {
		{ 0x1234567890123456ULL, 0x1000 }, { 0x5432109876543210ULL, 0x2000 },
	};

	md = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn | kIOMemoryRemote, TASK_NULL);
	assert(md);

//	md->map();
//	md->readBytes(0, &idx, sizeof(idx));

	ret = md->prepare(kIODirectionOutIn);
	assert(kIOReturnSuccess == ret);

	printf("remote md flags 0x%qx, r %d\n",
	    md->getFlags(), (0 != (kIOMemoryRemote & md->getFlags())));

	// getPhysicalSegment() must return the ranges unchanged.
	for (offset = 0, idx = 0; true; offset += length, idx++) {
		addr = md->getPhysicalSegment(offset, &length, 0);
		if (!length) {
			break;
		}
		assert(idx < 2);
		assert(addr == ranges[idx].address);
		assert(length == ranges[idx].length);
	}
	assert(offset == md->getLength());

	dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
	    kIODMAMapOptionUnmapped | kIODMAMapOptionIterateOnly,
	    NULL, NULL);
	assert(dma);
	ret = dma->setMemoryDescriptor(md, true);
	assert(kIOReturnSuccess == ret);

	// An unmapped, iterate-only DMA command must also see the raw ranges.
	for (dmaOffset = 0, idx = 0; dmaOffset < md->getLength(); idx++) {
		numSegments = 1;
		ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
		assert(kIOReturnSuccess == ret);
		assert(1 == numSegments);
		assert(idx < 2);
		assert(segments[0].fIOVMAddr == ranges[idx].address);
		assert(segments[0].fLength == ranges[idx].length);
	}
	assert(dmaOffset == md->getLength());

	ret = dma->clearMemoryDescriptor(true);
	assert(kIOReturnSuccess == ret);
	dma->release();
	md->complete(kIODirectionOutIn);
	md->release();

	return 0;
}

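/*
 * Maps a pageable buffer with kIOMapPrefault and reads it while holding a
 * simple lock; since simple locks disable preemption, the read would panic
 * if the page had not been faulted in up front.
 */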
static IOReturn
IOMemoryPrefaultTest(uint32_t options)
{
	IOBufferMemoryDescriptor * bmd;
	IOMemoryMap * map;
	IOReturn kr;
	uint32_t data;
	uint32_t * p;
	IOSimpleLock * lock;

	lock = IOSimpleLockAlloc();
	assert(lock);

	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
	    kIODirectionOutIn | kIOMemoryPageable, ptoa(8));
	assert(bmd);
	kr = bmd->prepare();
	assert(kIOReturnSuccess == kr);

	map = bmd->map(kIOMapPrefault);
	assert(map);

	// Read with the lock held: a page fault here would panic.
	p = (typeof(p))map->getVirtualAddress();
	IOSimpleLockLock(lock);
	data = p[0];
	IOSimpleLockUnlock(lock);

	IOLog("IOMemoryPrefaultTest %d\n", data);

	map->release();
	bmd->release();
	IOSimpleLockFree(lock);

	return kIOReturnSuccess;
}

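/*
 * An inTaskWithPhysicalMask() request with an impossibly large capacity
 * must fail cleanly (return NULL) rather than overflow its size math.
 */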
static IOReturn
IOBMDOverflowTest(uint32_t options)
{
	IOBufferMemoryDescriptor * bmd;

	bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, kIOMemoryPageable | kIODirectionOut,
	    0xffffffffffffffff, 0);
	assert(NULL == bmd);

	return kIOReturnSuccess;
}

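/*
 * Checks that a shared buffer descriptor can still be mapped into a task
 * after each setLength() call.
 */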
static IOReturn
IOBMDSetLengthMapTest(uint32_t options)
{
	IOBufferMemoryDescriptor * bmd;
	IOMemoryMap * map;

	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(
		kernel_task, kIOMemoryDirectionOutIn | kIOMemoryKernelUserShared, 0x4000, 0x4000);
	assert(bmd);

	bmd->setLength(0x100);
	map = bmd->createMappingInTask(current_task(), 0, kIOMapAnywhere, 0, 0);
	assert(map);
	OSSafeReleaseNULL(map);

	bmd->setLength(0x200);
	map = bmd->createMappingInTask(current_task(), 0, kIOMapAnywhere, 0, 0);
	assert(map);
	OSSafeReleaseNULL(map);

	bmd->release();

	return kIOReturnSuccess;
}

// <rdar://problem/26375234>
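/* A zero-length descriptor must survive prepare() and complete(). */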
static IOReturn
ZeroLengthTest(int newValue)
{
	IOMemoryDescriptor * md;

	md = IOMemoryDescriptor::withAddressRange(
		0, 0, kIODirectionNone, current_task());
	assert(md);
	md->prepare();
	md->complete();
	md->release();
	return 0;
}

// <rdar://problem/27002624>
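/* A fixed-address mapping request at an unusable address must fail. */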
static IOReturn
BadFixedAllocTest(int newValue)
{
	IOBufferMemoryDescriptor * bmd;
	IOMemoryMap * map;

	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
	    kIODirectionIn | kIOMemoryPageable, ptoa(1));
	assert(bmd);
	map = bmd->createMappingInTask(kernel_task, 0x2000, 0);
	assert(!map);

	bmd->release();
	return 0;
}

// <rdar://problem/26466423>
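/*
 * Exercises the prepare()/complete() option bits that skip zero-filling on
 * prepare and mark the data valid on complete; the test only checks that
 * the sequence runs without tripping assertions.
 */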
static IOReturn
IODirectionPrepareNoZeroFillTest(int newValue)
{
	IOBufferMemoryDescriptor * bmd;

	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
	    kIODirectionIn | kIOMemoryPageable, ptoa(24));
	assert(bmd);
	bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill));
	bmd->prepare(kIODirectionIn);
	bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid));
	bmd->complete(kIODirectionIn);
	bmd->release();
	return 0;
}

// <rdar://problem/28190483>
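/*
 * Writes markers into a pageable user buffer, maps it read-only with and
 * without kIOMemoryMapCopyOnWrite, rewrites one page, and verifies that a
 * copy-on-write mapping keeps the old data while a plain mapping sees the
 * new data. The map() call is also timed.
 */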
static IOReturn
IOMemoryMapTest(uint32_t options)
{
	IOBufferMemoryDescriptor * bmd;
	IOMemoryDescriptor * md;
	IOMemoryMap * map;
	uint32_t data;
	user_addr_t p;
	uint8_t * p2;
	int r;
	uint64_t time, nano;

	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
	    kIODirectionOutIn | kIOMemoryPageable, 0x4018 + 0x800);
	assert(bmd);

	// Drop a distinct marker into each page of the unaligned user range.
	p = (typeof(p))bmd->getBytesNoCopy();
	p += 0x800;
	data = 0x11111111;
	r = copyout(&data, p, sizeof(data));
	assert(r == 0);
	data = 0x22222222;
	r = copyout(&data, p + 0x1000, sizeof(data));
	assert(r == 0);
	data = 0x33333333;
	r = copyout(&data, p + 0x2000, sizeof(data));
	assert(r == 0);
	data = 0x44444444;
	r = copyout(&data, p + 0x3000, sizeof(data));
	assert(r == 0);

	md = IOMemoryDescriptor::withAddressRange(p, 0x4018,
	    kIODirectionOut | options,
	    current_task());
	assert(md);
	time = mach_absolute_time();
	map = md->map(kIOMapReadOnly);
	time = mach_absolute_time() - time;
	assert(map);
	absolutetime_to_nanoseconds(time, &nano);

	p2 = (typeof(p2))map->getVirtualAddress();
	assert(0x11 == p2[0]);
	assert(0x22 == p2[0x1000]);
	assert(0x33 == p2[0x2000]);
	assert(0x44 == p2[0x3000]);

	// Modify the source; a copy-on-write mapping must keep the old value,
	// a plain mapping must observe the new one.
	data = 0x99999999;
	r = copyout(&data, p + 0x2000, sizeof(data));
	assert(r == 0);

	assert(0x11 == p2[0]);
	assert(0x22 == p2[0x1000]);
	assert(0x44 == p2[0x3000]);
	if (kIOMemoryMapCopyOnWrite & options) {
		assert(0x33 == p2[0x2000]);
	} else {
		assert(0x99 == p2[0x2000]);
	}

	IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
	    kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "",
	    nano);

	map->release();
	md->release();
	bmd->release();

	return kIOReturnSuccess;
}

static int
IOMemoryMapCopyOnWriteTest(int newValue)
{
	IOMemoryMapTest(0);
	IOMemoryMapTest(kIOMemoryMapCopyOnWrite);
	return 0;
}

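/*
 * Tags a buffer allocation with a kern_allocation_name and restores the
 * prior name afterwards; see the deliberate leak below for newValue == 7.
 */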
static int
AllocationNameTest(int newValue)
{
	IOMemoryDescriptor * bmd;
	kern_allocation_name_t name, prior;

	name = kern_allocation_name_allocate("com.apple.iokit.test", 0);
	assert(name);

	prior = thread_set_allocation_name(name);

	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
	    kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
	    ptoa(13));
	assert(bmd);
	bmd->prepare();

	thread_set_allocation_name(prior);
	kern_allocation_name_release(name);

	// Leak the buffer when newValue == 7, presumably so the named
	// allocation remains visible for inspection.
	if (newValue != 7) {
		bmd->release();
	}

	return 0;
}

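/*
 * Sandwiches a data page between IOGuardPageMemoryDescriptors of varying
 * sizes and verifies the mapped data, the map length, and the guard page
 * protections; then checks that inTaskWithGuardPages() rounds capacity up
 * to a page, page-aligns the buffer, and maps back identical contents.
 */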
static IOReturn
IOGuardPageMDTest(int newValue)
{
	constexpr size_t MAX_LEFT_GUARD_PAGES = 5;
	constexpr size_t MAX_RIGHT_GUARD_PAGES = 5;

	IOMemoryDescriptor * mds[3];
	IOMemoryDescriptor * dataMD;
	IOMultiMemoryDescriptor * mmd;
	IOBufferMemoryDescriptor * iobmd;
	IOMemoryMap * map;
	void * addr;
	uint8_t * data;
	uint32_t i;

	data = (typeof(data))IOMallocAligned(page_size, page_size);
	assert(data);
	for (i = 0; i < page_size; i++) {
		data[i] = (uint8_t)(i & 0xFF);
	}

	dataMD = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) data, page_size, kIODirectionOutIn, kernel_task);
	assert(dataMD);


	for (size_t leftGuardSize = 1; leftGuardSize < MAX_LEFT_GUARD_PAGES; leftGuardSize++) {
		for (size_t rightGuardSize = 1; rightGuardSize < MAX_RIGHT_GUARD_PAGES; rightGuardSize++) {
			// layout: left guard pages | data page | right guard pages
			mds[0] = IOGuardPageMemoryDescriptor::withSize(page_size * leftGuardSize);
			assert(mds[0]);

			mds[1] = dataMD;
			mds[1]->retain();

			mds[2] = IOGuardPageMemoryDescriptor::withSize(page_size * rightGuardSize);
			assert(mds[2]);

			mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
			assert(mmd);

			OSSafeReleaseNULL(mds[2]);
			OSSafeReleaseNULL(mds[1]);
			OSSafeReleaseNULL(mds[0]);

			map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, mmd->getLength());

			OSSafeReleaseNULL(mmd);
			assert(map);
			addr = (void *)map->getAddress();

			// check data
			for (i = 0; i < page_size; i++) {
				assert(*(uint8_t *)((uintptr_t)addr + page_size * leftGuardSize + i) == (uint8_t)(i & 0xFF));
			}

			// check map length
			assert(page_size * leftGuardSize + page_size + page_size * rightGuardSize == map->getLength());

			// check page protections
			for (i = 0; i < leftGuardSize + 1 + rightGuardSize; i++) {
				mach_vm_address_t regionAddr = (vm_address_t)addr + i * page_size;
				mach_vm_size_t regionSize;
				vm_region_extended_info regionInfo;
				mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
				mach_port_t unused;
				kern_return_t kr = mach_vm_region(kernel_map, &regionAddr, &regionSize, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&regionInfo, &count, &unused);
				assert(kr == KERN_SUCCESS);
				// Every page except the data page (index leftGuardSize)
				// must be protected VM_PROT_NONE.
				if (i != leftGuardSize) {
					assert(regionInfo.protection == VM_PROT_NONE);
				}
			}
			OSSafeReleaseNULL(map);
		}
	}

	OSSafeReleaseNULL(dataMD);
	IOFreeAligned(data, page_size);

	for (size_t iobmdCapacity = page_size / 8; iobmdCapacity < page_size * 10; iobmdCapacity += page_size / 8) {
		iobmd = IOBufferMemoryDescriptor::inTaskWithGuardPages(kernel_task, kIODirectionOutIn, iobmdCapacity);
		assert(iobmd);

		// Capacity should be rounded up to page size
		assert(iobmd->getLength() == round_page(iobmdCapacity));

		// Buffer should be page aligned
		addr = iobmd->getBytesNoCopy();
		assert((vm_offset_t)addr == round_page((vm_offset_t)addr));

		// fill buffer
		for (size_t i = 0; i < iobmdCapacity; i++) {
			*((char *)addr + i) = (char)(i & 0xFF);
		}

		map = iobmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique, 0, iobmd->getLength());
		assert(map);
		assert(map->getLength() == iobmd->getLength());

		// check buffer
		for (size_t i = 0; i < iobmdCapacity; i++) {
			assert(*((char *)map->getAddress() + i) == (char)(i & 0xFF));
		}

		OSSafeReleaseNULL(map);
		OSSafeReleaseNULL(iobmd);
	}

	return kIOReturnSuccess;
}

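/*
 * Verifies setContext()/copyContext(): the context starts out NULL, can be
 * set, replaced, and cleared, and the references are dropped on release.
 */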
static IOReturn
IOMDContextTest(int newValue)
{
	IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
	    kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
	    ptoa(13));

	OSObject * current = NULL;
	OSString * firstString = OSString::withCStringNoCopy("firstString");
	OSString * secondString = OSString::withCStringNoCopy("secondString");

	assert(bmd);
	assert(bmd->copyContext() == NULL);

	bmd->setContext(NULL);
	assert(bmd->copyContext() == NULL);

	bmd->setContext(firstString);
	current = bmd->copyContext();
	assert(current == firstString);
	OSSafeReleaseNULL(current);

	bmd->setContext(NULL);
	assert(bmd->copyContext() == NULL);

	bmd->setContext(secondString);
	current = bmd->copyContext();
	assert(current == secondString);
	OSSafeReleaseNULL(current);

	bmd->release();

	// The descriptor must have dropped its context references on release.
	assert(firstString->getRetainCount() == 1);
	assert(secondString->getRetainCount() == 1);

	firstString->release();
	secondString->release();

	return kIOReturnSuccess;
}

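/*
 * Test entry point: runs each subtest above, then an exhaustive sweep that
 * builds descriptors over many source offsets, sizes, and range splits,
 * maps sub-ranges of each, and cross-checks the mapped contents against
 * readBytes() and the original fill pattern.
 */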
int
IOMemoryDescriptorTest(int newValue)
{
	int result;

	IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount);

#if 0
	if (6 == newValue) {
		IOMemoryDescriptor * sbmds[3];
		IOMultiMemoryDescriptor * smmd;
		IOMemoryDescriptor * mds[2];
		IOMultiMemoryDescriptor * mmd;
		IOMemoryMap * map;

		sbmds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
		sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2));
		sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3));
		smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds) / sizeof(sbmds[0]), kIODirectionOutIn, false);

		mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
		mds[1] = smmd;
		mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
		map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapGuardedSmall);
		assert(map);
		map->release();
		mmd->release();
		mds[0]->release();
		mds[1]->release();
		sbmds[0]->release();
		sbmds[1]->release();
		sbmds[2]->release();

		return 0;
	} else if (5 == newValue) {
		IOReturn ret;
		IOMemoryDescriptor * md;
		IODMACommand * dma;
		IODMACommand::SegmentOptions segOptions =
		{
			.fStructSize = sizeof(segOptions),
			.fNumAddressBits = 64,
			.fMaxSegmentSize = 4096,
			.fMaxTransferSize = 128 * 1024,
			.fAlignment = 4,
			.fAlignmentLength = 4,
			.fAlignmentInternalSegments = 0x1000
		};

		IOAddressRange ranges[3][2] =
		{
			{
				{ (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
				{ 0, 0 },
			},
			{
				{ ranges[0][0].address, 0x10 },
				{ 0x3000 + ranges[0][0].address, 0xff0 },
			},
			{
				{ ranges[0][0].address, 0x2ffc },
				{ trunc_page(ranges[0][0].address), 0x800 },
			},
		};
		static const uint32_t rangesCount[3] = { 1, 2, 2 };
		uint32_t test;

		for (test = 0; test < 3; test++) {
			kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
			    ranges[test][0].address, ranges[test][0].length,
			    ranges[test][1].address, ranges[test][1].length);

			md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
			assert(md);
			ret = md->prepare();
			assert(kIOReturnSuccess == ret);
			dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
			    IODMACommand::kMapped, NULL, NULL);
			assert(dma);
			ret = dma->setMemoryDescriptor(md, true);
			if (kIOReturnSuccess == ret) {
				IODMACommand::Segment64 segments[1];
				UInt32 numSegments;
				UInt64 offset;

				offset = 0;
				do {
					numSegments = 1;
					ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
					assert(kIOReturnSuccess == ret);
					assert(1 == numSegments);
					kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
				} while (offset < md->getLength());

				ret = dma->clearMemoryDescriptor(true);
				assert(kIOReturnSuccess == ret);
				dma->release();
			}
			md->release();
		}

		return kIOReturnSuccess;
	} else if (4 == newValue) {
		IOService * isp;
		IOMapper * mapper;
		IOBufferMemoryDescriptor * md1;
		IODMACommand * dma;
		IOReturn ret;
		size_t bufSize = 8192 * 8192 * sizeof(uint32_t);
		uint64_t start, time, nano;

		isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
		assert(isp);
		mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
		assert(mapper);

		md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
		    kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
		    bufSize, page_size);

		ret = md1->prepare();
		assert(kIOReturnSuccess == ret);

		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		uint64_t mapped;
		uint64_t mappedLength;

		start = mach_absolute_time();

		ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
		assert(kIOReturnSuccess == ret);

		time = mach_absolute_time() - start;

		absolutetime_to_nanoseconds(time, &nano);
		kprintf("time %lld us\n", nano / 1000ULL);
		kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);

		assert(md1);

		dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
		    32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);

		assert(dma);

		start = mach_absolute_time();
		ret = dma->setMemoryDescriptor(md1, true);
		assert(kIOReturnSuccess == ret);
		time = mach_absolute_time() - start;

		absolutetime_to_nanoseconds(time, &nano);
		kprintf("time %lld us\n", nano / 1000ULL);


		IODMACommand::Segment32 segments[1];
		UInt32 numSegments = 1;
		UInt64 offset;

		offset = 0;
		ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
		assert(kIOReturnSuccess == ret);
		assert(1 == numSegments);
		kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);

		ret = dma->clearMemoryDescriptor(true);
		assert(kIOReturnSuccess == ret);

		md1->release();

		return kIOReturnSuccess;
	}

	if (3 == newValue) {
		IOBufferMemoryDescriptor * md1;
		IOBufferMemoryDescriptor * md2;
		IOMemoryMap * map1;
		IOMemoryMap * map2;
		uint32_t * buf1;
		uint32_t * buf2;
		IOReturn err;

		md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
		    kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
		    64 * 1024, page_size);
		assert(md1);
		map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
		assert(map1);
		buf1 = (uint32_t *) map1->getVirtualAddress();

		md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
		    kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
		    64 * 1024, page_size);
		assert(md2);
		map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
		assert(map2);
		buf2 = (uint32_t *) map2->getVirtualAddress();

		memset(buf1, 0x11, 64 * 1024L);
		memset(buf2, 0x22, 64 * 1024L);

		kprintf("md1 %p, map1 %p, buf1 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);

		kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
		assert(0x11111111 == buf1[0]);
		assert(0x22222222 == buf2[0]);
		err = map1->redirect(md2, 0, 0ULL);
		kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
		assert(0x11111111 == buf2[0]);
		assert(0x22222222 == buf1[0]);
		err = map1->redirect(md1, 0, 0ULL);
		kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
		assert(0x11111111 == buf1[0]);
		assert(0x22222222 == buf2[0]);
		map1->release();
		map2->release();
		md1->release();
		md2->release();
	}
#endif

//	result = IODMACommandLocalMappedNonContig(newValue);
//	if (result) return (result);

	result = IODMACommandForceDoubleBufferTest(newValue);
	if (result) {
		return result;
	}

	result = AllocationNameTest(newValue);
	if (result) {
		return result;
	}

	result = IOMemoryMapCopyOnWriteTest(newValue);
	if (result) {
		return result;
	}

	result = IOMultMemoryDescriptorTest(newValue);
	if (result) {
		return result;
	}

	result = IOBMDOverflowTest(newValue);
	if (result) {
		return result;
	}

	result = IOBMDSetLengthMapTest(newValue);
	if (result) {
		return result;
	}

	result = ZeroLengthTest(newValue);
	if (result) {
		return result;
	}

	result = IODirectionPrepareNoZeroFillTest(newValue);
	if (result) {
		return result;
	}

	result = BadFixedAllocTest(newValue);
	if (result) {
		return result;
	}

	result = IOMemoryRemoteTest(newValue);
	if (result) {
		return result;
	}

	result = IOMemoryPrefaultTest(newValue);
	if (result) {
		return result;
	}

	result = IOGuardPageMDTest(newValue);
	if (result) {
		return result;
	}

	result = IOMDContextTest(newValue);
	if (result) {
		return result;
	}

	IOGeneralMemoryDescriptor * md;
	mach_vm_offset_t data[2];
	vm_size_t bsize = 16 * 1024 * 1024;
	vm_size_t srcsize, srcoffset, mapoffset, size;
	kern_return_t kr;

	data[0] = data[1] = 0;
	kr = mach_vm_allocate_kernel(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
	assert(KERN_SUCCESS == kr);

	// Fragment the VM entries so descriptors span entry boundaries.
	mach_vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
	mach_vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);

	IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

	uint32_t idx, offidx;
	for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) {
		((uint32_t*)data[0])[idx] = idx;
	}

	for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c)) {
		for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc)) {
			IOAddressRange ranges[3];
			uint32_t rangeCount = 1;

			bzero(&ranges[0], sizeof(ranges));
			ranges[0].address = data[0] + srcoffset;
			ranges[0].length = srcsize;
			ranges[1].address = ranges[2].address = data[0];

			if (srcsize > ptoa(5)) {
				// Split large sources into three contiguous ranges.
				ranges[0].length = 7634;
				ranges[1].length = 9870;
				ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
				ranges[1].address = ranges[0].address + ranges[0].length;
				ranges[2].address = ranges[1].address + ranges[1].length;
				rangeCount = 3;
			} else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
				// Page-aligned medium sources: swap the first two pages.
				ranges[0].length = ptoa(1);
				ranges[1].length = ptoa(1);
				ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
				ranges[0].address = data[0] + srcoffset + ptoa(1);
				ranges[1].address = data[0] + srcoffset;
				ranges[2].address = ranges[0].address + ranges[0].length;
				rangeCount = 3;
			}

			md = OSDynamicCast(IOGeneralMemoryDescriptor,
			    IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
			assert(md);

			IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
			    (long) srcsize, (long) srcoffset,
			    (long long) ranges[0].address - data[0], (long long) ranges[0].length,
			    (long long) ranges[1].address - data[0], (long long) ranges[1].length,
			    (long long) ranges[2].address - data[0], (long long) ranges[2].length);

			if (kIOReturnSuccess == kr) {
				for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) {
					for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200)) {
						IOMemoryMap * map;
						mach_vm_address_t addr = 0;
						uint32_t data;

//						IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

						map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapGuardedSmall, mapoffset, size);
						if (map) {
							addr = map->getAddress();
						} else {
							kr = kIOReturnError;
						}

//						IOLog(">mapRef 0x%x %llx\n", kr, addr);

						if (kIOReturnSuccess != kr) {
							break;
						}
						kr = md->prepare();
						if (kIOReturnSuccess != kr) {
							panic("prepare() fail 0x%x", kr);
							break;
						}
						for (idx = 0; idx < size; idx += sizeof(uint32_t)) {
							offidx = (typeof(offidx))(idx + mapoffset + srcoffset);
							if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
								// Undo the page swap applied above.
								if (offidx < ptoa(2)) {
									offidx ^= ptoa(1);
								}
							}
							offidx /= sizeof(uint32_t);

							if (offidx != ((uint32_t*)addr)[idx / sizeof(uint32_t)]) {
								panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx,", md, map, idx, (long) srcoffset, (long) mapoffset);
								kr = kIOReturnBadMedia;
							} else {
								if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) {
									data = 0;
								}
								if (offidx != data) {
									panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx,", md, map, idx, (long) srcoffset, (long) mapoffset);
									kr = kIOReturnBadMedia;
								}
							}
						}
						md->complete();
						map->release();
//						IOLog("unmapRef %llx\n", addr);
					}
					if (kIOReturnSuccess != kr) {
						break;
					}
				}
			}
			md->release();
			if (kIOReturnSuccess != kr) {
				break;
			}
		}
		if (kIOReturnSuccess != kr) {
			break;
		}
	}

	if (kIOReturnSuccess != kr) {
		IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
		    (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
	}

	assert(kr == kIOReturnSuccess);

	mach_vm_deallocate(kernel_map, data[0], bsize);
//	mach_vm_deallocate(kernel_map, data[1], size);

	IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount);

	return 0;
}


#endif /* DEVELOPMENT || DEBUG */
