/*
 * Copyright (c) 2015-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#if defined(__x86_64__)
#include <libkern/c++/OSKext.h> // IOSKCopyKextIdentifierWithAddress()
#endif

#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>
#include <IOKit/IOCommand.h>
#include <IOKit/IOLib.h>
#include <IOKit/skywalk/IOSkywalkSupport.h>
#include <skywalk/os_skywalk_private.h>
#include <sys/errno.h>
#include <sys/queue.h>

#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/vm_types.h>

#define ELOG(fmt, args...)      SK_ERR(fmt, ##args)
#define DLOG(fmt, args...)      SK_DF(SK_VERB_IOSK, fmt, ##args)
#define IOSK_SIZE_OK(x)         (((x) != 0) && (round_page(x) == (x)))
#define IOSK_OFFSET_OK(x)       (round_page(x) == (x))
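// For example, with 4K pages IOSK_SIZE_OK(0x1000) and IOSK_OFFSET_OK(0) hold,
// while IOSK_SIZE_OK(0) and IOSK_SIZE_OK(0x1001) do not: sizes must be
// nonzero and page-aligned, offsets merely page-aligned.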

static vm_tag_t
getVMTagForMap( vm_map_t map )
{
    return (map == kernel_map) ?
           VM_KERN_MEMORY_SKYWALK : VM_MEMORY_SKYWALK;
}

class IOSKMemoryArray : public IOMultiMemoryDescriptor
{
    OSDeclareFinalStructors( IOSKMemoryArray );

public:
    bool overwriteMappingInTask(
        task_t intoTask,
        mach_vm_address_t * startAddr,
        IOOptionBits options );
};

class IOSKMemoryBuffer : public IOBufferMemoryDescriptor
{
    OSDeclareFinalStructors( IOSKMemoryBuffer );

public:
    bool initWithSpec( task_t inTask,
        mach_vm_size_t capacity,
        mach_vm_address_t alignment,
        const IOSKMemoryBufferSpec * spec );

    virtual void * getBytesNoCopy( void ) APPLE_KEXT_OVERRIDE;

    virtual void * getBytesNoCopy(vm_size_t start, vm_size_t withLength) APPLE_KEXT_OVERRIDE;

    bool
    isWired( void ) const
    {
        return _wireCount != 0;
    }

    IOSKMemoryBufferSpec fSpec;
    void                 *fKernelAddr;
    IOMemoryMap          *fKernelReadOnlyMapping;

protected:
    virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
    virtual void free( void ) APPLE_KEXT_OVERRIDE;
};

// FIXME: rename IOSKMemoryBuffer -> IOSKBuffer
typedef IOSKMemoryBuffer IOSKBuffer;

// IOSKRegionMapper:
// Tracks all memory mappings of a single IOSKRegion, with an array of
// IOMemoryMaps to map the region's memory segments.
// Created and released by the parent IOSKMapper.

class IOSKRegionMapper : public OSObject
{
    OSDeclareFinalStructors( IOSKRegionMapper );

public:
    bool initWithMapper( IOSKMapper * mapper, IOSKRegion * region,
        IOSKOffset regionOffset );

    IOReturn map( IOSKIndex segIndex, IOSKBuffer * buffer );
    void unmap( IOSKIndex segIndex, vm_prot_t prot );

    kern_return_t mapOverwrite( vm_map_offset_t addr,
        vm_map_size_t size, vm_prot_t prot );

private:
    virtual void free( void ) APPLE_KEXT_OVERRIDE;

    IOSKMapper *    fMapper;
    IOSKRegion *    fRegion;
    IOMemoryMap **  fMemoryMaps;
    IOSKCount       fMemoryMapCount;
    IOSKOffset      fRegionOffset;
};

// IOSKMapper:
// Manages all memory mappings of a single task, with an array of
// IOSKRegionMappers to map all memory regions of a memory arena.
// Retains the IOSKArena.

class IOSKMapper : public OSObject
{
    OSDeclareFinalStructors( IOSKMapper );
    friend class IOSKRegionMapper;

public:
    bool initWithTask( task_t task, IOSKArena * arena );

    IOReturn map( IOSKIndex regIndex, IOSKIndex segIndex, IOSKBuffer * buffer );
    void unmap( IOSKIndex regIndex, IOSKIndex segIndex, vm_prot_t prot );

    mach_vm_address_t
    getMapAddress( mach_vm_size_t * size ) const
    {
        if (size) {
            *size = fMapSize;
        }
        return fMapAddr;
    }

    IOSKArena *
    getArena( void ) const
    {
        return fArena;
    }
    bool
    isRedirected( void ) const
    {
        return fRedirected;
    }
    void
    redirectMap( void )
    {
        fRedirected = true;
    }

private:
    virtual void free( void ) APPLE_KEXT_OVERRIDE;

    task_t              fTask;
    vm_map_t            fTaskMap;
    IOSKArena *         fArena;
    OSArray *           fSubMaps;
    mach_vm_address_t   fMapAddr;
    mach_vm_size_t      fMapSize;
    bool                fRedirected;
};

// IOSKArena:
// An array of IOSKRegions is used to create an IOSKArena.
// One or more IOSKMapper can map the arena memory to tasks.
// Retains the IOSKRegions, also circularly retains the IOSKMapper(s)
// until the client calls IOSKMapperDestroy().
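//
// A minimal creation sketch (illustrative only; error checking elided),
// using the C interfaces exported at the bottom of this file:
//
//   IOSKRegionRef regions[2];
//   regions[0] = IOSKRegionCreate(NULL, (IOSKSize) ptoa(1), 2);
//   regions[1] = IOSKRegionCreate(NULL, (IOSKSize) ptoa(2), 3);
//   IOSKArenaRef arena = IOSKArenaCreate(regions, 2);
//   ...
//   IOSKArenaDestroy(arena);   // drops the arena's retain on each region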

class IOSKArena : public OSObject
{
    OSDeclareFinalStructors( IOSKArena );

public:
    bool initWithRegions( IOSKRegion ** regions,
        IOSKCount regionCount );

    IOReturn createMapperForTask( task_t task,
        LIBKERN_RETURNS_RETAINED IOSKMapper ** mapper );
    void redirectMap( IOSKMapper * mapper );

    IOSKSize
    getArenaSize( void ) const
    {
        return fArenaSize;
    }
    IOSKCount
    getRegionCount( void ) const
    {
        return fRegions->getCount();
    }
    IOSKRegion * getRegion( IOSKIndex regIndex ) const;

    IOReturn map( const IOSKRegion * region,
        IOSKOffset regionOffset,
        IOSKIndex regionIndex,
        IOSKIndex segmentIndex,
        IOSKMemoryBuffer * buffer );

    void unmap( const IOSKRegion * region,
        IOSKOffset regionOffset,
        IOSKIndex regionIndex,
        IOSKIndex segmentIndex,
        vm_prot_t prot,
        bool isRedirected,
        const void * context );

    bool addMapper( const IOSKMapper * mapper );
    void removeMapper( const IOSKMapper * mapper );

private:
    virtual void free( void ) APPLE_KEXT_OVERRIDE;

    IOLock *    fArenaLock;
    OSSet *     fMappers;
    OSArray *   fRegions;
    IOSKSize    fArenaSize;
};

// IOSKRegion:
// An IOSKRegion manages a dynamic array of IOSKBuffers representing each
// memory segment in the region. Each IOSKRegion can be shared by multiple
// IOSKArenas, and the IOSKRegion keeps state specific to each arena - the
// offset and the index of the region within the arena. A lock is used to
// serialize updates to the IOSKBuffer array and the arenas.
// Retains the IOSKBuffers.
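//
// Segment population sketch (illustrative only; assumes "region" has
// page-sized segments and "spec"/"kvaddr" are set up by the caller):
//
//   IOSKMemoryBufferRef buf = IOSKMemoryBufferCreate(ptoa(1), &spec, &kvaddr);
//   IOSKRegionSetBuffer(region, 0, buf);   // maps segment 0 in attached arenas
//   IOSKRegionClearBuffer(region, 0);      // unmaps it and drops the buffer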

class IOSKRegion : public OSObject
{
    OSDeclareFinalStructors( IOSKRegion );

public:
    bool initWithSpec( const IOSKRegionSpec * spec,
        IOSKSize segSize, IOSKCount segCount );

    IOReturn setSegmentBuffer( IOSKIndex index, IOSKBuffer * buf );
    void clearSegmentBuffer( IOSKIndex index, IOSKMemoryBufferRef * prevBuffer );

    bool attachArena( IOSKArena * arena,
        IOSKOffset regionOffset, IOSKIndex regionIndex );
    void detachArena( const IOSKArena * arena );

    IOReturn updateMappingsForArena( IOSKArena * arena, bool redirect,
        const void * context = NULL );

    IOSKCount
    getSegmentCount( void ) const
    {
        return fSegmentCount;
    }
    IOSKSize
    getSegmentSize( void ) const
    {
        return fSegmentSize;
    }
    IOSKSize
    getRegionSize( void ) const
    {
        return fSegmentCount * fSegmentSize;
    }

private:
    virtual void free( void ) APPLE_KEXT_OVERRIDE;

    struct Segment {
        IOSKBuffer *    fBuffer;
    };

    struct ArenaEntry {
        SLIST_ENTRY(ArenaEntry) link;
        IOSKArena *     fArena;
        IOSKOffset      fRegionOffset;
        IOSKIndex       fRegionIndex;
    };
    SLIST_HEAD(ArenaHead, ArenaEntry);

    IOReturn _setSegmentBuffer( const IOSKIndex index, IOSKMemoryBuffer * buf );
    void _clearSegmentBuffer( const IOSKIndex index, IOSKMemoryBufferRef * prevBuffer );
    ArenaEntry * findArenaEntry( const IOSKArena * arena );

    IOSKRegionSpec  fSpec;
    IOLock *        fRegionLock;
    ArenaHead       fArenaHead;
    Segment *       fSegments;
    IOSKCount       fSegmentCount;
    IOSKSize        fSegmentSize;
};

#undef super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKRegionMapper, OSObject )

bool
IOSKRegionMapper::initWithMapper(
    IOSKMapper * mapper, IOSKRegion * region, IOSKOffset regionOffset )
{
    if ((mapper == NULL) || (region == NULL) || !super::init()) {
        return false;
    }

    // parent mapper retains the arena, which retains the regions
    assert(IOSK_OFFSET_OK(regionOffset));
    fMapper = mapper;
    fRegion = region;
    fRegionOffset = regionOffset;

    fMemoryMapCount = region->getSegmentCount();
    assert(fMemoryMapCount != 0);
    fMemoryMaps = IONew(IOMemoryMap *, fMemoryMapCount);
    if (!fMemoryMaps) {
        return false;
    }

    bzero(fMemoryMaps, sizeof(IOMemoryMap *) * fMemoryMapCount);

    DLOG("SKRegionMapper %p mapper %p region %p offset 0x%x",
        this, mapper, region, regionOffset);
    return true;
}

void
IOSKRegionMapper::free( void )
{
    DLOG("SKRegionMapper %p", this);

    if (fMemoryMaps) {
        assert(fMemoryMapCount != 0);
        for (IOSKIndex i = 0; i < fMemoryMapCount; i++) {
            if (fMemoryMaps[i]) {
                fMemoryMaps[i]->release();
                fMemoryMaps[i] = NULL;
            }
        }
        IODelete(fMemoryMaps, IOMemoryMap *, fMemoryMapCount);
        fMemoryMaps = NULL;
        fMemoryMapCount = 0;
    }

    fMapper = NULL;
    fRegion = NULL;
    super::free();
}

IOReturn
IOSKRegionMapper::map( IOSKIndex segIndex, IOSKBuffer * buffer )
{
    mach_vm_address_t   addr;
    mach_vm_offset_t    offset;
    IOMemoryMap *       map;
    IOOptionBits        options = kIOMapOverwrite;
    IOReturn            ret = kIOReturnSuccess;

    assert(segIndex < fMemoryMapCount);
    assert(buffer != NULL);

    if ((segIndex >= fMemoryMapCount) || (buffer == NULL)) {
        return kIOReturnBadArgument;
    }

    // redundant map requests are expected when the arena is mapped
    // by more than one mapper.
    if ((map = fMemoryMaps[segIndex]) != NULL) {
        assert(map->getMemoryDescriptor() == buffer);
        return kIOReturnSuccess;
    }

    if (buffer->fSpec.user_writable == FALSE) {
        options |= kIOMapReadOnly;
    }

    offset = fRegionOffset + (segIndex * fRegion->getSegmentSize());
    assert((offset + fRegion->getSegmentSize()) <= fMapper->fMapSize);
    addr = fMapper->fMapAddr + offset;

    map = buffer->createMappingInTask(fMapper->fTask, addr, options);
    fMemoryMaps[segIndex] = map;
    assert((map == NULL) || (map->getLength() == fRegion->getSegmentSize()));
    if (map == NULL) {
        ret = kIOReturnVMError;
    }

    SK_DF(ret == kIOReturnSuccess ? SK_VERB_IOSK : SK_VERB_ERROR,
        "%p buffer %p index %u map %p offset 0x%x size 0x%x",
        this, buffer, segIndex, fMemoryMaps[segIndex],
        (uint32_t)offset, fRegion->getSegmentSize());

    return ret;
}

void
IOSKRegionMapper::unmap( IOSKIndex segIndex, vm_prot_t prot )
{
    mach_vm_address_t   addr;
    mach_vm_offset_t    offset;
    IOMemoryMap *       map;
    kern_return_t       kr;

    assert(segIndex < fMemoryMapCount);

    // redundant unmap requests are expected when the arena is mapped
    // by more than one mapper.
    if ((segIndex >= fMemoryMapCount) || ((map = fMemoryMaps[segIndex]) == NULL)) {
        return;
    }

    offset = fRegionOffset + (segIndex * fRegion->getSegmentSize());
    assert((offset + fRegion->getSegmentSize()) <= fMapper->fMapSize);
    addr = fMapper->fMapAddr + offset;

    kr = mapOverwrite(addr, fRegion->getSegmentSize(), prot);
    assert(KERN_SUCCESS == kr);

    map->release();
    fMemoryMaps[segIndex] = map = NULL;

    DLOG("SKRegionMapper %p index %u offset 0x%x size 0x%x",
        this, segIndex, (uint32_t)offset, fRegion->getSegmentSize());
}

// Overwrite the given range with an anonymous placeholder mapping at the
// requested protection, so the arena's reserved address range stays
// reserved after a segment's memory is unmapped.
kern_return_t
IOSKRegionMapper::mapOverwrite(
    vm_map_offset_t addr, vm_map_size_t size, vm_prot_t prot )
{
    vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
    kern_return_t kr;

    vmk_flags.vmf_overwrite = true;
    vmk_flags.vm_tag = getVMTagForMap(fMapper->fTaskMap);

    kr = vm_map_enter_mem_object(
        fMapper->fTaskMap,
        &addr,
        size,
        (vm_map_offset_t)0,
        vmk_flags,
        IPC_PORT_NULL,
        (vm_object_offset_t)0,
        FALSE,
        prot,
        VM_PROT_DEFAULT,
        VM_INHERIT_NONE);

    SK_DF(kr == KERN_SUCCESS ? SK_VERB_IOSK : SK_VERB_ERROR,
        "SKRegionMapper %p addr 0x%llx size 0x%llx prot 0x%x "
        "kr 0x%x", this, (uint64_t)addr, (uint64_t)size, prot, kr);
    return kr;
}

#undef super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKMapper, OSObject )

bool
IOSKMapper::initWithTask(
    task_t task, IOSKArena * arena )
{
    IOSKRegionMapper *  subMap;
    IOSKRegion *        region;
    IOSKCount           regionCount;
    IOSKOffset          regionOffset = 0;
    vm_map_offset_t     addr;
    vm_map_size_t       size;
    kern_return_t       kr;
    bool                ok = false;

    if ((task == TASK_NULL) || (arena == NULL) || !super::init()) {
        return false;
    }

    fTask = task;
    fTaskMap = get_task_map(task);
    if (fTaskMap == VM_MAP_NULL) {
        return false;
    }

    arena->retain();
    fArena = arena;

    regionCount = fArena->getRegionCount();
    assert(regionCount != 0);

    fSubMaps = OSArray::withCapacity(regionCount);
    if (!fSubMaps) {
        return false;
    }

    for (IOSKIndex i = 0; i < regionCount; i++) {
        region = fArena->getRegion(i);
        assert(region != NULL);

        subMap = new IOSKRegionMapper;
        if (subMap && !subMap->initWithMapper(this, region, regionOffset)) {
            subMap->release();
            subMap = NULL;
        }
        if (!subMap) {
            break;
        }

        // the array retains the region mappers
        ok = fSubMaps->setObject(subMap);
        subMap->release();
        subMap = NULL;
        if (!ok) {
            break;
        }

        // offset of next region
        regionOffset += region->getRegionSize();
    }
    if (fSubMaps->getCount() != regionCount) {
        return false;
    }

    addr = 0;
    size = fArena->getArenaSize();
    assert(regionOffset == size);
    assert(IOSK_SIZE_OK(size));

    vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
    vmk_flags.vm_tag = getVMTagForMap(fTaskMap);

    // reserve address space in the given task with VM_PROT_NONE
    kr = vm_map_enter_mem_object(
        fTaskMap,
        &addr,
        size,
        (vm_map_offset_t)0,
        vmk_flags,
        IPC_PORT_NULL,
        (vm_object_offset_t)0,
        FALSE,
        VM_PROT_NONE,
        VM_PROT_DEFAULT,
        VM_INHERIT_NONE);

    ok = false;
    if (KERN_SUCCESS == kr) {
        fMapAddr = (mach_vm_address_t)addr;
        fMapSize = (mach_vm_size_t)size;
        ok = true;
    }

    SK_DF(kr == KERN_SUCCESS ? SK_VERB_IOSK : SK_VERB_ERROR,
        "SKMapper %p task 0x%llx map %p addr 0x%llx size 0x%llx subMaps %u "
        "kr 0x%x", this, (uint64_t)task, fTaskMap, (uint64_t)addr,
        (uint64_t)size, fSubMaps->getCount(), kr);

    return ok;
}

void
IOSKMapper::free( void )
{
    DLOG("SKMapper %p", this);

    if (fSubMaps != NULL) {
        fSubMaps->release();
        fSubMaps = NULL;
    }

    if (fArena != NULL) {
        fArena->release();
        fArena = NULL;
    }

    if (fMapSize != 0) {
        mach_vm_deallocate(fTaskMap, fMapAddr, fMapSize);
        fTaskMap = NULL;
        fMapAddr = 0;
        fMapSize = 0;
    }

    fTask = NULL;
    fTaskMap = NULL;

    super::free();
}

IOReturn
IOSKMapper::map(
    IOSKIndex regionIndex, IOSKIndex segmentIndex, IOSKBuffer * buffer )
{
    IOSKRegionMapper *  subMap;
    IOReturn            ret = kIOReturnBadArgument;

    // route to the region mapper at regionIndex
    assert(regionIndex < fSubMaps->getCount());
    subMap = (typeof(subMap))fSubMaps->getObject(regionIndex);
    if (subMap) {
        ret = subMap->map(segmentIndex, buffer);
    }

    return ret;
}

void
IOSKMapper::unmap(
    IOSKIndex regionIndex, IOSKIndex segmentIndex, vm_prot_t prot )
{
    IOSKRegionMapper * subMap;

    // route to the region mapper at regionIndex
    assert(regionIndex < fSubMaps->getCount());
    subMap = (typeof(subMap))fSubMaps->getObject(regionIndex);
    if (subMap) {
        subMap->unmap(segmentIndex, prot);
    }
}

#undef super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKArena, OSObject )

bool
IOSKArena::initWithRegions(
    IOSKRegion ** regions, IOSKCount regionCount )
{
    IOSKRegion *    region;
    IOSKSize        regionSize;
    IOSKOffset      regionOffset = 0;
    bool            ok = false;

    assert(regions != NULL);
    assert(regionCount != 0);

    do {
        if ((regions == NULL) || (regionCount == 0) || !super::init()) {
            break;
        }

        fArenaLock = IOLockAlloc();
        if (fArenaLock == NULL) {
            break;
        }

        fRegions = OSArray::withObjects((const OSObject **)regions, regionCount);
        if (!fRegions) {
            break;
        }

        ok = true;
        for (uint32_t i = 0; i < regionCount; i++) {
            region = OSDynamicCast(IOSKRegion, fRegions->getObject(i));
            ok = (region != NULL);
            if (!ok) {
                break;
            }

            regionSize = region->getRegionSize();
            assert(IOSK_SIZE_OK(regionSize));

            // attach to each region and assign region offset/index
            ok = region->attachArena(this, regionOffset, i);
            if (!ok) {
                break;
            }

            // offset of next region
            regionOffset += regionSize;
            assert(IOSK_OFFSET_OK(regionOffset));
        }
        fArenaSize = regionOffset;
    } while (false);

    DLOG("SKArena %p regions %u size 0x%x ok %d",
        this, regionCount, fArenaSize, ok);
    return ok;
}

void
IOSKArena::free( void )
{
    DLOG("IOSKArena %p", this);

    if (fRegions) {
        IOSKRegion * region;
        OSObject *   object;

        // detach from regions to stop mapping requests
        for (uint32_t i = 0; (object = fRegions->getObject(i)); i++) {
            region = OSDynamicCast(IOSKRegion, object);
            if (region) {
                region->detachArena(this);
            }
        }

        fRegions->release();
        fRegions = NULL;
    }

    if (fMappers) {
        assert(fMappers->getCount() == 0);
        fMappers->release();
        fMappers = NULL;
    }

    if (fArenaLock != NULL) {
        IOLockFree(fArenaLock);
        fArenaLock = NULL;
    }

    super::free();
}

IOReturn
IOSKArena::createMapperForTask( task_t task, IOSKMapper ** outMapper )
{
    IOSKRegion *    region;
    OSObject *      object;
    IOSKMapper *    mapper;
    IOReturn        result, ret = kIOReturnSuccess;

    assert(task != TASK_NULL);
    assert(outMapper != NULL);

    mapper = new IOSKMapper;
    if (mapper && !mapper->initWithTask(task, this)) {
        mapper->release();
        mapper = NULL;
    }
    if (!mapper || !addMapper(mapper)) {
        ret = kIOReturnNoMemory;
        goto done;
    }

    // request all regions to refresh the arena's mappings,
    // which now includes the newly added mapper.
    for (uint32_t i = 0; (object = fRegions->getObject(i)); i++) {
        region = OSDynamicCast(IOSKRegion, object);
        assert(region != NULL);
        result = region->updateMappingsForArena(this, false);
        assert(kIOReturnSuccess == result);
        if (result != kIOReturnSuccess) {
            ret = result;
        }
    }

done:
    if ((ret != kIOReturnSuccess) && mapper) {
        mapper->release();
        mapper = NULL;
    }
    *outMapper = mapper;
    return ret;
}

IOReturn
IOSKArena::map(
    const IOSKRegion * region __unused,
    IOSKOffset regionOffset __unused,
    IOSKIndex regionIndex, IOSKIndex segmentIndex,
    IOSKBuffer * buffer )
{
    IOSKMapper * mapper;
    OSIterator * iter;
    IOReturn     result, ret = kIOReturnSuccess;

    IOLockLock(fArenaLock);

    if (fMappers && (iter = OSCollectionIterator::withCollection(fMappers))) {
        while ((mapper = (typeof(mapper))iter->getNextObject())) {
            // skip any redirected mapper
            if (mapper->isRedirected()) {
                continue;
            }
            result = mapper->map(regionIndex, segmentIndex, buffer);
            assert(kIOReturnSuccess == result);
            if (result != kIOReturnSuccess) {
                ret = result;
            }
        }
        iter->release();
    }

    IOLockUnlock(fArenaLock);
    return ret;
}

void
IOSKArena::unmap(
    const IOSKRegion * region __unused,
    IOSKOffset regionOffset __unused,
    IOSKIndex regionIndex, IOSKIndex segmentIndex,
    vm_prot_t prot, bool redirecting, const void * context )
{
    IOSKMapper *       mapper;
    const IOSKMapper * redirectMapper = (typeof(redirectMapper))context;
    OSIterator *       iter;

    IOLockLock(fArenaLock);

    if (fMappers && (iter = OSCollectionIterator::withCollection(fMappers))) {
        while ((mapper = (typeof(mapper))iter->getNextObject())) {
            if (redirecting) {
                if ((redirectMapper == NULL) || (redirectMapper == mapper)) {
                    // redirecting can be specific to one mapper
                    mapper->unmap(regionIndex, segmentIndex, prot);
                    mapper->redirectMap();
                }
            } else if (!mapper->isRedirected()) {
                mapper->unmap(regionIndex, segmentIndex, prot);
            }
        }
        iter->release();
    }

    IOLockUnlock(fArenaLock);
}

void
IOSKArena::redirectMap( IOSKMapper * mapper )
{
    OSObject *   object;
    IOSKRegion * region;
    IOReturn     ret;

    // request all (redirectable) regions to redirect the arena's mapper;
    // a NULL mapper redirects all mappers.

    for (uint32_t i = 0; (object = fRegions->getObject(i)); i++) {
        region = OSDynamicCast(IOSKRegion, object);
        assert(region != NULL);
        ret = region->updateMappingsForArena(this, true, (const void *)mapper);
        assert(kIOReturnSuccess == ret);
    }
}

IOSKRegion *
IOSKArena::getRegion( IOSKIndex regionIndex ) const
{
    assert(regionIndex < getRegionCount());
    return OSDynamicCast(IOSKRegion, fRegions->getObject(regionIndex));
}

bool
IOSKArena::addMapper( const IOSKMapper * mapper )
{
    bool ok = false;

    assert(mapper != NULL);
    if (!mapper) {
        return false;
    }

    IOLockLock(fArenaLock);

    if (!fMappers) {
        fMappers = OSSet::withCapacity(2);
    }
    if (fMappers) {
        ok = fMappers->setObject(mapper);
    }

    IOLockUnlock(fArenaLock);

    DLOG("arena %p mapper %p ok %d", this, mapper, ok);
    return ok;
}

void
IOSKArena::removeMapper( const IOSKMapper * mapper )
{
    assert(mapper != NULL);
    if (!mapper) {
        return;
    }

    IOLockLock(fArenaLock);

    if (fMappers) {
        fMappers->removeObject(mapper);
    }

    IOLockUnlock(fArenaLock);
    DLOG("arena %p mapper %p", this, mapper);
}

#undef super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKRegion, OSObject )

bool
IOSKRegion::initWithSpec( const IOSKRegionSpec * spec,
    IOSKSize segmentSize, IOSKCount segmentCount )
{
    bool ok = false;

    do {
        if (!IOSK_SIZE_OK(segmentSize) || (segmentCount == 0) || !super::init()) {
            break;
        }

        if (spec) {
            fSpec = *spec;
        }
        fSegmentCount = segmentCount;
        fSegmentSize = segmentSize;

        fRegionLock = IOLockAlloc();
        if (fRegionLock == NULL) {
            break;
        }

        SLIST_INIT(&fArenaHead);

        fSegments = IONew(Segment, fSegmentCount);
        if (fSegments == NULL) {
            break;
        }
        bzero(fSegments, sizeof(IOSKRegion::Segment) * fSegmentCount);
        ok = true;
    } while (false);

    SK_DF(ok ? SK_VERB_IOSK : SK_VERB_ERROR,
        "SKRegion %p segment size 0x%x count %u ok %d",
        this, segmentSize, segmentCount, ok);

    return ok;
}

void
IOSKRegion::free( void )
{
    DLOG("SKRegion %p", this);

    ArenaEntry *entry, *tentry;
    SLIST_FOREACH_SAFE(entry, &fArenaHead, link, tentry) {
        SLIST_REMOVE(&fArenaHead, entry, ArenaEntry, link);
        // Arena didn't detach from the region before release()
        assert(entry->fArena == NULL);
        IOFreeType(entry, ArenaEntry);
    }
    assert(SLIST_EMPTY(&fArenaHead));

    if (fSegments != NULL) {
        assert(fSegmentCount != 0);
        for (uint32_t i = 0; i < fSegmentCount; i++) {
            _clearSegmentBuffer(i, NULL);
        }

        IODelete(fSegments, Segment, fSegmentCount);
        fSegments = NULL;
    }

    if (fRegionLock != NULL) {
        IOLockFree(fRegionLock);
        fRegionLock = NULL;
    }

    super::free();
}

IOReturn
IOSKRegion::_setSegmentBuffer(
    const IOSKIndex segmentIndex, IOSKBuffer * buffer )
{
    Segment *  seg;
    IOReturn   ret = kIOReturnSuccess;

    assert(buffer != NULL);
    assert(segmentIndex < fSegmentCount);

    if (!buffer || (buffer->getCapacity() != fSegmentSize) ||
        (segmentIndex >= fSegmentCount)) {
        ret = kIOReturnBadArgument;
        goto done;
    }

    seg = &fSegments[segmentIndex];
    assert(seg->fBuffer == NULL);

    if (seg->fBuffer == NULL) {
        buffer->retain();
        seg->fBuffer = buffer;

        // update mappings for all arenas containing this region,
        // or none if no arena is attached.
        ArenaEntry * entry;
        SLIST_FOREACH(entry, &fArenaHead, link) {
            if (entry->fArena != NULL) {
                ret = entry->fArena->map(this,
                    entry->fRegionOffset, entry->fRegionIndex,
                    segmentIndex, buffer);
                assert(kIOReturnSuccess == ret);
                if (ret != kIOReturnSuccess) {
                    break;
                }
            }
        }
    }

    if (ret != kIOReturnSuccess) {
        _clearSegmentBuffer(segmentIndex, NULL);
    }

done:
    SK_DF(ret == kIOReturnSuccess ? SK_VERB_IOSK : SK_VERB_ERROR,
        "SKRegion %p set segment[%u] buffer %p ret 0x%x",
        this, segmentIndex, buffer, ret);

    return ret;
}

void
IOSKRegion::_clearSegmentBuffer(
    const IOSKIndex segmentIndex, IOSKMemoryBufferRef * prevBuffer )
{
    Segment *    seg;
    bool         cleared = false;
    IOSKBuffer * foundBuffer = NULL;

    assert(segmentIndex < fSegmentCount);
    if (segmentIndex >= fSegmentCount) {
        goto done;
    }

    seg = &fSegments[segmentIndex];
    if (seg->fBuffer != NULL) {
        foundBuffer = seg->fBuffer;

        // update mappings for all arenas containing this region,
        // or none if no arena is attached.
        vm_prot_t prot = VM_PROT_NONE;
        ArenaEntry * entry;

        SLIST_FOREACH(entry, &fArenaHead, link) {
            if (entry->fArena != NULL) {
                entry->fArena->unmap(this,
                    entry->fRegionOffset, entry->fRegionIndex,
                    segmentIndex, prot, false, NULL);
            }
        }

        seg->fBuffer->release();
        seg->fBuffer = NULL;
        cleared = true;
    }

    if (prevBuffer) {
        *prevBuffer = foundBuffer;
    }

done:
    DLOG("SKRegion %p clear segment[%u] ok %d",
        this, segmentIndex, cleared);
}

IOReturn
IOSKRegion::setSegmentBuffer(
    IOSKIndex index, IOSKMemoryBuffer * buffer )
{
    IOReturn ret;

    IOLockLock(fRegionLock);
    ret = _setSegmentBuffer(index, buffer);
    IOLockUnlock(fRegionLock);
    return ret;
}

void
IOSKRegion::clearSegmentBuffer( IOSKIndex index, IOSKMemoryBufferRef * prevBuffer )
{
    IOLockLock(fRegionLock);
    _clearSegmentBuffer(index, prevBuffer);
    IOLockUnlock(fRegionLock);
}

IOSKRegion::ArenaEntry *
IOSKRegion::findArenaEntry( const IOSKArena * arena )
{
    ArenaEntry * found = NULL;

    assert(arena != NULL);

    ArenaEntry * entry;
    SLIST_FOREACH(entry, &fArenaHead, link) {
        if (entry->fArena == arena) {
            found = entry;
            break;
        }
    }
    return found;
}

bool
IOSKRegion::attachArena(
    IOSKArena * arena, IOSKOffset regionOffset, IOSKIndex regionIndex )
{
    bool ok = false;

    assert(arena != NULL);
    if (!arena) {
        return false;
    }

    IOLockLock(fRegionLock);

    ArenaEntry * entry = NULL;
    ArenaEntry * empty = NULL;
    ArenaEntry * dup = NULL;

    SLIST_FOREACH(entry, &fArenaHead, link) {
        // duplicates not allowed
        assert(entry->fArena != arena);
        if (entry->fArena == arena) {
            dup = entry;
            break;
        }

        if ((empty == NULL) && (entry->fArena == NULL)) {
            empty = entry;
        }
    }

    if (dup != NULL) {
        // do nothing
    } else if (empty != NULL) {
        // update the empty/available entry
        empty->fArena = arena;
        empty->fRegionOffset = regionOffset;
        empty->fRegionIndex = regionIndex;
        ok = true;
    } else {
        // append a new entry
        ArenaEntry * newEntry = IOMallocType(ArenaEntry);
        newEntry->fArena = arena;
        newEntry->fRegionOffset = regionOffset;
        newEntry->fRegionIndex = regionIndex;
        SLIST_INSERT_HEAD(&fArenaHead, newEntry, link);
        ok = true;
    }

    IOLockUnlock(fRegionLock);

    SK_DF(ok ? SK_VERB_IOSK : SK_VERB_ERROR,
        "SKRegion %p attach arena %p offset 0x%x index %u ok %d",
        this, arena, regionOffset, regionIndex, ok);
    return ok;
}

void
IOSKRegion::detachArena( const IOSKArena * arena )
{
    ArenaEntry * entry;
    bool         detached = false;

    assert(arena != NULL);
    if (!arena) {
        return;
    }

    IOLockLock(fRegionLock);

    entry = findArenaEntry(arena);
    if (entry != NULL) {
        entry->fArena = NULL;
        entry->fRegionOffset = 0;
        entry->fRegionIndex = 0;
        detached = true;
    }

    IOLockUnlock(fRegionLock);
    DLOG("SKRegion %p detach arena %p ok %d", this, arena, detached);
}

IOReturn
IOSKRegion::updateMappingsForArena(
    IOSKArena * arena, bool redirect, const void * context )
{
    ArenaEntry * entry;
    Segment *    seg;
    vm_prot_t    prot;
    IOReturn     result = kIOReturnSuccess;

    assert(arena != NULL);
    if (redirect && fSpec.noRedirect) {
        DLOG("SKRegion %p no redirect", this);
        return kIOReturnSuccess;
    }

    IOLockLock(fRegionLock);

    entry = findArenaEntry(arena);
    if (entry != NULL) {
        assert(entry->fArena == arena);

        for (uint32_t index = 0; index < fSegmentCount; index++) {
            seg = &fSegments[index];
            if ((seg->fBuffer == NULL) || redirect) {
                prot = VM_PROT_NONE;
                if (redirect && (seg->fBuffer != NULL)) {
                    prot = VM_PROT_READ;
                    if (seg->fBuffer->fSpec.user_writable) {
                        prot |= VM_PROT_WRITE;
                    }
                }

                arena->unmap(this, entry->fRegionOffset, entry->fRegionIndex,
                    index, prot, redirect, context);
            } else {
                result = arena->map(this, entry->fRegionOffset,
                    entry->fRegionIndex,
                    index, seg->fBuffer);
            }
        }
    }

    IOLockUnlock(fRegionLock);
    SK_DF(result == kIOReturnSuccess ? SK_VERB_IOSK : SK_VERB_ERROR,
        "%p update arena %p redirect %d ret 0x%x",
        this, arena, redirect, result);
    return result;
}

OSDefineMetaClassAndFinalStructors( IOSKMemoryArray, IOMultiMemoryDescriptor )

bool
IOSKMemoryArray::overwriteMappingInTask(
    task_t intoTask,
    mach_vm_address_t * startAddr,
    IOOptionBits options )
{
    bool ok = true;

    for (uint32_t i = 0; i < _descriptorsCount; i++) {
        IOMemoryDescriptor * iomd = _descriptors[i];
        IOSKMemoryBuffer *   mb = OSDynamicCast(IOSKMemoryBuffer, iomd);
        IOSKMemoryArray *    ma = OSDynamicCast(IOSKMemoryArray, iomd);

        if (mb) {
            IOMemoryMap * rwMap;

            if (mb->fSpec.user_writable) {
                // overwrite read-only mapping to read-write
                rwMap = mb->createMappingInTask(intoTask,
                    *startAddr, options | kIOMapOverwrite);
                if (rwMap) {
                    DLOG("map_rw %d: addr 0x%llx, size 0x%x",
                        i, *startAddr, (uint32_t)iomd->getLength());
                    rwMap->release();
                } else {
                    ELOG("overwrite map failed");
                    ok = false;
                    break;
                }
            } else {
                DLOG("map_ro %d: addr 0x%llx, size 0x%x",
                    i, *startAddr, (uint32_t)iomd->getLength());
            }

            //DLOG("map increment 0x%x", (uint32_t)iomd->getLength());
            *startAddr += iomd->getLength();
        } else if (ma) {
            // nested array: recurse and let it advance startAddr
            ok = ma->overwriteMappingInTask(intoTask, startAddr, options);
            if (!ok) {
                break;
            }
        }
    }

    return ok;
}

#undef super
#define super IOBufferMemoryDescriptor
OSDefineMetaClassAndFinalStructorsWithZone( IOSKMemoryBuffer,
    IOBufferMemoryDescriptor, ZC_NONE )

bool
IOSKMemoryBuffer::initWithSpec(
    task_t inTask,
    mach_vm_size_t capacity,
    mach_vm_address_t alignment,
    const IOSKMemoryBufferSpec * spec )
{
    bool ok = true;
    IOOptionBits options = kIOMemoryKernelUserShared;

    if (spec) {
        fSpec = *spec;
    }
    if (fSpec.iodir_in) {
        options |= kIODirectionIn;
    }
    if (fSpec.iodir_out) {
        options |= kIODirectionOut;
    }
    if (fSpec.purgeable) {
        options |= (kIOMemoryPageable | kIOMemoryPurgeable);
    }
    if (fSpec.inhibitCache) {
        options |= kIOMapInhibitCache;
    }
    if (fSpec.physcontig) {
        options |= kIOMemoryPhysicallyContiguous;
    }
    if (fSpec.threadSafe) {
        options |= kIOMemoryThreadSafe;
    }

    setVMTags(VM_KERN_MEMORY_SKYWALK, VM_MEMORY_SKYWALK);

    if (fSpec.kernel_writable) {
        if (fSpec.puredata) {
            /* purely data; use data buffers heap */
            ok = initWithPhysicalMask(
                inTask, options, capacity, alignment, 0);
        } else {
            /* may have pointers; use default heap */
            ok = initControlWithPhysicalMask(
                inTask, options, capacity, alignment, 0);
        }
        if (!ok) {
            return false;
        }
        fKernelAddr = super::getBytesNoCopy();
        return true;
    } else {
        /*
         * To create a kernel read-only BMD:
         * 1. init with TASK_NULL (which isn't mapped anywhere);
         * 2. then map it read-only into kernel_task.
         * Note that the kernel virtual address has to be obtained from
         * the secondary kernel read-only mapping.
         */
        options |= kIOMapReadOnly;
        if (fSpec.puredata) {
            /* purely data; use data buffers heap */
            ok = initWithPhysicalMask(
                TASK_NULL, options, capacity, alignment, 0);
        } else {
            /* may have pointers; use default heap */
            ok = initControlWithPhysicalMask(
                TASK_NULL, options, capacity, alignment, 0);
        }
        if (!ok) {
            return false;
        }
        /* RO mapping will retain this, see ::taggedRelease() */
        fKernelReadOnlyMapping = super::createMappingInTask(kernel_task, 0, options);
        if (fKernelReadOnlyMapping == NULL) {
            return false;
        }
        fKernelAddr = (void *)fKernelReadOnlyMapping->getVirtualAddress();
        assert(fKernelAddr != NULL);
        return true;
    }
}

void
IOSKMemoryBuffer::taggedRelease(const void *tag) const
{
    /*
     * An RO buffer carries an extra retain from fKernelReadOnlyMapping,
     * so it must explicitly release when refcnt == 2 to free itself.
     */
    if (!fSpec.kernel_writable && fKernelReadOnlyMapping != NULL) {
        super::taggedRelease(tag, 2);
    } else {
        super::taggedRelease(tag);
    }
}

void
IOSKMemoryBuffer::free( void )
{
    if (!fSpec.kernel_writable && fKernelReadOnlyMapping != NULL) {
        OSSafeReleaseNULL(fKernelReadOnlyMapping);
        fKernelAddr = NULL;
    }
    super::free();
}

void *
IOSKMemoryBuffer::getBytesNoCopy( void )
{
    return fKernelAddr;
}

void *
IOSKMemoryBuffer::getBytesNoCopy( vm_size_t start, vm_size_t withLength )
{
    IOVirtualAddress address;

    // reject arithmetic overflow of start + withLength
    if ((start + withLength) < start) {
        return NULL;
    }

    address = (IOVirtualAddress) fKernelAddr;

    if (start < _length && (start + withLength) <= _length) {
        return (void *)(address + start);
    }
    return NULL;
}

static IOSKMemoryBuffer *
RefToMemoryBuffer( IOSKMemoryRef inRef )
{
    IOSKMemoryBuffer * mb = OSDynamicCast(IOSKMemoryBuffer, inRef);
    return mb;
}

static IOSKMemoryArray *
RefToMemoryArray( IOSKMemoryRef inRef )
{
    IOSKMemoryArray * ma = OSDynamicCast(IOSKMemoryArray, inRef);
    return ma;
}

__BEGIN_DECLS

void
IOSKMemoryDestroy(
    IOSKMemoryRef reference )
{
    assert(reference);
    if (reference) {
        reference->release();
    }
}

void
IOSKMemoryMapDestroy(
    IOSKMemoryMapRef reference )
{
    assert(reference);
    if (reference) {
        reference->release();
    }
}

IOSKMemoryBufferRef
IOSKMemoryBufferCreate(
    mach_vm_size_t capacity,
    const IOSKMemoryBufferSpec * spec,
    mach_vm_address_t * kvaddr )
{
    IOSKMemoryBuffer * mb;
    void *             addr = NULL;

    mach_vm_size_t rounded_capacity = round_page(capacity);
    if (capacity != rounded_capacity) {
        return NULL;
    }

    mb = new IOSKMemoryBuffer;
    if (mb && !mb->initWithSpec(kernel_task, capacity, PAGE_SIZE, spec)) {
        mb->release();
        mb = NULL;
    }
    if (!mb) {
        ELOG("create capacity=0x%llx failed", capacity);
        goto fail;
    }

    addr = mb->fKernelAddr;
    if (kvaddr) {
        *kvaddr = (mach_vm_address_t)(uintptr_t)addr;
    }
    DLOG("buffer %p, vaddr %p, capacity 0x%llx", mb, addr, capacity);

fail:
    return mb;
}
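
/*
 * Usage sketch (illustrative only): create a page-sized buffer and
 * retrieve its kernel virtual address. Capacity must be page-rounded.
 *
 *   IOSKMemoryBufferSpec spec;
 *   mach_vm_address_t kva;
 *   bzero(&spec, sizeof(spec));
 *   spec.purgeable = true;
 *   IOSKMemoryBufferRef buf = IOSKMemoryBufferCreate(PAGE_SIZE, &spec, &kva);
 *   ...
 *   IOSKMemoryDestroy(buf);
 */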

IOSKMemoryArrayRef
IOSKMemoryArrayCreate(
    const IOSKMemoryRef refs[],
    uint32_t count )
{
    IOSKMemoryArray * ma;
    IOSKMemoryRef     ref;
    bool              ok = true;

    if (!refs || (count < 1)) {
        return NULL;
    }

    // Validate the references
    for (uint32_t i = 0; i < count; i++) {
        ref = refs[i];
        assert(RefToMemoryBuffer(ref) || RefToMemoryArray(ref));
        if (!RefToMemoryBuffer(ref) && !RefToMemoryArray(ref)) {
            ok = false;
            break;
        }
    }
    if (!ok) {
        return NULL;
    }

    ma = new IOSKMemoryArray;
    if (ma && !ma->initWithDescriptors((IOMemoryDescriptor **)refs,
        count, kIODirectionInOut, false)) {
        ma->release();
        ma = NULL;
    }
    if (!ma) {
        ELOG("create count=%u failed", count);
    } else {
        DLOG("array %p count=%u", ma, count);
    }

    return ma;
}
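
/*
 * Usage sketch (illustrative only): aggregate buffers into an array, then
 * map the whole array into a user task with IOSKMemoryMapToTask() below.
 *
 *   IOSKMemoryRef refs[2] = { buf0, buf1 };
 *   IOSKMemoryArrayRef arr = IOSKMemoryArrayCreate(refs, 2);
 *   mach_vm_address_t addr;
 *   mach_vm_size_t size;
 *   IOSKMemoryMapRef map = IOSKMemoryMapToTask(arr, task, &addr, &size);
 *   ...
 *   IOSKMemoryMapDestroy(map);
 *   IOSKMemoryDestroy(arr);
 */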

IOSKMemoryMapRef
IOSKMemoryMapToTask(
    IOSKMemoryRef reference,
    task_t intoTask,
    mach_vm_address_t * mapAddr,
    mach_vm_size_t * mapSize )
{
    IOOptionBits      options = kIOMapAnywhere | kIOMapReadOnly;
    mach_vm_address_t startAddr;
    IOMemoryMap *     map = NULL;

    IOSKMemoryArray * ma = RefToMemoryArray(reference);

    assert(ma);
    if (!ma) {
        return NULL;
    }

    assert(intoTask != kernel_task);
    map = ma->createMappingInTask(intoTask, 0, options);
    if (map) {
        bool ok;

        startAddr = map->getAddress();
        *mapAddr = startAddr;
        *mapSize = map->getSize();
        DLOG("map vaddr 0x%llx, size 0x%llx", *mapAddr, *mapSize);

        options &= ~(kIOMapReadOnly | kIOMapAnywhere);
        ok = ma->overwriteMappingInTask(intoTask, &startAddr, options);
        if (!ok) {
            map->release();
            map = NULL;
        }
    }
    return map;
}

IOSKMemoryMapRef
IOSKMemoryMapToKernelTask(
    IOSKMemoryRef reference,
    mach_vm_address_t * mapAddr,
    mach_vm_size_t * mapSize )
{
    IOOptionBits      options = kIOMapAnywhere;
    mach_vm_address_t startAddr;
    IOMemoryMap *     map = NULL;

    IOSKMemoryArray * ma = RefToMemoryArray(reference);

    assert(ma);
    if (!ma) {
        return NULL;
    }

    map = ma->createMappingInTask(kernel_task, 0, options);
    if (map) {
        startAddr = map->getAddress();
        *mapAddr = startAddr;
        *mapSize = map->getSize();
        DLOG("map vaddr 0x%llx, size 0x%llx", *mapAddr, *mapSize);
    }
    return map;
}

IOReturn
IOSKMemoryDiscard( IOSKMemoryRef reference )
{
    IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);

    assert(mb);
    assert(mb->fSpec.purgeable);
    if (!mb || !mb->fSpec.purgeable) {
        return kIOReturnBadArgument;
    }

    return mb->setPurgeable(kIOMemoryPurgeableEmpty |
           kIOMemoryPurgeableFaultOnAccess, NULL);
}

IOReturn
IOSKMemoryReclaim( IOSKMemoryRef reference )
{
    IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);

    assert(mb);
    assert(mb->fSpec.purgeable);
    if (!mb || !mb->fSpec.purgeable) {
        return kIOReturnBadArgument;
    }

    return mb->setPurgeable(kIOMemoryPurgeableNonVolatile, NULL);
}

IOReturn
IOSKMemoryWire( IOSKMemoryRef reference )
{
    IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);

    assert(mb);
    assert(mb->fSpec.purgeable);
    if (!mb || !mb->fSpec.purgeable) {
        return kIOReturnBadArgument;
    }

    return mb->prepare();
}

IOReturn
IOSKMemoryUnwire( IOSKMemoryRef reference )
{
    IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);

    assert(mb);
    assert(mb->fSpec.purgeable);
    if (!mb || !mb->fSpec.purgeable) {
        return kIOReturnBadArgument;
    }

    return mb->complete();
}
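
/*
 * Purgeable lifecycle sketch (illustrative only; requires a buffer created
 * with spec.purgeable set):
 *
 *   IOSKMemoryWire(buf);     // prepare(): wire the pages down
 *   IOSKMemoryUnwire(buf);   // complete(): unwire them again
 *   IOSKMemoryDiscard(buf);  // mark empty; faults on access until reclaimed
 *   IOSKMemoryReclaim(buf);  // nonvolatile again; discarded contents are gone
 */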

static void
IOSKObjectDestroy( const OSObject * object )
{
    assert(object != NULL);
    if (object) {
        object->release();
    }
}

IOSKArenaRef
IOSKArenaCreate( IOSKRegionRef * regionList, IOSKCount regionCount )
{
    IOSKArenaRef arena;

    arena = new IOSKArena;
    if ((arena != NULL) && !arena->initWithRegions(regionList, regionCount)) {
        arena->release();
        arena = NULL;
    }
    return arena;
}

void
IOSKArenaDestroy( IOSKArenaRef arena )
{
    IOSKObjectDestroy(arena);
}

void
IOSKArenaRedirect( IOSKArenaRef arena )
{
    assert(arena != NULL);
    if (arena != NULL) {
        arena->redirectMap(NULL);
    }
}

IOSKRegionRef
IOSKRegionCreate( const IOSKRegionSpec * regionSpec,
    IOSKSize segSize, IOSKCount segCount )
{
    IOSKRegionRef region;

    region = new IOSKRegion;
    if ((region != NULL) && !region->initWithSpec(regionSpec, segSize, segCount)) {
        region->release();
        region = NULL;
    }
    return region;
}

void
IOSKRegionDestroy( IOSKRegionRef region )
{
    IOSKObjectDestroy(region);
}

IOReturn
IOSKRegionSetBuffer( IOSKRegionRef region, IOSKIndex segmentIndex,
    IOSKMemoryBufferRef buffer )
{
    IOReturn ret = kIOReturnBadArgument;

    assert(region != NULL);
    if (region != NULL) {
        ret = region->setSegmentBuffer(segmentIndex, (IOSKBuffer *)buffer);
    }

    return ret;
}

void
IOSKRegionClearBuffer( IOSKRegionRef region, IOSKIndex segmentIndex )
{
    assert(region != NULL);
    if (region != NULL) {
        region->clearSegmentBuffer(segmentIndex, NULL);
    }
}

void
IOSKRegionClearBufferDebug( IOSKRegionRef region, IOSKIndex segmentIndex,
    IOSKMemoryBufferRef * prevBufferRef )
{
    assert(region != NULL);
    if (region != NULL) {
        region->clearSegmentBuffer(segmentIndex, prevBufferRef);
    }
}

IOSKMapperRef
IOSKMapperCreate( IOSKArenaRef arena, task_t task )
{
    IOSKMapperRef mapper = NULL;

    assert(arena != NULL);
    if (arena != NULL) {
        arena->createMapperForTask(task, &mapper);
    }
    return mapper;
}

void
IOSKMapperDestroy( IOSKMapperRef mapper )
{
    assert(mapper != NULL);
    if (mapper != NULL) {
        IOSKArena * arena = mapper->getArena();
        assert(arena != NULL);
        arena->removeMapper(mapper);
        IOSKObjectDestroy(mapper);
    }
}

void
IOSKMapperRedirect( IOSKMapperRef mapper )
{
    assert(mapper != NULL);
    if (mapper != NULL) {
        IOSKArena * arena = mapper->getArena();
        assert(arena != NULL);
        arena->redirectMap(mapper);
    }
}

IOReturn
IOSKMapperGetAddress( IOSKMapperRef mapper,
    mach_vm_address_t * address, mach_vm_size_t * size )
{
    assert(mapper != NULL);
    if ((mapper == NULL) || (address == NULL)) {
        return kIOReturnBadArgument;
    }

    *address = mapper->getMapAddress(size);
    return kIOReturnSuccess;
}
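
/*
 * Mapper lifecycle sketch (illustrative only):
 *
 *   IOSKMapperRef mapper = IOSKMapperCreate(arena, current_task());
 *   mach_vm_address_t addr;
 *   mach_vm_size_t size;
 *   IOSKMapperGetAddress(mapper, &addr, &size);
 *   IOSKMapperRedirect(mapper);  // swap this mapper's segment mappings for
 *                                // anonymous pages (redirectable regions only)
 *   IOSKMapperDestroy(mapper);   // breaks the arena <-> mapper retain cycle
 */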

boolean_t
IOSKBufferIsWired( IOSKMemoryBufferRef buffer )
{
    assert(buffer != NULL);
    return ((IOSKBuffer *)buffer)->isWired();
}

__END_DECLS

#if DEVELOPMENT || DEBUG

extern int IOSkywalkSupportTest(int x);

int
IOSkywalkSupportTest( int newValue )
{
    static const int kNumRegions = 3;
    static const int kNumBuffers = 6;
    static const int kNumMappers = 3;
    static const int kNumArenas = 2;

    IOSKMemoryBufferSpec bspec;
    IOSKRegionSpec       rspec;
    IOSKMemoryBufferRef  buffers[kNumBuffers];
    mach_vm_address_t    bufkvas[kNumBuffers];
    IOSKRegionRef        regions[kNumRegions];
    IOSKRegionRef        reverse[kNumRegions];
    IOSKArenaRef         arenas[kNumArenas];
    IOSKMapperRef        mappers[kNumMappers];
    mach_vm_address_t    addrs[kNumMappers];
    mach_vm_size_t       size;
    uint32_t             value;
    uint32_t *           ptr;
    IOReturn             ret;

    kprintf("IOSKArena count : %u\n",
        IOSKArena::gMetaClass.getInstanceCount());
    kprintf("IOSKRegion count : %u\n",
        IOSKRegion::gMetaClass.getInstanceCount());
    kprintf("IOSKMapper count : %u, %u (sub maps)\n",
        IOSKMapper::gMetaClass.getInstanceCount(),
        IOSKRegionMapper::gMetaClass.getInstanceCount());
    kprintf("IOSKBuffer count : %u\n",
        IOSKBuffer::gMetaClass.getInstanceCount());

    rspec.noRedirect = true;
    regions[0] = IOSKRegionCreate(&rspec, (IOSKSize) ptoa(1), 2);
    assert(regions[0]);
    rspec.noRedirect = false;
    regions[1] = IOSKRegionCreate(&rspec, (IOSKSize) ptoa(2), 3);
    assert(regions[1]);
    regions[2] = IOSKRegionCreate(&rspec, (IOSKSize) ptoa(3), 4);
    assert(regions[2]);

    reverse[0] = regions[2];
    reverse[1] = regions[1];
    reverse[2] = regions[0];

    arenas[0] = IOSKArenaCreate(regions, 3);
    assert(arenas[0]);
    arenas[1] = IOSKArenaCreate(reverse, 3);
    assert(arenas[1]);

    bzero(&bspec, sizeof(bspec));
    bspec.purgeable = true;
    bspec.user_writable = false;
    buffers[0] = IOSKMemoryBufferCreate(ptoa(1), &bspec, &bufkvas[0]);
    assert(buffers[0]);
    assert(IOSKBufferIsWired(buffers[0]) == false);
    bspec.user_writable = true;
    buffers[1] = IOSKMemoryBufferCreate(ptoa(1), &bspec, &bufkvas[1]);
    assert(buffers[1]);
    buffers[2] = IOSKMemoryBufferCreate(ptoa(2), &bspec, &bufkvas[2]);
    assert(buffers[2]);
    buffers[3] = IOSKMemoryBufferCreate(ptoa(2), &bspec, &bufkvas[3]);
    assert(buffers[3]);
    buffers[4] = IOSKMemoryBufferCreate(ptoa(3), &bspec, &bufkvas[4]);
    assert(buffers[4]);
    buffers[5] = IOSKMemoryBufferCreate(ptoa(3), &bspec, &bufkvas[5]);
    assert(buffers[5]);

    for (int i = 0; i < kNumBuffers; i++) {
        value = 0x534B0000 | i;
        ptr = (uint32_t *)(uintptr_t)bufkvas[i];
        *ptr = value;
        assert(value == *ptr);
    }

    ret = IOSKRegionSetBuffer(regions[0], 0, buffers[0]);
    assert(ret == kIOReturnSuccess);
    ret = IOSKRegionSetBuffer(regions[0], 1, buffers[1]);
    assert(ret == kIOReturnSuccess);
    ret = IOSKRegionSetBuffer(regions[1], 0, buffers[2]);
    assert(ret == kIOReturnSuccess);
    ret = IOSKRegionSetBuffer(regions[1], 1, buffers[3]);
    assert(ret == kIOReturnSuccess);
    ret = IOSKRegionSetBuffer(regions[2], 0, buffers[4]);
    assert(ret == kIOReturnSuccess);
    ret = IOSKRegionSetBuffer(regions[2], 3, buffers[5]);
    assert(ret == kIOReturnSuccess);

    mappers[0] = IOSKMapperCreate(arenas[0], current_task());
    assert(mappers[0]);
    mappers[1] = IOSKMapperCreate(arenas[0], current_task());
    assert(mappers[1]);
    mappers[2] = IOSKMapperCreate(arenas[1], current_task());
    assert(mappers[2]);

    ret = IOSKMapperGetAddress(mappers[0], &addrs[0], &size);
    assert(ret == kIOReturnSuccess);
    assert(size == ptoa(20));
    ret = IOSKMapperGetAddress(mappers[1], &addrs[1], &size);
    assert(ret == kIOReturnSuccess);
    assert(size == ptoa(20));
    ret = IOSKMapperGetAddress(mappers[2], &addrs[2], &size);
    assert(ret == kIOReturnSuccess);
    assert(size == ptoa(20));

    for (int i = 0; i < kNumMappers; i++) {
        kprintf("mapper[%d] %p map address 0x%llx size 0x%x\n",
            i, mappers[i], (uint64_t)addrs[i], (uint32_t)size);
    }

    ptr = (uint32_t *)(uintptr_t)addrs[0];
    assert(*ptr == 0x534B0000);
    ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(1));
    assert(*ptr == 0x534B0001);
    ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(2));
    assert(*ptr == 0x534B0002);
    ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(4));
    assert(*ptr == 0x534B0003);
    ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(8));
    assert(*ptr == 0x534B0004);
    ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(17));
    assert(*ptr == 0x534B0005);

    *ptr = 0x4B530005;
    assert(0x4B530005 == *ptr);
    *ptr = 0x534B0005;

    IOSKMapperRedirect(mappers[0]);
    *ptr = 0x33333333;
    assert(0x33333333 == *ptr);
    ptr = (uint32_t *)(uintptr_t)addrs[0];
    assert(*ptr == 0x534B0000);

    ptr = (uint32_t *)(uintptr_t)addrs[2];
    assert(*ptr == 0x534B0004);
    ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(9));
    assert(*ptr == 0x534B0005);
    ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(12));
    assert(*ptr == 0x534B0002);
    ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(14));
    assert(*ptr == 0x534B0003);
    ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(18));
    assert(*ptr == 0x534B0000);
    ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(19));
    assert(*ptr == 0x534B0001);

    IOSKRegionClearBufferDebug(regions[0], 1, NULL);
    ret = IOSKRegionSetBuffer(regions[0], 1, buffers[1]);
    assert(ret == kIOReturnSuccess);
    assert(*ptr == 0x534B0001);

    IOSKArenaRedirect(arenas[0]);
    IOSKArenaRedirect(arenas[1]);

    for (int i = 0; i < kNumBuffers; i++) {
        IOSKMemoryDestroy(buffers[i]);
    }
    for (int i = 0; i < kNumRegions; i++) {
        IOSKRegionDestroy(regions[i]);
    }
    for (int i = 0; i < kNumArenas; i++) {
        IOSKArenaDestroy(arenas[i]);
    }
    for (int i = 0; i < kNumMappers; i++) {
        IOSKMapperDestroy(mappers[i]);
    }

    kprintf("IOSKArena count : %u\n",
        IOSKArena::gMetaClass.getInstanceCount());
    kprintf("IOSKRegion count : %u\n",
        IOSKRegion::gMetaClass.getInstanceCount());
    kprintf("IOSKMapper count : %u, %u (sub maps)\n",
        IOSKMapper::gMetaClass.getInstanceCount(),
        IOSKRegionMapper::gMetaClass.getInstanceCount());
    kprintf("IOSKBuffer count : %u\n",
        IOSKBuffer::gMetaClass.getInstanceCount());

    return 0;
}

#endif /* DEVELOPMENT || DEBUG */

#if defined(__x86_64__)
const OSSymbol *
IOSKCopyKextIdentifierWithAddress( vm_address_t address )
{
    const OSSymbol * id = NULL;

    OSKext * kext = OSKext::lookupKextWithAddress(address);
    if (kext) {
        id = kext->getIdentifier();
        if (id) {
            id->retain();
        }
        kext->release();
    }
    return id;
}
#endif /* __x86_64__ */