/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _IOMEMORYDESCRIPTOR_H
#define _IOMEMORYDESCRIPTOR_H

#include <sys/cdefs.h>

#include <IOKit/IOTypes.h>
#include <IOKit/IOLocks.h>
#include <libkern/c++/OSContainers.h>
#ifdef XNU_KERNEL_PRIVATE
#include <IOKit/IOKitDebug.h>
#endif

#include <mach/memory_object_types.h>

class IOMemoryMap;
class IOMapper;
class IOService;
class IODMACommand;

/*
 * Direction of transfer, with respect to the described memory.
 */
#ifdef __LP64__
enum
#else /* !__LP64__ */
enum IODirection
#endif /* !__LP64__ */
{
    kIODirectionNone  = 0x0,   // same as VM_PROT_NONE
    kIODirectionIn    = 0x1,   // User land 'read', same as VM_PROT_READ
    kIODirectionOut   = 0x2,   // User land 'write', same as VM_PROT_WRITE
    kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
    kIODirectionInOut = kIODirectionIn  | kIODirectionOut,

    // these flags are valid for the prepare() method only
    kIODirectionPrepareToPhys32    = 0x00000004,
    kIODirectionPrepareNoFault     = 0x00000008,
    kIODirectionPrepareReserved1   = 0x00000010,
#define IODIRECTIONPREPARENONCOHERENTDEFINED 1
    kIODirectionPrepareNonCoherent = 0x00000020,

    // these flags are valid for the complete() method only
#define IODIRECTIONCOMPLETEWITHERRORDEFINED 1
    kIODirectionCompleteWithError     = 0x00000040,
#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
    kIODirectionCompleteWithDataValid = 0x00000080,
};

#ifdef __LP64__
typedef IOOptionBits IODirection;
#endif /* __LP64__ */

/*
 * IOOptionBits used in the withOptions variant
 */
enum {
    kIOMemoryDirectionMask    = 0x00000007,
#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryAutoPrepare      = 0x00000008,   // Shared with Buffer MD
#endif

    kIOMemoryTypeVirtual      = 0x00000010,
    kIOMemoryTypePhysical     = 0x00000020,
    kIOMemoryTypeUPL          = 0x00000030,
    kIOMemoryTypePersistentMD = 0x00000040,   // Persistent Memory Descriptor
    kIOMemoryTypeUIO          = 0x00000050,
#ifdef __LP64__
    kIOMemoryTypeVirtual64    = kIOMemoryTypeVirtual,
    kIOMemoryTypePhysical64   = kIOMemoryTypePhysical,
#else /* !__LP64__ */
    kIOMemoryTypeVirtual64    = 0x00000060,
    kIOMemoryTypePhysical64   = 0x00000070,
#endif /* !__LP64__ */
    kIOMemoryTypeMask         = 0x000000f0,

    kIOMemoryAsReference      = 0x00000100,
    kIOMemoryBufferPageable   = 0x00000400,
    kIOMemoryMapperNone       = 0x00000800,   // Shared with Buffer MD
    kIOMemoryHostOnly         = 0x00001000,   // Never DMA accessible
#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryRedirected       = 0x00004000,
    kIOMemoryPreparedReadOnly = 0x00008000,
#endif
    kIOMemoryPersistent       = 0x00010000,
    kIOMemoryMapCopyOnWrite   = 0x00020000,
    kIOMemoryRemote           = 0x00040000,
    kIOMemoryThreadSafe       = 0x00100000,   // Shared with Buffer MD
    kIOMemoryClearEncrypt     = 0x00200000,   // Shared with Buffer MD
    kIOMemoryUseReserve       = 0x00800000,   // Shared with Buffer MD
#define IOMEMORYUSERESERVEDEFINED 1

#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryBufferPurgeable  = 0x00400000,
    kIOMemoryBufferCacheMask  = 0x70000000,
    kIOMemoryBufferCacheShift = 28,
#endif
};

#define kIOMapperSystem ((IOMapper *) 0)

enum
{
    kIOMemoryPurgeableKeepCurrent = 1,

    kIOMemoryPurgeableNonVolatile = 2,
    kIOMemoryPurgeableVolatile    = 3,
    kIOMemoryPurgeableEmpty       = 4,

    // modifiers for kIOMemoryPurgeableVolatile behavior
    kIOMemoryPurgeableVolatileGroup0           = VM_VOLATILE_GROUP_0,
    kIOMemoryPurgeableVolatileGroup1           = VM_VOLATILE_GROUP_1,
    kIOMemoryPurgeableVolatileGroup2           = VM_VOLATILE_GROUP_2,
    kIOMemoryPurgeableVolatileGroup3           = VM_VOLATILE_GROUP_3,
    kIOMemoryPurgeableVolatileGroup4           = VM_VOLATILE_GROUP_4,
    kIOMemoryPurgeableVolatileGroup5           = VM_VOLATILE_GROUP_5,
    kIOMemoryPurgeableVolatileGroup6           = VM_VOLATILE_GROUP_6,
    kIOMemoryPurgeableVolatileGroup7           = VM_VOLATILE_GROUP_7,
    kIOMemoryPurgeableVolatileBehaviorFifo     = VM_PURGABLE_BEHAVIOR_FIFO,
    kIOMemoryPurgeableVolatileBehaviorLifo     = VM_PURGABLE_BEHAVIOR_LIFO,
    kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
    kIOMemoryPurgeableVolatileOrderingNormal   = VM_PURGABLE_ORDERING_NORMAL,
    kIOMemoryPurgeableFaultOnAccess            = VM_PURGABLE_DEBUG_FAULT,
};
enum
{
    kIOMemoryIncoherentIOFlush = 1,
    kIOMemoryIncoherentIOStore = 2,

    kIOMemoryClearEncrypted = 50,
    kIOMemorySetEncrypted   = 51,
};

#define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1

struct IODMAMapSpecification
{
    uint64_t    alignment;
    IOService * device;
    uint32_t    options;
    uint8_t     numAddressBits;
    uint8_t     resvA[3];
    uint32_t    resvB[4];
};

struct IODMAMapPageList
{
    uint32_t                pageOffset;
    uint32_t                pageListCount;
    const upl_page_info_t * pageList;
};

// mapOptions for iovmMapMemory
enum
{
    kIODMAMapReadAccess            = 0x00000001,
    kIODMAMapWriteAccess           = 0x00000002,
    kIODMAMapPhysicallyContiguous  = 0x00000010,
    kIODMAMapDeviceMemory          = 0x00000020,
    kIODMAMapPagingPath            = 0x00000040,
    kIODMAMapIdentityMap           = 0x00000080,

    kIODMAMapPageListFullyOccupied = 0x00000100,
    kIODMAMapFixedAddress          = 0x00000200,
};

#ifdef KERNEL_PRIVATE

// Used for dmaCommandOperation communications for IODMACommand and mappers

enum {
    kIOMDWalkSegments             = 0x01000000,
    kIOMDFirstSegment             = 1 | kIOMDWalkSegments,
    kIOMDGetCharacteristics       = 0x02000000,
    kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
    kIOMDDMAActive                = 0x03000000,
    kIOMDSetDMAActive             = 1 | kIOMDDMAActive,
    kIOMDSetDMAInactive           = kIOMDDMAActive,
    kIOMDAddDMAMapSpec            = 0x04000000,
    kIOMDDMAMap                   = 0x05000000,
    kIOMDDMAUnmap                 = 0x06000000,
    kIOMDDMACommandOperationMask  = 0xFF000000,
};
struct IOMDDMACharacteristics {
    UInt64      fLength;
    UInt32      fSGCount;
    UInt32      fPages;
    UInt32      fPageAlign;
    ppnum_t     fHighestPage;
    IODirection fDirection;
    UInt8       fIsPrepared;
};

struct IOMDDMAMapArgs {
    IOMapper *            fMapper;
    IODMACommand *        fCommand;
    IODMAMapSpecification fMapSpec;
    uint64_t              fOffset;
    uint64_t              fLength;
    uint64_t              fAlloc;
    uint64_t              fAllocLength;
    uint8_t               fMapContig;
};

struct IOMDDMAWalkSegmentArgs {
    UInt64 fOffset;             // Input/Output offset
    UInt64 fIOVMAddr, fLength;  // Output variables
    UInt8  fMapped;             // Input Variable, Require mapped IOVMA
    UInt64 fMappedBase;         // Input base of mapping
};
typedef UInt8 IOMDDMAWalkSegmentState[128];
// fMapped:
enum
{
    kIOMDDMAWalkMappedLocal = 2
};

#endif /* KERNEL_PRIVATE */

enum
{
    kIOPreparationIDUnprepared     = 0,
    kIOPreparationIDUnsupported    = 1,
    kIOPreparationIDAlwaysPrepared = 2,
};

#ifdef XNU_KERNEL_PRIVATE
struct IOMemoryReference;
#endif


/*! @class IOMemoryDescriptor : public OSObject
    @abstract An abstract base class defining common methods for describing physical or virtual memory.
    @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */

class IOMemoryDescriptor : public OSObject
{
    friend class IOMemoryMap;
    friend class IOMultiMemoryDescriptor;

    OSDeclareDefaultStructors(IOMemoryDescriptor);

protected:

/*! @var reserved
    Reserved for future use. (Internal use only) */
    struct IOMemoryDescriptorReserved * reserved;

protected:
    OSSet *      _mappings;
    IOOptionBits _flags;


#ifdef XNU_KERNEL_PRIVATE
public:
    struct IOMemoryReference * _memRef;
    vm_tag_t                   _kernelTag;
    vm_tag_t                   _userTag;
    int16_t                    _dmaReferences;
    uint16_t                   _internalFlags;
    kern_allocation_name_t     _mapName;
protected:
#else /* XNU_KERNEL_PRIVATE */
    void *    __iomd_reserved5;
    uint16_t  __iomd_reserved1[4];
    uintptr_t __iomd_reserved2;
#endif /* XNU_KERNEL_PRIVATE */

    uintptr_t __iomd_reserved3;
    uintptr_t __iomd_reserved4;

#ifndef __LP64__
    IODirection  _direction;   /* use _flags instead */
#endif /* !__LP64__ */
    IOByteCount  _length;      /* length of all ranges */
    IOOptionBits _tag;

public:
typedef IOOptionBits DMACommandOps;
#ifndef __LP64__
    virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
                                                IOByteCount * length ) APPLE_KEXT_DEPRECATED;
#endif /* !__LP64__ */

/*! @function initWithOptions
    @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions.
    @discussion Note this function can be used to re-init a previously created memory descriptor.
    @result true on success, false on failure. */
    virtual bool initWithOptions(void * buffers,
                                 UInt32 count,
                                 UInt32 offset,
                                 task_t task,
                                 IOOptionBits options,
                                 IOMapper * mapper = kIOMapperSystem);

#ifndef __LP64__
    virtual addr64_t getPhysicalSegment64( IOByteCount offset,
                                           IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
#endif /* !__LP64__ */

/*! @function setPurgeable
    @abstract Control the purgeable status of a memory descriptor's memory.
    @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
    @param newState - the desired new purgeable state of the memory:<br>
    kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
    kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
    kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
    kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it.
    @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
    kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
    kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.<br>
    kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.<br>
    @result An IOReturn code. */

    virtual IOReturn setPurgeable( IOOptionBits newState,
                                   IOOptionBits * oldState );
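
/* Example usage (a minimal sketch; "dataBuffer" is a hypothetical
 * IOBufferMemoryDescriptor created with the kIOMemoryPurgeable option):
 *
 *   IOOptionBits oldState;
 *
 *   // Let the VM system reclaim the pages while the driver is idle.
 *   dataBuffer->setPurgeable(kIOMemoryPurgeableVolatile, NULL);
 *
 *   // Later, before touching the data again, make it nonvolatile and check
 *   // whether the contents survived.
 *   IOReturn ret = dataBuffer->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *   if ((kIOReturnSuccess == ret) && (kIOMemoryPurgeableEmpty == oldState)) {
 *       // The contents were reclaimed and must be regenerated.
 *   }
 */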


/*! @function getPageCounts
    @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
    @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
    @param residentPageCount - If non-null, the number of resident pages encompassed by this IOMemoryDescriptor is returned here.
    @param dirtyPageCount - If non-null, the number of dirty pages encompassed by this IOMemoryDescriptor is returned here.
    @result An IOReturn code. */

    IOReturn getPageCounts( IOByteCount * residentPageCount,
                            IOByteCount * dirtyPageCount);
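
/* Example usage (a minimal sketch; "md" is a hypothetical prepared descriptor):
 *
 *   IOByteCount resident, dirty;
 *   if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
 *       IOLog("%llu resident, %llu dirty pages\n",
 *             (unsigned long long) resident, (unsigned long long) dirty);
 *   }
 */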

/*! @function performOperation
    @abstract Perform an operation on the memory descriptor's memory.
    @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
    @param options The operation to perform on the memory:<br>
    kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
    kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
    @param offset A byte offset into the memory descriptor's memory.
    @param length The length of the data range.
    @result An IOReturn code. */

    virtual IOReturn performOperation( IOOptionBits options,
                                       IOByteCount offset, IOByteCount length );
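
/* Example usage (a minimal sketch; "md", "rangeOffset" and "rangeLength" are
 * hypothetical, and the descriptor is assumed to have been prepared):
 *
 *   // Flush the processor cache for a range before a device reads it over a
 *   // non-coherent bus; not required for coherent PCI DMA.
 *   IOReturn ret = md->performOperation(kIOMemoryIncoherentIOFlush,
 *                                       rangeOffset, rangeLength);
 *   if (kIOReturnUnsupported == ret) {
 *       // Not needed (or not available) on this architecture.
 *   }
 */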

    // Used for dedicated communications for IODMACommand
    virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;

/*! @function getPhysicalSegment
    @abstract Break a memory descriptor into its physically contiguous segments.
    @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
    @param offset A byte offset into the memory whose physical address to return.
    @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
    @result A physical address, or zero if the offset is beyond the length of the memory. */

#ifdef __LP64__
    virtual addr64_t getPhysicalSegment( IOByteCount offset,
                                         IOByteCount * length,
                                         IOOptionBits options = 0 ) = 0;
#else /* !__LP64__ */
    virtual addr64_t getPhysicalSegment( IOByteCount offset,
                                         IOByteCount * length,
                                         IOOptionBits options );
#endif /* !__LP64__ */
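
/* Example usage (a minimal sketch; "md" is a hypothetical prepared descriptor).
 * IODMACommand is normally preferred for generating scatter/gather lists, but the
 * segments can be walked directly:
 *
 *   IOByteCount offset = 0;
 *   IOByteCount total  = md->getLength();
 *   while (offset < total) {
 *       IOByteCount segLength;
 *       addr64_t segAddr = md->getPhysicalSegment(offset, &segLength, kIOMemoryMapperNone);
 *       if (!segAddr) break;
 *       // [segAddr, segAddr + segLength) is one physically contiguous range.
 *       offset += segLength;
 *   }
 */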

    virtual uint64_t getPreparationID( void );
    void setPreparationID( void );

#ifdef XNU_KERNEL_PRIVATE
    IOMemoryDescriptorReserved * getKernelReserved( void );
    IOReturn dmaMap(
        IOMapper * mapper,
        IODMACommand * command,
        const IODMAMapSpecification * mapSpec,
        uint64_t offset,
        uint64_t length,
        uint64_t * mapAddress,
        uint64_t * mapLength);
    IOReturn dmaUnmap(
        IOMapper * mapper,
        IODMACommand * command,
        uint64_t offset,
        uint64_t mapAddress,
        uint64_t mapLength);
    void dmaMapRecord(
        IOMapper * mapper,
        IODMACommand * command,
        uint64_t mapLength);

    void setVMTags(vm_tag_t kernelTag, vm_tag_t userTag);
    vm_tag_t getVMTag(vm_map_t map);
#endif

private:
    OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
    OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 1);
    OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 2);
    OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 3);
    OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 4);
    OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 5);
    OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 6);
    OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
    OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);

protected:
    virtual void free() APPLE_KEXT_OVERRIDE;
public:
    static void initialize( void );

public:
/*! @function withAddress
    @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
    @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
    @param address The virtual address of the first byte in the memory.
    @param withLength The length of memory.
    @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
    @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

    static IOMemoryDescriptor * withAddress(void * address,
                                            IOByteCount withLength,
                                            IODirection withDirection);

#ifndef __LP64__
    static IOMemoryDescriptor * withAddress(IOVirtualAddress address,
                                            IOByteCount withLength,
                                            IODirection withDirection,
                                            task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */
#endif /* !__LP64__ */

/*! @function withPhysicalAddress
    @abstract Create an IOMemoryDescriptor to describe one physical range.
    @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
    @param address The physical address of the first byte in the memory.
    @param withLength The length of memory.
    @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
    @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

    static IOMemoryDescriptor * withPhysicalAddress(
        IOPhysicalAddress address,
        IOByteCount withLength,
        IODirection withDirection );

#ifndef __LP64__
    static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges,
                                           UInt32 withCount,
                                           IODirection withDirection,
                                           task_t withTask,
                                           bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */
#endif /* !__LP64__ */

/*! @function withAddressRange
    @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
    @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
    @param address The virtual address of the first byte in the memory.
    @param length The length of memory.
    @param options
    kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
    @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this API. The task argument may be NULL to specify memory by physical address.
    @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

    static IOMemoryDescriptor * withAddressRange(
        mach_vm_address_t address,
        mach_vm_size_t length,
        IOOptionBits options,
        task_t task);
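
/* Example usage (a minimal sketch; "userAddr", "userLen" and "userTask" are
 * hypothetical values taken from a user client request):
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       userAddr, userLen, kIODirectionOutIn, userTask);
 *   if (md) {
 *       if (kIOReturnSuccess == md->prepare()) {
 *           // ... perform the I/O against the wired memory ...
 *           md->complete();
 *       }
 *       md->release();
 *   }
 */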

/*! @function withAddressRanges
    @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
    @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
    @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange.
    @param rangeCount The member count of the ranges array.
    @param options
    kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
    kIOMemoryAsReference For options:type = Virtual or Physical this indicates that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
    @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this API. The task argument may be NULL to specify memory by physical address.
    @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

    static IOMemoryDescriptor * withAddressRanges(
        IOAddressRange * ranges,
        UInt32 rangeCount,
        IOOptionBits options,
        task_t task);

/*! @function withOptions
    @abstract Master initialiser for all variants of memory descriptors.
    @discussion This method creates and initializes an IOMemoryDescriptor for memory; it has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.


    @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or on a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h APIs, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels.

    @param count When options:type = Virtual or Physical, count contains a count of the number of entries in the buffers array. For options:type = UPL this field contains a total length.

    @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.

    @param task Only used when options:type = Virtual; the task each of the virtual ranges are mapped into.

    @param options
    kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
    kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates which type of basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
    kIOMemoryAsReference For options:type = Virtual or Physical this indicates that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
    kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.

    @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present.

    @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

    static IOMemoryDescriptor * withOptions(void * buffers,
                                            UInt32 count,
                                            UInt32 offset,
                                            task_t task,
                                            IOOptionBits options,
                                            IOMapper * mapper = kIOMapperSystem);
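
/* Example usage (a minimal sketch; "ranges", "rangeCount" and "userTask" are
 * hypothetical). On LP64 this selects the Virtual64 variant, roughly equivalent
 * in effect to withAddressRanges() above:
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(
 *       ranges,                  // array of IOAddressRange
 *       rangeCount,              // number of entries in the array
 *       0,                       // offset - only used for UPL descriptors
 *       userTask,                // task the virtual ranges are mapped in
 *       kIOMemoryTypeVirtual64 | kIODirectionInOut);
 */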

#ifndef __LP64__
    static IOMemoryDescriptor * withPhysicalRanges(
        IOPhysicalRange * ranges,
        UInt32 withCount,
        IODirection withDirection,
        bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */
#endif /* !__LP64__ */

#ifndef __LP64__
    static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor * of,
                                             IOByteCount offset,
                                             IOByteCount length,
                                             IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
#endif /* !__LP64__ */

/*! @function withPersistentMemoryDescriptor
    @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
    @discussion If the original memory descriptor's address and length are still backed by the same real memory, i.e. the user hasn't deallocated and then reallocated memory at the same address, then the original memory descriptor is returned with an additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note it is not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
    @param originalMD The memory descriptor to be duplicated.
    @result Either the original memory descriptor with an additional retain, or a new memory descriptor; 0 for a bad original memory descriptor or some other resource shortage. */
    static IOMemoryDescriptor *
    withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD);

#ifndef __LP64__
    // obsolete initializers
    // - initWithOptions is the designated initializer
    virtual bool initWithAddress(void * address,
                                 IOByteCount withLength,
                                 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
    virtual bool initWithAddress(IOVirtualAddress address,
                                 IOByteCount withLength,
                                 IODirection withDirection,
                                 task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
    virtual bool initWithPhysicalAddress(
        IOPhysicalAddress address,
        IOByteCount withLength,
        IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
    virtual bool initWithRanges(IOVirtualRange * ranges,
                                UInt32 withCount,
                                IODirection withDirection,
                                task_t withTask,
                                bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
    virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
                                        UInt32 withCount,
                                        IODirection withDirection,
                                        bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
#endif /* __LP64__ */

/*! @function getDirection
    @abstract Accessor to get the direction the memory descriptor was created with.
    @discussion This method returns the direction the memory descriptor was created with.
    @result The direction. */

    virtual IODirection getDirection() const;

/*! @function getLength
    @abstract Accessor to get the length of the memory descriptor (over all its ranges).
    @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths.
    @result The byte count. */

    virtual IOByteCount getLength() const;

/*! @function setTag
    @abstract Set the tag for the memory descriptor.
    @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
    @param tag The tag. */

    virtual void setTag( IOOptionBits tag );

/*! @function getTag
    @abstract Accessor to retrieve the tag for the memory descriptor.
    @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
    @result The tag. */

    virtual IOOptionBits getTag( void );

/*! @function getFlags
    @abstract Accessor to retrieve the options the memory descriptor was created with.
    @discussion Accessor to retrieve the options the memory descriptor was created with, and flags with its state. These bits are defined by the kIOMemory* enum.
    @result The flags bitfield. */

    uint64_t getFlags(void);

/*! @function readBytes
    @abstract Copy data from the memory descriptor's buffer to the specified buffer.
    @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
    @param offset A byte offset into the memory descriptor's memory.
    @param bytes The caller supplied buffer to copy the data to.
    @param withLength The length of the data to copy.
    @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

    virtual IOByteCount readBytes(IOByteCount offset,
                                  void * bytes, IOByteCount withLength);

/*! @function writeBytes
    @abstract Copy data to the memory descriptor's buffer from the specified buffer.
    @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
    @param offset A byte offset into the memory descriptor's memory.
    @param bytes The caller supplied buffer to copy the data from.
    @param withLength The length of the data to copy.
    @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

    virtual IOByteCount writeBytes(IOByteCount offset,
                                   const void * bytes, IOByteCount withLength);
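
/* Example usage (a minimal sketch; "md" is a hypothetical prepared descriptor with
 * the appropriate direction bits set):
 *
 *   uint8_t local[512];
 *
 *   // Stage the first 512 bytes of the described memory into a local buffer.
 *   IOByteCount got = md->readBytes(0, local, sizeof(local));
 *
 *   // ... operate on "local", e.g. a PIO transfer to the device ...
 *
 *   // Copy (possibly updated) data back into the described memory.
 *   if (got) (void) md->writeBytes(0, local, got);
 */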

#ifndef __LP64__
    virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
                                                 IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
    @abstract Return the physical address of the first byte in the memory.
    @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
    @result A physical address. */

    IOPhysicalAddress getPhysicalAddress();

#ifndef __LP64__
    virtual void * getVirtualSegment(IOByteCount offset,
                                     IOByteCount * length) APPLE_KEXT_DEPRECATED; /* use map() and getVirtualAddress() instead */
#endif /* !__LP64__ */

/*! @function prepare
    @abstract Prepare the memory for an I/O transfer.
    @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor.
    @param forDirection The direction of the I/O to be performed, or kIODirectionNone for the direction specified by the memory descriptor.
    @result An IOReturn code. */

    virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;

/*! @function complete
    @abstract Complete processing of the memory after an I/O transfer finishes.
    @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is no longer respected. The direction is totally determined at prepare() time.
    @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
    @result An IOReturn code. */

    virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
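
/* Example usage (a minimal sketch; "md" is a hypothetical descriptor and the DMA
 * programming itself is elided):
 *
 *   IOReturn ret = md->prepare();        // page in and wire the memory
 *   if (kIOReturnSuccess == ret) {
 *       // ... program the hardware and wait for the transfer to finish ...
 *       md->complete();                  // must balance the successful prepare()
 *   }
 */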

    /*
     * Mapping functions.
     */

/*! @function createMappingInTask
    @abstract Maps an IOMemoryDescriptor into a task.
    @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to an IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
    @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
    @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere option should not be set. Otherwise, atAddress is ignored.
    @param options Mapping options are defined in IOTypes.h,<br>
    kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
    kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
    kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
    kIOMapReadOnly to allow only read only accesses to the memory - writes will cause an access fault.<br>
    kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
    kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
    kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
    @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
    @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
    @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */

    IOMemoryMap * createMappingInTask(
        task_t intoTask,
        mach_vm_address_t atAddress,
        IOOptionBits options,
        mach_vm_size_t offset = 0,
        mach_vm_size_t length = 0 );
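
/* Example usage (a minimal sketch; "md" is a hypothetical descriptor):
 *
 *   IOMemoryMap * memMap = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
 *   if (memMap) {
 *       void * va = (void *) memMap->getVirtualAddress();
 *       // ... access the memory through va for memMap->getLength() bytes ...
 *       memMap->release();   // releasing the map destroys the mapping
 *   }
 */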

#ifndef __LP64__
    virtual IOMemoryMap * map(
        task_t intoTask,
        IOVirtualAddress atAddress,
        IOOptionBits options,
        IOByteCount offset = 0,
        IOByteCount length = 0 ) APPLE_KEXT_DEPRECATED; /* use createMappingInTask() instead */
#endif /* !__LP64__ */

/*! @function map
    @abstract Maps an IOMemoryDescriptor into the kernel map.
    @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
    @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
    @result See the full version of the createMappingInTask method. */

    virtual IOMemoryMap * map(
        IOOptionBits options = 0 );

/*! @function setMapping
    @abstract Establishes an already existing mapping.
    @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
    @param task Address space in which the mapping exists.
    @param mapAddress Virtual address of the mapping.
    @param options Caching and read-only attributes of the mapping.
    @result An IOMemoryMap object created to represent the mapping. */

    virtual IOMemoryMap * setMapping(
        task_t task,
        IOVirtualAddress mapAddress,
        IOOptionBits options = 0 );

    // Following methods are private implementation

#ifdef __LP64__
    virtual
#endif /* __LP64__ */
    IOReturn redirect( task_t safeTask, bool redirect );

    IOReturn handleFault(
        void * _pager,
        mach_vm_size_t sourceOffset,
        mach_vm_size_t length);

    IOReturn populateDevicePager(
        void * pager,
        vm_map_t addressMap,
        mach_vm_address_t address,
        mach_vm_size_t sourceOffset,
        mach_vm_size_t length,
        IOOptionBits options );

    virtual IOMemoryMap * makeMapping(
        IOMemoryDescriptor * owner,
        task_t intoTask,
        IOVirtualAddress atAddress,
        IOOptionBits options,
        IOByteCount offset,
        IOByteCount length );

protected:
    virtual void addMapping(
        IOMemoryMap * mapping );

    virtual void removeMapping(
        IOMemoryMap * mapping );

    virtual IOReturn doMap(
        vm_map_t addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits options,
        IOByteCount sourceOffset = 0,
        IOByteCount length = 0 );

    virtual IOReturn doUnmap(
        vm_map_t addressMap,
        IOVirtualAddress logical,
        IOByteCount length );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*! @class IOMemoryMap : public OSObject
    @abstract A class defining common methods for describing a memory mapping.
    @discussion The IOMemoryMap object represents a mapped range of memory, described by an IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */

class IOMemoryMap : public OSObject
{
    OSDeclareDefaultStructors(IOMemoryMap)
#ifdef XNU_KERNEL_PRIVATE
public:
    IOMemoryDescriptor * fMemory;
    IOMemoryMap *        fSuperMap;
    mach_vm_size_t       fOffset;
    mach_vm_address_t    fAddress;
    mach_vm_size_t       fLength;
    task_t               fAddressTask;
    vm_map_t             fAddressMap;
    IOOptionBits         fOptions;
    upl_t                fRedirUPL;
    ipc_port_t           fRedirEntry;
    IOMemoryDescriptor * fOwner;
    uint8_t              fUserClientUnmap;
#if IOTRACKING
    IOTrackingUser       fTracking;
#endif
#endif /* XNU_KERNEL_PRIVATE */

protected:
    virtual void taggedRelease(const void *tag = 0) const APPLE_KEXT_OVERRIDE;
    virtual void free() APPLE_KEXT_OVERRIDE;

public:
/*! @function getVirtualAddress
    @abstract Accessor to the virtual address of the first byte in the mapping.
    @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings.
    @result A virtual address. */

    virtual IOVirtualAddress getVirtualAddress();
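
/* Example usage (a minimal sketch; "memMap" is a hypothetical mapping returned by
 * IOMemoryDescriptor::createMappingInTask() or map()):
 *
 *   IOVirtualAddress va  = memMap->getVirtualAddress();
 *   IOByteCount      len = memMap->getLength();
 *   bzero((void *) va, len);   // valid for as long as memMap is retained
 */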

/*! @function getPhysicalSegment
    @abstract Break a mapping into its physically contiguous segments.
    @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
    @param offset A byte offset into the mapping whose physical address to return.
    @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
    @result A physical address, or zero if the offset is beyond the length of the mapping. */

#ifdef __LP64__
    virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
                                                 IOByteCount * length,
                                                 IOOptionBits options = 0);
#else /* !__LP64__ */
    virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
                                                 IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
    @abstract Return the physical address of the first byte in the mapping.
    @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
    @result A physical address. */

    IOPhysicalAddress getPhysicalAddress();

/*! @function getLength
    @abstract Accessor to the length of the mapping.
    @discussion This method returns the length of the mapping.
    @result A byte count. */

    virtual IOByteCount getLength();

/*! @function getAddressTask
    @abstract Accessor to the task of the mapping.
    @discussion This method returns the mach task the mapping exists in.
    @result A mach task_t. */

    virtual task_t getAddressTask();

/*! @function getMemoryDescriptor
    @abstract Accessor to the IOMemoryDescriptor the mapping was created from.
    @discussion This method returns the IOMemoryDescriptor the mapping was created from.
    @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */

    virtual IOMemoryDescriptor * getMemoryDescriptor();

/*! @function getMapOptions
    @abstract Accessor to the options the mapping was created with.
    @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
    @result Options for the mapping, including cache settings. */

    virtual IOOptionBits getMapOptions();

/*! @function unmap
    @abstract Force the IOMemoryMap to unmap, without destroying the object.
    @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
    @result An IOReturn code. */

    virtual IOReturn unmap();

    virtual void taskDied();

/*! @function redirect
    @abstract Replace the memory mapped in a process with new backing memory.
    @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
    @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with a non-NULL newBackingMemory argument.
    @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
    @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
    @result An IOReturn code. */

#ifndef __LP64__
// For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
// for 64 bit, these fall together on the 64 bit one.
    virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
                              IOOptionBits options,
                              IOByteCount offset = 0);
#endif
    virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
                              IOOptionBits options,
                              mach_vm_size_t offset = 0);

#ifdef __LP64__
/*! @function getAddress
    @abstract Accessor to the virtual address of the first byte in the mapping.
    @discussion This method returns the virtual address of the first byte in the mapping.
    @result A virtual address. */
    inline mach_vm_address_t getAddress() __attribute__((always_inline));
/*! @function getSize
    @abstract Accessor to the length of the mapping.
    @discussion This method returns the length of the mapping.
    @result A byte count. */
    inline mach_vm_size_t getSize() __attribute__((always_inline));
#else /* !__LP64__ */
/*! @function getAddress
    @abstract Accessor to the virtual address of the first byte in the mapping.
    @discussion This method returns the virtual address of the first byte in the mapping.
    @result A virtual address. */
    virtual mach_vm_address_t getAddress();
/*! @function getSize
    @abstract Accessor to the length of the mapping.
    @discussion This method returns the length of the mapping.
    @result A byte count. */
    virtual mach_vm_size_t getSize();
#endif /* !__LP64__ */

#ifdef XNU_KERNEL_PRIVATE
    // for IOMemoryDescriptor use
    IOMemoryMap * copyCompatible( IOMemoryMap * newMapping );

    bool init(
        task_t intoTask,
        mach_vm_address_t toAddress,
        IOOptionBits options,
        mach_vm_size_t offset,
        mach_vm_size_t length );

    bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);

    IOReturn redirect(
        task_t intoTask, bool redirect );

    IOReturn userClientUnmap();
#endif /* XNU_KERNEL_PRIVATE */

    IOReturn wireRange(
        uint32_t options,
        mach_vm_size_t offset,
        mach_vm_size_t length);

    OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
    OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
    OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
    OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
    OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
    OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
    OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
    OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef XNU_KERNEL_PRIVATE
// Also these flags should not overlap with the options to
// IOMemoryDescriptor::initWithRanges(... IOOptionsBits options);
enum {
    _kIOMemorySourceSegment = 0x00002000
};
#endif /* XNU_KERNEL_PRIVATE */

// The following classes are private implementation of IOMemoryDescriptor - they
// should not be referenced directly, just through the public APIs in the
// IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
// might be created by IOMemoryDescriptor::withAddressRange(), but there should be
// no need to reference it as anything but a generic IOMemoryDescriptor *.

class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
{
    OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);

public:
    union Ranges {
        IOVirtualRange  * v;
        IOAddressRange  * v64;
        IOPhysicalRange * p;
        void            * uio;
    };
protected:
    Ranges   _ranges;
    unsigned _rangesCount;       /* number of address ranges in list */
#ifndef __LP64__
    bool     _rangesIsAllocated; /* is list allocated by us? */
#endif /* !__LP64__ */

    task_t _task;                /* task where all ranges are mapped to */

    union {
        IOVirtualRange  v;
        IOPhysicalRange p;
    } _singleRange;              /* storage space for a single range */

    unsigned _wireCount;         /* number of outstanding wires */

#ifndef __LP64__
    uintptr_t _cachedVirtualAddress;

    IOPhysicalAddress _cachedPhysicalAddress;
#endif /* !__LP64__ */

    bool _initialized;           /* has superclass been initialized? */

public:
    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE;

    virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE;

#ifdef XNU_KERNEL_PRIVATE
    // Internal APIs may be made virtual at some time in the future.
    IOReturn wireVirtual(IODirection forDirection);
    IOReturn dmaMap(
        IOMapper * mapper,
        IODMACommand * command,
        const IODMAMapSpecification * mapSpec,
        uint64_t offset,
        uint64_t length,
        uint64_t * mapAddress,
        uint64_t * mapLength);
    bool initMemoryEntries(size_t size, IOMapper * mapper);

    IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
                                             IOMemoryReference * realloc);
    void memoryReferenceFree(IOMemoryReference * ref);
    void memoryReferenceRelease(IOMemoryReference * ref);

    IOReturn memoryReferenceCreate(
        IOOptionBits options,
        IOMemoryReference ** reference);

    IOReturn memoryReferenceMap(IOMemoryReference * ref,
                                vm_map_t map,
                                mach_vm_size_t inoffset,
                                mach_vm_size_t size,
                                IOOptionBits options,
                                mach_vm_address_t * inaddr);

    static IOReturn memoryReferenceSetPurgeable(
        IOMemoryReference * ref,
        IOOptionBits newState,
        IOOptionBits * oldState);
    static IOReturn memoryReferenceGetPageCounts(
        IOMemoryReference * ref,
        IOByteCount * residentPageCount,
        IOByteCount * dirtyPageCount);
#endif

private:

#ifndef __LP64__
    virtual void setPosition(IOByteCount position);
    virtual void mapIntoKernel(unsigned rangeIndex);
    virtual void unmapFromKernel();
#endif /* !__LP64__ */

    // Internal
    OSData *     _memoryEntries;
    unsigned int _pages;
    ppnum_t      _highestPage;
    uint32_t     __iomd_reservedA;
    uint32_t     __iomd_reservedB;

    IOLock *     _prepareLock;

public:
    /*
     * IOMemoryDescriptor required methods
     */

    // Master initialiser
    virtual bool initWithOptions(void * buffers,
                                 UInt32 count,
                                 UInt32 offset,
                                 task_t task,
                                 IOOptionBits options,
                                 IOMapper * mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE;

#ifndef __LP64__
    // Secondary initialisers
    virtual bool initWithAddress(void * address,
                                 IOByteCount withLength,
                                 IODirection withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

    virtual bool initWithAddress(IOVirtualAddress address,
                                 IOByteCount withLength,
                                 IODirection withDirection,
                                 task_t withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

    virtual bool initWithPhysicalAddress(
        IOPhysicalAddress address,
        IOByteCount withLength,
        IODirection withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

    virtual bool initWithRanges( IOVirtualRange * ranges,
                                 UInt32 withCount,
                                 IODirection withDirection,
                                 task_t withTask,
                                 bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

    virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
                                        UInt32 withCount,
                                        IODirection withDirection,
                                        bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

    virtual addr64_t getPhysicalSegment64( IOByteCount offset,
                                           IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

    virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
                                                 IOByteCount * length) APPLE_KEXT_OVERRIDE;

    virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
                                               IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

    virtual void * getVirtualSegment(IOByteCount offset,
                                     IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
#endif /* !__LP64__ */

    virtual IOReturn setPurgeable( IOOptionBits newState,
                                   IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE;

    virtual addr64_t getPhysicalSegment( IOByteCount offset,
                                         IOByteCount * length,
#ifdef __LP64__
                                         IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE;
#else /* !__LP64__ */
                                         IOOptionBits options ) APPLE_KEXT_OVERRIDE;
#endif /* !__LP64__ */

    virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

    virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

    virtual IOReturn doMap(
        vm_map_t addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits options,
        IOByteCount sourceOffset = 0,
        IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE;

    virtual IOReturn doUnmap(
        vm_map_t addressMap,
        IOVirtualAddress logical,
        IOByteCount length ) APPLE_KEXT_OVERRIDE;

    virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;

    // Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
    static IOMemoryDescriptor *
    withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);

};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifdef __LP64__
mach_vm_address_t IOMemoryMap::getAddress()
{
    return (getVirtualAddress());
}

mach_vm_size_t IOMemoryMap::getSize()
{
    return (getLength());
}
#else /* !__LP64__ */
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#endif /* !_IOMEMORYDESCRIPTOR_H */