/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOLib.h>
#include <IOKit/IOInterleavedMemoryDescriptor.h>

#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOInterleavedMemoryDescriptor, IOMemoryDescriptor)

IOInterleavedMemoryDescriptor * IOInterleavedMemoryDescriptor::withCapacity(
	IOByteCount capacity,
	IODirection direction )
{
	//
	// Create a new IOInterleavedMemoryDescriptor.  The "buffer" is made up of
	// several memory descriptors, chained end-to-end to form a single logical
	// memory descriptor.
	//

	IOInterleavedMemoryDescriptor * me = new IOInterleavedMemoryDescriptor;

	if (me && !me->initWithCapacity(
		    /* capacity */ capacity,
		    /* direction */ direction )) {
		me->release();
		me = NULL;
	}

	return me;
}
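
// A minimal usage sketch (illustrative, not part of the class): "descA" and
// "descB" stand for IOMemoryDescriptors a driver already holds.  Note that
// "capacity" is the number of sub-ranges the object can hold, not a byte count.
//
//   IOInterleavedMemoryDescriptor * imd =
//       IOInterleavedMemoryDescriptor::withCapacity( 2, kIODirectionOut );
//   if ( imd != NULL ) {
//       imd->setMemoryDescriptor( descA,   0, 4096 );  // first 4096 bytes of descA
//       imd->setMemoryDescriptor( descB, 512, 1024 );  // 1024 bytes of descB at offset 512
//   }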

bool
IOInterleavedMemoryDescriptor::initWithCapacity(
	IOByteCount capacity,
	IODirection direction )
{
	//
	// Initialize an IOInterleavedMemoryDescriptor.  The "buffer" is made up of
	// several memory descriptors, chained end-to-end to form a single logical
	// memory descriptor.
	//

	assert(capacity);

	// Ask our superclass' opinion.
	if (super::init() == false) {
		return false;
	}

	// Initialize our minimal state.

	_flags = direction;
#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
	_length = 0;
	_mappings = NULL;
	_tag = 0;
	_descriptorCount = 0;
	_descriptors = IONew(IOMemoryDescriptor *, capacity);
	_descriptorOffsets = IONewData(IOByteCount, capacity);
	_descriptorLengths = IONewData(IOByteCount, capacity);

	if ((_descriptors == NULL) || (_descriptorOffsets == NULL) || (_descriptorLengths == NULL)) {
		return false;
	}

	_descriptorCapacity = capacity;

	return true;
}

void
IOInterleavedMemoryDescriptor::clearMemoryDescriptors( IODirection direction )
{
	UInt32 index;

	for (index = 0; index < _descriptorCount; index++) {
		if (_descriptorPrepared) {
			_descriptors[index]->complete(getDirection());
		}

		_descriptors[index]->release();
		_descriptors[index] = NULL;

		_descriptorOffsets[index] = 0;
		_descriptorLengths[index] = 0;
	}

	if (direction != kIODirectionNone) {
		_flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
		_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
	}

	_descriptorCount = 0;
	_length = 0;
	_mappings = NULL;
	_tag = 0;
}
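
// Illustrative reuse sketch (assumes "imd" and "descC" are a previously built
// descriptor and a new source, as in the example above): clearing releases the
// current sub-ranges so the same object can describe a new transfer.
//
//   imd->clearMemoryDescriptors( kIODirectionIn );  // drop old ranges, switch direction
//   imd->setMemoryDescriptor( descC, 0, 8192 );     // describe the next transfer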

bool
IOInterleavedMemoryDescriptor::setMemoryDescriptor(
	IOMemoryDescriptor * descriptor,
	IOByteCount offset,
	IOByteCount length )
{
	if (_descriptorPrepared || (_descriptorCount == _descriptorCapacity)) {
		return false;
	}

	if ((offset + length) > descriptor->getLength()) {
		return false;
	}

//	if ( descriptor->getDirection() != getDirection() )
//		return false;

	descriptor->retain();
	_descriptors[_descriptorCount] = descriptor;
	_descriptorOffsets[_descriptorCount] = offset;
	_descriptorLengths[_descriptorCount] = length;

	_descriptorCount++;

	_length += length;

	return true;
}

void
IOInterleavedMemoryDescriptor::free()
{
	//
	// Free all of this object's outstanding resources.
	//

	if (_descriptors) {
		for (unsigned index = 0; index < _descriptorCount; index++) {
			_descriptors[index]->release();
		}

		IODelete(_descriptors, IOMemoryDescriptor *, _descriptorCapacity);

		if (_descriptorOffsets != NULL) {
			IODeleteData(_descriptorOffsets, IOByteCount, _descriptorCapacity);
		}

		if (_descriptorLengths != NULL) {
			IODeleteData(_descriptorLengths, IOByteCount, _descriptorCapacity);
		}
	}

	super::free();
}

IOReturn
IOInterleavedMemoryDescriptor::prepare(IODirection forDirection)
{
	//
	// Prepare the memory for an I/O transfer.
	//
	// This involves paging in the memory and wiring it down for the duration
	// of the transfer.  The complete() method finishes the processing of the
	// memory after the I/O transfer finishes.
	//

	unsigned index;
	IOReturn status = kIOReturnSuccess;
	IOReturn statusUndo;

	if (forDirection == kIODirectionNone) {
		forDirection = getDirection();
	}

	for (index = 0; index < _descriptorCount; index++) {
		status = _descriptors[index]->prepare(forDirection);
		if (status != kIOReturnSuccess) {
			break;
		}
	}

	if (status != kIOReturnSuccess) {
		// Undo only the descriptors that were successfully prepared before the failure.
		for (unsigned indexUndo = 0; indexUndo < index; indexUndo++) {
			statusUndo = _descriptors[indexUndo]->complete(forDirection);
			assert(statusUndo == kIOReturnSuccess);
		}
	}

	if (status == kIOReturnSuccess) {
		_descriptorPrepared = true;
	}

	return status;
}

IOReturn
IOInterleavedMemoryDescriptor::complete(IODirection forDirection)
{
	//
	// Complete processing of the memory after an I/O transfer finishes.
	//
	// This method shouldn't be called unless a prepare() was previously issued;
	// prepare() and complete() must occur in pairs, before and after an I/O
	// transfer.
	//

	IOReturn status;
	IOReturn statusFinal = kIOReturnSuccess;

	if (forDirection == kIODirectionNone) {
		forDirection = getDirection();
	}

	for (unsigned index = 0; index < _descriptorCount; index++) {
		status = _descriptors[index]->complete(forDirection);
		if (status != kIOReturnSuccess) {
			statusFinal = status;
		}
		assert(status == kIOReturnSuccess);
	}

	_descriptorPrepared = false;

	return statusFinal;
}
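
// Sketch of the prepare()/complete() pairing around a transfer (illustrative;
// "imd" and startMyTransfer() are hypothetical driver-side names):
//
//   if ( imd->prepare() == kIOReturnSuccess ) {  // wire the memory down
//       startMyTransfer( imd );                  // ... perform the I/O ...
//       imd->complete();                         // unwire after the I/O finishes
//   }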

addr64_t
IOInterleavedMemoryDescriptor::getPhysicalSegment(
	IOByteCount offset,
	IOByteCount * length,
	IOOptionBits options )
{
	//
	// This method returns the physical address of the byte at the given offset
	// into the memory, and optionally the length of the physically contiguous
	// segment from that offset.
	//

	addr64_t pa;

	assert(offset <= _length);

	for (unsigned index = 0; index < _descriptorCount; index++) {
		if (offset < _descriptorLengths[index]) {
			pa = _descriptors[index]->getPhysicalSegment(_descriptorOffsets[index] + offset, length, options);
			// Clip the segment so it does not extend past this sub-range.
			if ((_descriptorLengths[index] - offset) < *length) {
				*length = _descriptorLengths[index] - offset;
			}
			return pa;
		}
		offset -= _descriptorLengths[index];
	}

	if (length) {
		*length = 0;
	}

	return 0;
}
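
// Illustrative walk of the interleaved buffer, one physically contiguous
// segment at a time (e.g. to build a scatter/gather list); "imd" is a
// prepared descriptor as in the sketches above:
//
//   IOByteCount offset = 0;
//   while ( offset < imd->getLength() ) {
//       IOByteCount segLength;
//       addr64_t    segAddr = imd->getPhysicalSegment( offset, &segLength, 0 );
//       if ( segAddr == 0 || segLength == 0 ) {
//           break;                  // no further mappable segments
//       }
//       // ... program segAddr/segLength into the hardware ...
//       offset += segLength;
//   }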