/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOLib.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)

IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
                                  IOMemoryDescriptor ** descriptors,
                                  UInt32                withCount,
                                  IODirection           withDirection,
                                  bool                  asReference )
{
    //
    // Create a new IOMultiMemoryDescriptor.  The "buffer" is made up of several
    // memory descriptors that are chained end-to-end to make up a single
    // logical memory descriptor.
    //
    // Passing the descriptors as a reference will avoid an extra allocation.
    //

    IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;

    if ( me && me->initWithDescriptors(
                                  /* descriptors   */ descriptors,
                                  /* withCount     */ withCount,
                                  /* withDirection */ withDirection,
                                  /* asReference   */ asReference ) == false )
    {
        me->release();
        me = 0;
    }

    return me;
}
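
/*
 * Usage sketch (illustrative only; "mdA" and "mdB" stand for hypothetical
 * sub-descriptors owned by the caller): concatenating two descriptors into
 * one logical buffer.
 *
 *     IOMemoryDescriptor *      parts[2] = { mdA, mdB };
 *     IOMultiMemoryDescriptor * md;
 *
 *     md = IOMultiMemoryDescriptor::withDescriptors(
 *              parts, 2, kIODirectionOut, false );  // false: parts[] is copied
 *     if ( md )
 *     {
 *         assert(md->getLength() == mdA->getLength() + mdB->getLength());
 *         md->release();          // also drops the retains taken on mdA, mdB
 *     }
 */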

bool IOMultiMemoryDescriptor::initWithDescriptors(
                                  IOMemoryDescriptor ** descriptors,
                                  UInt32                withCount,
                                  IODirection           withDirection,
                                  bool                  asReference )
{
    unsigned     index;
    IOOptionBits copyFlags;
    //
    // Initialize an IOMultiMemoryDescriptor.  The "buffer" is made up of several
    // memory descriptors that are chained end-to-end to make up a single
    // logical memory descriptor.
    //
    // Passing the descriptors as a reference will avoid an extra allocation.
    //

    assert(descriptors);

    // Release existing descriptors, if any
    if ( _descriptors )
    {
        for ( unsigned index = 0; index < _descriptorsCount; index++ )
            _descriptors[index]->release();

        if ( _descriptorsIsAllocated )
            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
    } else {
        // Ask our superclass' opinion.
        if ( super::init() == false )  return false;
    }

    // Initialize our minimal state.

    _descriptors            = 0;
    _descriptorsCount       = withCount;
    _descriptorsIsAllocated = asReference ? false : true;
    _flags                  = withDirection;
#ifndef __LP64__
    _direction              = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
    _length                 = 0;
    _mappings               = 0;
    _tag                    = 0;

    if ( asReference )
    {
        _descriptors = descriptors;
    }
    else
    {
        _descriptors = IONew(IOMemoryDescriptor *, withCount);
        if ( _descriptors == 0 )  return false;

        bcopy( /* from  */ descriptors,
               /* to    */ _descriptors,
               /* bytes */ withCount * sizeof(IOMemoryDescriptor *) );
    }

    for ( index = 0; index < withCount; index++ )
    {
        descriptors[index]->retain();
        _length += descriptors[index]->getLength();
        if ( _tag == 0 ) _tag = descriptors[index]->getTag();
        assert(descriptors[index]->getDirection() ==
               (withDirection & kIOMemoryDirectionMask));
    }
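    // All of the descriptors must agree on pageability: reject a set that
    // mixes pageable buffers with non-pageable ones, and propagate the
    // common kIOMemoryBufferPageable flag to this descriptor.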
    enum { kCopyFlags = kIOMemoryBufferPageable };
    copyFlags = 0;
    for ( index = 0; index < withCount; index++ )
    {
        if (!index)  copyFlags =  (kCopyFlags & descriptors[index]->_flags);
        else if     (copyFlags != (kCopyFlags & descriptors[index]->_flags)) break;
    }
    if (index < withCount) return (false);
    _flags |= copyFlags;

    return true;
}

void IOMultiMemoryDescriptor::free()
{
    //
    // Free all of this object's outstanding resources.
    //

    if ( _descriptors )
    {
        for ( unsigned index = 0; index < _descriptorsCount; index++ )
            _descriptors[index]->release();

        if ( _descriptorsIsAllocated )
            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
    }

    super::free();
}

IOReturn IOMultiMemoryDescriptor::prepare(IODirection forDirection)
{
    //
    // Prepare the memory for an I/O transfer.
    //
    // This involves paging in the memory and wiring it down for the duration
    // of the transfer.  The complete() method finishes the processing of the
    // memory after the I/O transfer finishes.
    //

    unsigned index;
    IOReturn status = kIOReturnInternalError;
    IOReturn statusUndo;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    for ( index = 0; index < _descriptorsCount; index++ )
    {
        status = _descriptors[index]->prepare(forDirection);
        if ( status != kIOReturnSuccess )  break;
    }

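    // If any descriptor failed to prepare, unwind: complete() the descriptors
    // that were successfully prepared so that none is left wired down.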
    if ( status != kIOReturnSuccess )
    {
        for ( unsigned indexUndo = 0; indexUndo < index; indexUndo++ )
        {
            statusUndo = _descriptors[indexUndo]->complete(forDirection);
            assert(statusUndo == kIOReturnSuccess);
        }
    }

    return status;
}

IOReturn IOMultiMemoryDescriptor::complete(IODirection forDirection)
{
    //
    // Complete processing of the memory after an I/O transfer finishes.
    //
    // This method shouldn't be called unless a prepare() was previously issued;
    // the prepare() and complete() must occur in pairs, before and after an I/O
    // transfer.
    //

    IOReturn status;
    IOReturn statusFinal = kIOReturnSuccess;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    for ( unsigned index = 0; index < _descriptorsCount; index++ )
    {
        status = _descriptors[index]->complete(forDirection);
        if ( status != kIOReturnSuccess )  statusFinal = status;
        assert(status == kIOReturnSuccess);
    }

    return statusFinal;
}
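
/*
 * Sketch of the required pairing (the transfer itself is elided; "md" is a
 * hypothetical IOMultiMemoryDescriptor owned by the caller):
 *
 *     if ( md->prepare() == kIOReturnSuccess )   // wires down every sub-descriptor
 *     {
 *         // ... perform the I/O transfer ...
 *         md->complete();                        // undoes the wiring
 *     }
 */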

addr64_t IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
                                                     IOByteCount * length,
                                                     IOOptionBits  options)
{
    //
    // This method returns the physical address of the byte at the given offset
    // into the memory, and optionally the length of the physically contiguous
    // segment from that offset.
    //

    assert(offset <= _length);

    for ( unsigned index = 0; index < _descriptorsCount; index++ )
    {
        if ( offset < _descriptors[index]->getLength() )
        {
            return _descriptors[index]->getPhysicalSegment(offset, length, options);
        }
        offset -= _descriptors[index]->getLength();
    }

    if ( length )  *length = 0;

    return 0;
}
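
/*
 * Worked example (hypothetical lengths): with two sub-descriptors of 0x1000
 * and 0x2000 bytes, offset 0x1800 into this descriptor falls past the first
 * sub-descriptor, so the call is forwarded as offset 0x800 into the second
 * one; the returned segment length can then be at most 0x1800, the bytes
 * remaining in that sub-descriptor.
 */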

#include "IOKitKernelInternal.h"

IOReturn IOMultiMemoryDescriptor::doMap(vm_map_t           __addressMap,
                                        IOVirtualAddress * __address,
                                        IOOptionBits       options,
                                        IOByteCount        __offset,
                                        IOByteCount        __length)
{
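    //
    // Map the concatenated memory into a task's address space.
    //
    // Unless kIOMapOverwrite supplied an existing range, a single virtual
    // range large enough for the whole mapping is reserved first; each
    // sub-descriptor overlapping the requested window is then mapped into
    // its slice of that range with kIOMapOverwrite.
    //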
    IOMemoryMap *     mapping = (IOMemoryMap *) *__address;
    vm_map_t          map     = mapping->fAddressMap;
    mach_vm_size_t    offset  = mapping->fOffset;
    mach_vm_size_t    length  = mapping->fLength;
    mach_vm_address_t address = mapping->fAddress;

    kern_return_t     err;
    IOOptionBits      subOptions;
    mach_vm_size_t    mapOffset;
    mach_vm_size_t    bytesRemaining, chunk;
    mach_vm_address_t nextAddress;
    IOMemoryDescriptorMapAllocRef ref;
    vm_prot_t         prot;

    do
    {
        prot = VM_PROT_READ;
        if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;

        if (kIOMapOverwrite & options)
        {
            if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
            {
                map = IOPageableMapForAddress(address);
            }
            err = KERN_SUCCESS;
        }
        else
        {
            ref.map     = map;
            ref.tag     = IOMemoryTag(map);
            ref.options = options;
            ref.size    = length;
            ref.prot    = prot;
            if (options & kIOMapAnywhere)
                // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
                ref.mapped = 0;
            else
                ref.mapped = mapping->fAddress;

            if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
                err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
            else
                err = IOMemoryDescriptorMapAlloc(ref.map, &ref);

            if (KERN_SUCCESS != err) break;

            address           = ref.mapped;
            mapping->fAddress = address;
        }

        mapOffset      = offset;
        bytesRemaining = length;
        nextAddress    = address;
        assert(mapOffset <= _length);
        subOptions     = (options & ~kIOMapAnywhere) | kIOMapOverwrite;

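        // Walk the sub-descriptors: skip those lying entirely before the
        // starting offset, then map each overlapping chunk into its slice of
        // the reserved range, clipping the final chunk to the mapped length.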
        for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++)
        {
            chunk = _descriptors[index]->getLength();
            if (mapOffset >= chunk)
            {
                mapOffset -= chunk;
                continue;
            }
            chunk -= mapOffset;
            if (chunk > bytesRemaining) chunk = bytesRemaining;
            IOMemoryMap * subMap;
            subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk);
            if (!subMap) break;
            subMap->release();          // kIOMapOverwrite means it will not deallocate

            bytesRemaining -= chunk;
            nextAddress    += chunk;
            mapOffset       = 0;
        }
        if (bytesRemaining) err = kIOReturnUnderrun;
    }
    while (false);

    if (kIOReturnSuccess == err)
    {
#if IOTRACKING
        IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
#endif
    }

    return (err);
}

IOReturn IOMultiMemoryDescriptor::setPurgeable( IOOptionBits   newState,
                                                IOOptionBits * oldState )
{
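    //
    // Set the purgeable state of every sub-descriptor, and report a summary
    // of the previous states: Empty if any descriptor was empty, otherwise
    // Volatile if any was volatile, otherwise NonVolatile.
    //
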
    IOReturn     err;
    IOOptionBits totalState, state;

    totalState = kIOMemoryPurgeableNonVolatile;
    err        = kIOReturnSuccess;
    for (unsigned index = 0; index < _descriptorsCount; index++)
    {
        err = _descriptors[index]->setPurgeable(newState, &state);
        if (kIOReturnSuccess != err) break;

        if      (kIOMemoryPurgeableEmpty == state)         totalState = kIOMemoryPurgeableEmpty;
        else if (kIOMemoryPurgeableEmpty == totalState)    continue;
        else if (kIOMemoryPurgeableVolatile == totalState) continue;
        else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
        else                                               totalState = kIOMemoryPurgeableNonVolatile;
    }
    if (oldState) *oldState = totalState;

    return (err);
}

IOReturn IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
                                                IOByteCount * pDirtyPageCount)
{
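    //
    // Sum the resident and dirty page counts over all of the sub-descriptors,
    // stopping at the first descriptor that fails to report them.
    //
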
    IOReturn    err;
    IOByteCount totalResidentPageCount, totalDirtyPageCount;
    IOByteCount residentPageCount, dirtyPageCount;

    err = kIOReturnSuccess;
    totalResidentPageCount = totalDirtyPageCount = 0;
    for (unsigned index = 0; index < _descriptorsCount; index++)
    {
        err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
        if (kIOReturnSuccess != err) break;
        totalResidentPageCount += residentPageCount;
        totalDirtyPageCount    += dirtyPageCount;
    }

    if (pResidentPageCount) *pResidentPageCount = totalResidentPageCount;
    if (pDirtyPageCount)    *pDirtyPageCount    = totalDirtyPageCount;

    return (err);
}

uint64_t IOMultiMemoryDescriptor::getPreparationID( void )
{
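    //
    // Return a preparation ID for the aggregate: unsupported or unprepared if
    // any sub-descriptor reports that state, otherwise an ID assigned to this
    // object as a whole.
    //
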
    if (!super::getKernelReserved())
    {
        return (kIOPreparationIDUnsupported);
    }

    for (unsigned index = 0; index < _descriptorsCount; index++)
    {
        uint64_t preparationID = _descriptors[index]->getPreparationID();

        if ( preparationID == kIOPreparationIDUnsupported )
        {
            return (kIOPreparationIDUnsupported);
        }

        if ( preparationID == kIOPreparationIDUnprepared )
        {
            return (kIOPreparationIDUnprepared);
        }
    }

    super::setPreparationID();

    return (super::getPreparationID());
}