/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOLib.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)

IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
    IOMemoryDescriptor ** descriptors,
    UInt32                withCount,
    IODirection           withDirection,
    bool                  asReference )
{
    //
    // Create a new IOMultiMemoryDescriptor. The "buffer" is made up of several
    // memory descriptors that are chained end-to-end to form a single logical
    // memory descriptor. (A usage sketch follows this method.)
    //
    // Passing the descriptors as a reference avoids an extra allocation.
    //

    IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;

    if (me && me->initWithDescriptors(
        /* descriptors   */ descriptors,
        /* withCount     */ withCount,
        /* withDirection */ withDirection,
        /* asReference   */ asReference ) == false) {
        me->release();
        me = NULL;
    }

    return me;
}
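
//
// A minimal usage sketch (hypothetical caller; 'headerDesc' and 'payloadDesc'
// stand in for existing IOMemoryDescriptor objects that share the same
// direction, as the assert in initWithDescriptors() below expects):
//
//     IOMemoryDescriptor * parts[2] = { headerDesc, payloadDesc };
//
//     IOMultiMemoryDescriptor * md = IOMultiMemoryDescriptor::withDescriptors(
//         parts, 2, kIODirectionOut, /* asReference */ false);
//     if (md) {
//         // md->getLength() == headerDesc->getLength() + payloadDesc->getLength()
//         md->release();
//     }
//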

bool
IOMultiMemoryDescriptor::initWithDescriptors(
    IOMemoryDescriptor ** descriptors,
    UInt32                withCount,
    IODirection           withDirection,
    bool                  asReference )
{
    unsigned index;
    IOOptionBits copyFlags;
    //
    // Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of several
    // memory descriptors that are chained end-to-end to form a single logical
    // memory descriptor.
    //
    // Passing the descriptors as a reference avoids an extra allocation.
    //

    assert(descriptors);

    // Release existing descriptors, if any
    if (_descriptors) {
        for (unsigned index = 0; index < _descriptorsCount; index++) {
            _descriptors[index]->release();
        }

        if (_descriptorsIsAllocated) {
            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
        }
    } else {
        // Ask our superclass' opinion.
        if (super::init() == false) {
            return false;
        }
    }

    // Initialize our minimal state.

    _descriptors = NULL;
    _descriptorsCount = withCount;
    _descriptorsIsAllocated = asReference ? false : true;
    _flags = withDirection;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
    _length = 0;
    _mappings = NULL;
    _tag = 0;

    if (asReference) {
        _descriptors = descriptors;
    } else {
        _descriptors = IONew(IOMemoryDescriptor *, withCount);
        if (_descriptors == NULL) {
            return false;
        }

        bcopy( /* from  */ descriptors,
            /* to    */ _descriptors,
            /* bytes */ withCount * sizeof(IOMemoryDescriptor *));
    }

    for (index = 0; index < withCount; index++) {
        descriptors[index]->retain();
        _length += descriptors[index]->getLength();
        if (_tag == 0) {
            _tag = descriptors[index]->getTag();
        }
        assert(descriptors[index]->getDirection() ==
            (withDirection & kIOMemoryDirectionMask));
    }

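    // All sub-descriptors must agree on buffer pageability; a mixed
    // pageable/non-pageable combination is rejected, and the common
    // setting is propagated to this descriptor's flags below.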
    enum { kCopyFlags = kIOMemoryBufferPageable };
    copyFlags = 0;
    for (index = 0; index < withCount; index++) {
        if (!index) {
            copyFlags = (kCopyFlags & descriptors[index]->_flags);
        } else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) {
            break;
        }
    }
    if (index < withCount) {
        return false;
    }
    _flags |= copyFlags;

    return true;
}

void
IOMultiMemoryDescriptor::free()
{
    //
    // Free all of this object's outstanding resources.
    //

    if (_descriptors) {
        for (unsigned index = 0; index < _descriptorsCount; index++) {
            _descriptors[index]->release();
        }

        if (_descriptorsIsAllocated) {
            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
        }
    }

    super::free();
}

IOReturn
IOMultiMemoryDescriptor::prepare(IODirection forDirection)
{
    //
    // Prepare the memory for an I/O transfer.
    //
    // This involves paging in the memory and wiring it down for the duration
    // of the transfer. The complete() method finishes the processing of the
    // memory after the I/O transfer finishes.
    //

    unsigned index;
    IOReturn status = kIOReturnInternalError;
    IOReturn statusUndo;

    if (forDirection == kIODirectionNone) {
        forDirection = getDirection();
    }

    for (index = 0; index < _descriptorsCount; index++) {
        status = _descriptors[index]->prepare(forDirection);
        if (status != kIOReturnSuccess) {
            break;
        }
    }

    if (status != kIOReturnSuccess) {
        for (unsigned indexUndo = 0; indexUndo < index; indexUndo++) {
            statusUndo = _descriptors[indexUndo]->complete(forDirection);
            assert(statusUndo == kIOReturnSuccess);
        }
    }

    return status;
}

IOReturn
IOMultiMemoryDescriptor::complete(IODirection forDirection)
{
    //
    // Complete processing of the memory after an I/O transfer finishes.
    //
    // This method shouldn't be called unless a prepare() was previously issued;
    // the prepare() and complete() must occur in pairs, before and after an I/O
    // transfer.
    //

    IOReturn status;
    IOReturn statusFinal = kIOReturnSuccess;

    if (forDirection == kIODirectionNone) {
        forDirection = getDirection();
    }

    for (unsigned index = 0; index < _descriptorsCount; index++) {
        status = _descriptors[index]->complete(forDirection);
        if (status != kIOReturnSuccess) {
            statusFinal = status;
        }
        assert(status == kIOReturnSuccess);
    }

    return statusFinal;
}
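
//
// Sketch of the prepare()/complete() pairing described above (illustrative
// only; 'md' is assumed to be an IOMultiMemoryDescriptor created as in the
// earlier sketch):
//
//     if (md->prepare() == kIOReturnSuccess) {
//         // ... perform the I/O transfer ...
//         md->complete();
//     }
//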

addr64_t
IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount offset,
    IOByteCount * length,
    IOOptionBits options)
{
    //
    // This method returns the physical address of the byte at the given offset
    // into the memory, and optionally the length of the physically contiguous
    // segment from that offset.
    //

    assert(offset <= _length);

    for (unsigned index = 0; index < _descriptorsCount; index++) {
        if (offset < _descriptors[index]->getLength()) {
            return _descriptors[index]->getPhysicalSegment(offset, length, options);
        }
        offset -= _descriptors[index]->getLength();
    }

    if (length) {
        *length = 0;
    }

    return 0;
}
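
//
// Sketch of walking the physical segments of a multi descriptor (illustrative
// only; 'md' is an assumed IOMultiMemoryDescriptor that has been prepare()d):
//
//     IOByteCount offset = 0, segLength;
//     while (offset < md->getLength()) {
//         addr64_t phys = md->getPhysicalSegment(offset, &segLength, kIOMemoryMapperNone);
//         if (!phys) {
//             break;
//         }
//         // ... program one DMA element with (phys, segLength) ...
//         offset += segLength;
//     }
//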

#include "IOKitKernelInternal.h"

IOReturn
IOMultiMemoryDescriptor::doMap(vm_map_t __addressMap,
    IOVirtualAddress * __address,
    IOOptionBits options,
    IOByteCount __offset,
    IOByteCount __length)
{
    IOMemoryMap * mapping = (IOMemoryMap *) *__address;
    vm_map_t map = mapping->fAddressMap;
    mach_vm_size_t offset = mapping->fOffset;
    mach_vm_size_t length = mapping->fLength;
    mach_vm_address_t address = mapping->fAddress;

    kern_return_t err;
    IOOptionBits subOptions;
    mach_vm_size_t mapOffset;
    mach_vm_size_t bytesRemaining, chunk;
    mach_vm_address_t nextAddress;
    IOMemoryDescriptorMapAllocRef ref;
    vm_prot_t prot;

    do{
        prot = VM_PROT_READ;
        if (!(kIOMapReadOnly & options)) {
            prot |= VM_PROT_WRITE;
        }

        if (kIOMapOverwrite & options) {
            if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
                map = IOPageableMapForAddress(address);
            }
            err = KERN_SUCCESS;
        } else {
            ref.map = map;
            ref.tag = IOMemoryTag(map);
            ref.options = options;
            ref.size = length;
            ref.prot = prot;
            if (options & kIOMapAnywhere) {
                // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
                ref.mapped = 0;
            } else {
                ref.mapped = mapping->fAddress;
            }

            if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
                err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
            } else {
                err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
            }

            if (KERN_SUCCESS != err) {
                break;
            }

            address = ref.mapped;
            mapping->fAddress = address;
        }

        mapOffset = offset;
        bytesRemaining = length;
        nextAddress = address;
        assert(mapOffset <= _length);
        subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;

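        // Walk the sub-descriptors: descriptors that lie entirely before
        // 'mapOffset' are skipped, and each overlapping chunk is mapped
        // contiguously at 'nextAddress'. For example (illustrative numbers),
        // with sub-descriptors of 0x3000 and 0x5000 bytes and a request at
        // offset 0x4000 for 0x2000 bytes, the first pass reduces mapOffset
        // to 0x1000, and the second maps 0x2000 bytes of the second
        // descriptor starting at that sub-offset.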
        for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++) {
            chunk = _descriptors[index]->getLength();
            if (mapOffset >= chunk) {
                mapOffset -= chunk;
                continue;
            }
            chunk -= mapOffset;
            if (chunk > bytesRemaining) {
                chunk = bytesRemaining;
            }
            IOMemoryMap * subMap;
            subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk);
            if (!subMap) {
                break;
            }
            subMap->release();  // kIOMapOverwrite means it will not deallocate

            bytesRemaining -= chunk;
            nextAddress += chunk;
            mapOffset = 0;
        }
        if (bytesRemaining) {
            err = kIOReturnUnderrun;
        }
    }while (false);

    if (kIOReturnSuccess == err) {
#if IOTRACKING
        IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
#endif
    }

    return err;
}

IOReturn
IOMultiMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
    IOReturn err;
    IOOptionBits totalState, state;

    totalState = kIOMemoryPurgeableNonVolatile;
    err = kIOReturnSuccess;
    for (unsigned index = 0; index < _descriptorsCount; index++) {
        err = _descriptors[index]->setPurgeable(newState, &state);
        if (kIOReturnSuccess != err) {
            break;
        }

        if (kIOMemoryPurgeableEmpty == state) {
            totalState = kIOMemoryPurgeableEmpty;
        } else if (kIOMemoryPurgeableEmpty == totalState) {
            continue;
        } else if (kIOMemoryPurgeableVolatile == totalState) {
            continue;
        } else if (kIOMemoryPurgeableVolatile == state) {
            totalState = kIOMemoryPurgeableVolatile;
        } else {
            totalState = kIOMemoryPurgeableNonVolatile;
        }
    }
    if (oldState) {
        *oldState = totalState;
    }

    return err;
}
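
//
// Sketch of toggling volatility on a multi descriptor (illustrative only;
// 'md' is an assumed IOMultiMemoryDescriptor backed by purgeable memory):
//
//     IOOptionBits oldState;
//     if (md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState) == kIOReturnSuccess) {
//         // ... contents may now be reclaimed by the VM system ...
//         md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
//         // kIOMemoryPurgeableEmpty in oldState means the contents were discarded
//     }
//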

IOReturn
IOMultiMemoryDescriptor::setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions )
{
    IOReturn err;

    if (iokit_iomd_setownership_enabled == FALSE) {
        return kIOReturnUnsupported;
    }

    err = kIOReturnSuccess;
    for (unsigned index = 0; index < _descriptorsCount; index++) {
        err = _descriptors[index]->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
        if (kIOReturnSuccess != err) {
            break;
        }
    }

    return err;
}

IOReturn
IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
    IOByteCount * pDirtyPageCount)
{
    IOReturn err;
    IOByteCount totalResidentPageCount, totalDirtyPageCount;
    IOByteCount residentPageCount, dirtyPageCount;

    err = kIOReturnSuccess;
    totalResidentPageCount = totalDirtyPageCount = 0;
    for (unsigned index = 0; index < _descriptorsCount; index++) {
        err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
        if (kIOReturnSuccess != err) {
            break;
        }
        totalResidentPageCount += residentPageCount;
        totalDirtyPageCount += dirtyPageCount;
    }

    if (pResidentPageCount) {
        *pResidentPageCount = totalResidentPageCount;
    }
    if (pDirtyPageCount) {
        *pDirtyPageCount = totalDirtyPageCount;
    }

    return err;
}
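
//
// Sketch of querying aggregate page counts (illustrative only; 'md' is an
// assumed IOMultiMemoryDescriptor):
//
//     IOByteCount resident, dirty;
//     if (md->getPageCounts(&resident, &dirty) == kIOReturnSuccess) {
//         // resident and dirty are page counts summed over all sub-descriptors
//     }
//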

uint64_t
IOMultiMemoryDescriptor::getPreparationID( void )
{
    if (!super::getKernelReserved()) {
        return kIOPreparationIDUnsupported;
    }

    for (unsigned index = 0; index < _descriptorsCount; index++) {
        uint64_t preparationID = _descriptors[index]->getPreparationID();

        if (preparationID == kIOPreparationIDUnsupported) {
            return kIOPreparationIDUnsupported;
        }

        if (preparationID == kIOPreparationIDUnprepared) {
            return kIOPreparationIDUnprepared;
        }
    }

    super::setPreparationID();

    return super::getPreparationID();
}