1 | /* |
2 | * Copyright (c) 1998-2016 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | |
30 | #include <sys/cdefs.h> |
31 | |
32 | #include <IOKit/assert.h> |
33 | #include <IOKit/system.h> |
34 | #include <IOKit/IOLib.h> |
35 | #include <IOKit/IOMemoryDescriptor.h> |
36 | #include <IOKit/IOMapper.h> |
37 | #include <IOKit/IODMACommand.h> |
38 | #include <IOKit/IOKitKeysPrivate.h> |
39 | |
40 | #include <IOKit/IOSubMemoryDescriptor.h> |
41 | #include <IOKit/IOMultiMemoryDescriptor.h> |
42 | |
43 | #include <IOKit/IOKitDebug.h> |
44 | #include <libkern/OSDebug.h> |
45 | #include <libkern/OSKextLibPrivate.h> |
46 | |
47 | #include "IOKitKernelInternal.h" |
48 | |
49 | #include <libkern/c++/OSContainers.h> |
50 | #include <libkern/c++/OSDictionary.h> |
51 | #include <libkern/c++/OSArray.h> |
52 | #include <libkern/c++/OSSymbol.h> |
53 | #include <libkern/c++/OSNumber.h> |
54 | #include <os/overflow.h> |
55 | |
56 | #include <sys/uio.h> |
57 | |
58 | __BEGIN_DECLS |
59 | #include <vm/pmap.h> |
60 | #include <vm/vm_pageout.h> |
61 | #include <mach/memory_object_types.h> |
62 | #include <device/device_port.h> |
63 | |
64 | #include <mach/vm_prot.h> |
65 | #include <mach/mach_vm.h> |
66 | #include <vm/vm_fault.h> |
67 | #include <vm/vm_protos.h> |
68 | |
69 | extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); |
70 | extern void ipc_port_release_send(ipc_port_t port); |
71 | |
72 | __END_DECLS |
73 | |
74 | #define kIOMapperWaitSystem ((IOMapper *) 1) |
75 | |
76 | static IOMapper * gIOSystemMapper = NULL; |
77 | |
78 | ppnum_t gIOLastPage; |
79 | |
80 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
81 | |
82 | OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject ) |
83 | |
84 | #define super IOMemoryDescriptor |
85 | |
86 | OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor) |
87 | |
88 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
89 | |
90 | static IORecursiveLock * gIOMemoryLock; |
91 | |
92 | #define LOCK IORecursiveLockLock( gIOMemoryLock) |
93 | #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock) |
94 | #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT) |
95 | #define WAKEUP \ |
96 | IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false) |
97 | |
98 | #if 0 |
99 | #define DEBG(fmt, args...) { kprintf(fmt, ## args); } |
100 | #else |
101 | #define DEBG(fmt, args...) {} |
102 | #endif |
103 | |
104 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
105 | |
// Some data structures and accessor macros used by the initWithOptions()
// function.
108 | |
109 | enum ioPLBlockFlags { |
110 | kIOPLOnDevice = 0x00000001, |
111 | kIOPLExternUPL = 0x00000002, |
112 | }; |
113 | |
114 | struct IOMDPersistentInitData |
115 | { |
116 | const IOGeneralMemoryDescriptor * fMD; |
117 | IOMemoryReference * fMemRef; |
118 | }; |
119 | |
120 | struct ioPLBlock { |
121 | upl_t fIOPL; |
122 | vm_address_t fPageInfo; // Pointer to page list or index into it |
123 | uint32_t fIOMDOffset; // The offset of this iopl in descriptor |
124 | ppnum_t fMappedPage; // Page number of first page in this iopl |
125 | unsigned int fPageOffset; // Offset within first page of iopl |
126 | unsigned int fFlags; // Flags |
127 | }; |
128 | |
129 | enum { kMaxWireTags = 6 }; |
130 | |
131 | struct ioGMDData |
132 | { |
133 | IOMapper * fMapper; |
134 | uint64_t fDMAMapAlignment; |
135 | uint64_t fMappedBase; |
136 | uint64_t fMappedLength; |
137 | uint64_t fPreparationID; |
138 | #if IOTRACKING |
139 | IOTracking fWireTracking; |
140 | #endif /* IOTRACKING */ |
141 | unsigned int fPageCnt; |
142 | uint8_t fDMAMapNumAddressBits; |
143 | unsigned char fDiscontig:1; |
144 | unsigned char fCompletionError:1; |
145 | unsigned char fMappedBaseValid:1; |
146 | unsigned char _resv:3; |
147 | unsigned char fDMAAccess:2; |
148 | |
149 | /* variable length arrays */ |
150 | upl_page_info_t fPageList[1] |
151 | #if __LP64__ |
152 | // align fPageList as for ioPLBlock |
153 | __attribute__((aligned(sizeof(upl_t)))) |
154 | #endif |
155 | ; |
156 | ioPLBlock fBlocks[1]; |
157 | }; |
158 | |
159 | #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) |
160 | #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt])) |
161 | #define getNumIOPL(osd, d) \ |
162 | (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) |
163 | #define getPageList(d) (&(d->fPageList[0])) |
164 | #define computeDataSize(p, u) \ |
165 | (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) |
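
/*
 * Layout sketch (illustrative, not compiled): a single ioGMDData allocation
 * holds the fixed header, then fPageCnt upl_page_info_t entries, then the
 * ioPLBlock array; computeDataSize(p, u) sizes the whole region for p pages
 * and u iopls.
 *
 *   | ioGMDData header | fPageList[0 .. fPageCnt-1] | fBlocks[0 ...] |
 *   ^                  ^                            ^
 *   getDataP()         getPageList()                getIOPLList()
 */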
166 | |
167 | enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote }; |
168 | |
169 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
170 | |
171 | #define next_page(a) ( trunc_page(a) + PAGE_SIZE ) |
172 | |
173 | extern "C" { |
174 | |
175 | kern_return_t device_data_action( |
176 | uintptr_t device_handle, |
    ipc_port_t            device_pager,
178 | vm_prot_t protection, |
179 | vm_object_offset_t offset, |
180 | vm_size_t size) |
181 | { |
182 | kern_return_t kr; |
183 | IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; |
184 | IOMemoryDescriptor * memDesc; |
185 | |
186 | LOCK; |
187 | memDesc = ref->dp.memory; |
188 | if( memDesc) |
189 | { |
190 | memDesc->retain(); |
191 | kr = memDesc->handleFault(device_pager, offset, size); |
192 | memDesc->release(); |
193 | } |
194 | else |
195 | kr = KERN_ABORTED; |
196 | UNLOCK; |
197 | |
198 | return( kr ); |
199 | } |
200 | |
201 | kern_return_t device_close( |
202 | uintptr_t device_handle) |
203 | { |
204 | IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; |
205 | |
206 | IODelete( ref, IOMemoryDescriptorReserved, 1 ); |
207 | |
208 | return( kIOReturnSuccess ); |
209 | } |
210 | }; // end extern "C" |
211 | |
212 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
213 | |
// Note this inline function uses C++ reference arguments to return values.
// This means that pointers are not passed, and NULL checks are unnecessary
// because a NULL reference is illegal.
217 | static inline void |
218 | getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables |
219 | UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind) |
220 | { |
221 | assert(kIOMemoryTypeUIO == type |
222 | || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type |
223 | || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type); |
224 | if (kIOMemoryTypeUIO == type) { |
225 | user_size_t us; |
226 | user_addr_t ad; |
227 | uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us; |
228 | } |
229 | #ifndef __LP64__ |
230 | else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) { |
231 | IOAddressRange cur = r.v64[ind]; |
232 | addr = cur.address; |
233 | len = cur.length; |
234 | } |
235 | #endif /* !__LP64__ */ |
236 | else { |
237 | IOVirtualRange cur = r.v[ind]; |
238 | addr = cur.address; |
239 | len = cur.length; |
240 | } |
241 | } |
242 | |
243 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
244 | |
245 | static IOReturn |
246 | purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state) |
247 | { |
248 | IOReturn err = kIOReturnSuccess; |
249 | |
250 | *control = VM_PURGABLE_SET_STATE; |
251 | |
252 | enum { kIOMemoryPurgeableControlMask = 15 }; |
253 | |
254 | switch (kIOMemoryPurgeableControlMask & newState) |
255 | { |
256 | case kIOMemoryPurgeableKeepCurrent: |
257 | *control = VM_PURGABLE_GET_STATE; |
258 | break; |
259 | |
260 | case kIOMemoryPurgeableNonVolatile: |
261 | *state = VM_PURGABLE_NONVOLATILE; |
262 | break; |
263 | case kIOMemoryPurgeableVolatile: |
264 | *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask); |
265 | break; |
266 | case kIOMemoryPurgeableEmpty: |
267 | *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask); |
268 | break; |
269 | default: |
270 | err = kIOReturnBadArgument; |
271 | break; |
272 | } |
273 | |
274 | if (*control == VM_PURGABLE_SET_STATE) { |
275 | // let VM know this call is from the kernel and is allowed to alter |
276 | // the volatility of the memory entry even if it was created with |
277 | // MAP_MEM_PURGABLE_KERNEL_ONLY |
278 | *control = VM_PURGABLE_SET_STATE_FROM_KERNEL; |
279 | } |
280 | |
281 | return (err); |
282 | } |
283 | |
284 | static IOReturn |
285 | purgeableStateBits(int * state) |
286 | { |
287 | IOReturn err = kIOReturnSuccess; |
288 | |
289 | switch (VM_PURGABLE_STATE_MASK & *state) |
290 | { |
291 | case VM_PURGABLE_NONVOLATILE: |
292 | *state = kIOMemoryPurgeableNonVolatile; |
293 | break; |
294 | case VM_PURGABLE_VOLATILE: |
295 | *state = kIOMemoryPurgeableVolatile; |
296 | break; |
297 | case VM_PURGABLE_EMPTY: |
298 | *state = kIOMemoryPurgeableEmpty; |
299 | break; |
300 | default: |
301 | *state = kIOMemoryPurgeableNonVolatile; |
302 | err = kIOReturnNotReady; |
303 | break; |
304 | } |
305 | return (err); |
306 | } |
307 | |
308 | |
309 | static vm_prot_t |
310 | vmProtForCacheMode(IOOptionBits cacheMode) |
311 | { |
312 | vm_prot_t prot = 0; |
313 | switch (cacheMode) |
314 | { |
315 | case kIOInhibitCache: |
316 | SET_MAP_MEM(MAP_MEM_IO, prot); |
317 | break; |
318 | |
319 | case kIOWriteThruCache: |
320 | SET_MAP_MEM(MAP_MEM_WTHRU, prot); |
321 | break; |
322 | |
323 | case kIOWriteCombineCache: |
324 | SET_MAP_MEM(MAP_MEM_WCOMB, prot); |
325 | break; |
326 | |
327 | case kIOCopybackCache: |
328 | SET_MAP_MEM(MAP_MEM_COPYBACK, prot); |
329 | break; |
330 | |
331 | case kIOCopybackInnerCache: |
332 | SET_MAP_MEM(MAP_MEM_INNERWBACK, prot); |
333 | break; |
334 | |
335 | case kIOPostedWrite: |
336 | SET_MAP_MEM(MAP_MEM_POSTED, prot); |
337 | break; |
338 | |
339 | case kIODefaultCache: |
340 | default: |
341 | SET_MAP_MEM(MAP_MEM_NOOP, prot); |
342 | break; |
343 | } |
344 | |
345 | return (prot); |
346 | } |
347 | |
static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
    unsigned int pagerFlags = 0;
352 | switch (cacheMode) |
353 | { |
354 | case kIOInhibitCache: |
355 | pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; |
356 | break; |
357 | |
358 | case kIOWriteThruCache: |
359 | pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; |
360 | break; |
361 | |
362 | case kIOWriteCombineCache: |
363 | pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT; |
364 | break; |
365 | |
366 | case kIOCopybackCache: |
367 | pagerFlags = DEVICE_PAGER_COHERENT; |
368 | break; |
369 | |
370 | case kIOCopybackInnerCache: |
371 | pagerFlags = DEVICE_PAGER_COHERENT; |
372 | break; |
373 | |
374 | case kIOPostedWrite: |
375 | pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK; |
376 | break; |
377 | |
378 | case kIODefaultCache: |
379 | default: |
380 | pagerFlags = -1U; |
381 | break; |
382 | } |
383 | return (pagerFlags); |
384 | } |
385 | |
386 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
387 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
388 | |
389 | struct IOMemoryEntry |
390 | { |
391 | ipc_port_t entry; |
392 | int64_t offset; |
393 | uint64_t size; |
394 | }; |
395 | |
396 | struct IOMemoryReference |
397 | { |
398 | volatile SInt32 refCount; |
399 | vm_prot_t prot; |
400 | uint32_t capacity; |
401 | uint32_t count; |
402 | struct IOMemoryReference * mapRef; |
403 | IOMemoryEntry entries[0]; |
404 | }; |
405 | |
406 | enum |
407 | { |
408 | kIOMemoryReferenceReuse = 0x00000001, |
409 | kIOMemoryReferenceWrite = 0x00000002, |
410 | kIOMemoryReferenceCOW = 0x00000004, |
411 | }; |
412 | |
413 | SInt32 gIOMemoryReferenceCount; |
414 | |
415 | IOMemoryReference * |
416 | IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc) |
417 | { |
418 | IOMemoryReference * ref; |
419 | size_t newSize, oldSize, copySize; |
420 | |
421 | newSize = (sizeof(IOMemoryReference) |
422 | - sizeof(ref->entries) |
423 | + capacity * sizeof(ref->entries[0])); |
424 | ref = (typeof(ref)) IOMalloc(newSize); |
425 | if (realloc) |
426 | { |
427 | oldSize = (sizeof(IOMemoryReference) |
428 | - sizeof(realloc->entries) |
429 | + realloc->capacity * sizeof(realloc->entries[0])); |
430 | copySize = oldSize; |
431 | if (copySize > newSize) copySize = newSize; |
432 | if (ref) bcopy(realloc, ref, copySize); |
433 | IOFree(realloc, oldSize); |
434 | } |
435 | else if (ref) |
436 | { |
437 | bzero(ref, sizeof(*ref)); |
438 | ref->refCount = 1; |
439 | OSIncrementAtomic(&gIOMemoryReferenceCount); |
440 | } |
441 | if (!ref) return (0); |
442 | ref->capacity = capacity; |
443 | return (ref); |
444 | } |
445 | |
446 | void |
447 | IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref) |
448 | { |
449 | IOMemoryEntry * entries; |
450 | size_t size; |
451 | |
452 | if (ref->mapRef) |
453 | { |
454 | memoryReferenceFree(ref->mapRef); |
455 | ref->mapRef = 0; |
456 | } |
457 | |
458 | entries = ref->entries + ref->count; |
459 | while (entries > &ref->entries[0]) |
460 | { |
461 | entries--; |
462 | ipc_port_release_send(entries->entry); |
463 | } |
464 | size = (sizeof(IOMemoryReference) |
465 | - sizeof(ref->entries) |
466 | + ref->capacity * sizeof(ref->entries[0])); |
467 | IOFree(ref, size); |
468 | |
469 | OSDecrementAtomic(&gIOMemoryReferenceCount); |
470 | } |
471 | |
472 | void |
473 | IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref) |
474 | { |
475 | if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref); |
476 | } |
477 | |
478 | |
479 | IOReturn |
480 | IOGeneralMemoryDescriptor::memoryReferenceCreate( |
481 | IOOptionBits options, |
482 | IOMemoryReference ** reference) |
483 | { |
484 | enum { kCapacity = 4, kCapacityInc = 4 }; |
485 | |
486 | kern_return_t err; |
487 | IOMemoryReference * ref; |
488 | IOMemoryEntry * entries; |
489 | IOMemoryEntry * cloneEntries; |
490 | vm_map_t map; |
491 | ipc_port_t entry, cloneEntry; |
492 | vm_prot_t prot; |
493 | memory_object_size_t actualSize; |
494 | uint32_t rangeIdx; |
495 | uint32_t count; |
496 | mach_vm_address_t entryAddr, endAddr, entrySize; |
497 | mach_vm_size_t srcAddr, srcLen; |
498 | mach_vm_size_t nextAddr, nextLen; |
499 | mach_vm_size_t offset, remain; |
500 | IOByteCount physLen; |
501 | IOOptionBits type = (_flags & kIOMemoryTypeMask); |
502 | IOOptionBits cacheMode; |
    unsigned int          pagerFlags;
504 | vm_tag_t tag; |
505 | |
506 | ref = memoryReferenceAlloc(kCapacity, NULL); |
507 | if (!ref) return (kIOReturnNoMemory); |
508 | |
509 | tag = getVMTag(kernel_map); |
510 | entries = &ref->entries[0]; |
511 | count = 0; |
512 | err = KERN_SUCCESS; |
513 | |
514 | offset = 0; |
515 | rangeIdx = 0; |
516 | if (_task) |
517 | { |
518 | getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); |
519 | } |
520 | else |
521 | { |
522 | nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); |
523 | nextLen = physLen; |
524 | |
525 | // default cache mode for physical |
526 | if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) |
527 | { |
528 | IOOptionBits mode; |
529 | pagerFlags = IODefaultCacheBits(nextAddr); |
530 | if (DEVICE_PAGER_CACHE_INHIB & pagerFlags) |
531 | { |
532 | if (DEVICE_PAGER_EARLY_ACK & pagerFlags) |
533 | mode = kIOPostedWrite; |
534 | else if (DEVICE_PAGER_GUARDED & pagerFlags) |
535 | mode = kIOInhibitCache; |
536 | else |
537 | mode = kIOWriteCombineCache; |
538 | } |
539 | else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags) |
540 | mode = kIOWriteThruCache; |
541 | else |
542 | mode = kIOCopybackCache; |
543 | _flags |= (mode << kIOMemoryBufferCacheShift); |
544 | } |
545 | } |
546 | |
547 | // cache mode & vm_prot |
548 | prot = VM_PROT_READ; |
549 | cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); |
550 | prot |= vmProtForCacheMode(cacheMode); |
551 | // VM system requires write access to change cache mode |
552 | if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE; |
553 | if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE; |
554 | if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE; |
555 | if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY; |
556 | |
557 | if ((kIOMemoryReferenceReuse & options) && _memRef) |
558 | { |
559 | cloneEntries = &_memRef->entries[0]; |
560 | prot |= MAP_MEM_NAMED_REUSE; |
561 | } |
562 | |
563 | if (_task) |
564 | { |
565 | // virtual ranges |
566 | |
567 | if (kIOMemoryBufferPageable & _flags) |
568 | { |
569 | // IOBufferMemoryDescriptor alloc - set flags for entry + object create |
570 | prot |= MAP_MEM_NAMED_CREATE; |
571 | if (kIOMemoryBufferPurgeable & _flags) |
572 | { |
573 | prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY); |
574 | if (VM_KERN_MEMORY_SKYWALK == tag) |
575 | { |
576 | prot |= MAP_MEM_LEDGER_TAG_NETWORK; |
577 | } |
578 | } |
579 | if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED; |
580 | |
581 | prot |= VM_PROT_WRITE; |
582 | map = NULL; |
583 | } |
584 | else map = get_task_map(_task); |
585 | |
586 | remain = _length; |
587 | while (remain) |
588 | { |
589 | srcAddr = nextAddr; |
590 | srcLen = nextLen; |
591 | nextAddr = 0; |
592 | nextLen = 0; |
593 | // coalesce addr range |
594 | for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) |
595 | { |
596 | getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); |
597 | if ((srcAddr + srcLen) != nextAddr) break; |
598 | srcLen += nextLen; |
599 | } |
600 | entryAddr = trunc_page_64(srcAddr); |
601 | endAddr = round_page_64(srcAddr + srcLen); |
602 | do |
603 | { |
604 | entrySize = (endAddr - entryAddr); |
605 | if (!entrySize) break; |
606 | actualSize = entrySize; |
607 | |
608 | cloneEntry = MACH_PORT_NULL; |
609 | if (MAP_MEM_NAMED_REUSE & prot) |
610 | { |
611 | if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry; |
612 | else prot &= ~MAP_MEM_NAMED_REUSE; |
613 | } |
614 | |
615 | err = mach_make_memory_entry_internal(map, |
616 | &actualSize, entryAddr, prot, &entry, cloneEntry); |
617 | |
618 | if (KERN_SUCCESS != err) break; |
            if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
620 | |
621 | if (count >= ref->capacity) |
622 | { |
623 | ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref); |
624 | entries = &ref->entries[count]; |
625 | } |
626 | entries->entry = entry; |
627 | entries->size = actualSize; |
628 | entries->offset = offset + (entryAddr - srcAddr); |
629 | entryAddr += actualSize; |
630 | if (MAP_MEM_NAMED_REUSE & prot) |
631 | { |
632 | if ((cloneEntries->entry == entries->entry) |
633 | && (cloneEntries->size == entries->size) |
634 | && (cloneEntries->offset == entries->offset)) cloneEntries++; |
635 | else prot &= ~MAP_MEM_NAMED_REUSE; |
636 | } |
637 | entries++; |
638 | count++; |
639 | } |
640 | while (true); |
641 | offset += srcLen; |
642 | remain -= srcLen; |
643 | } |
644 | } |
645 | else |
646 | { |
647 | // _task == 0, physical or kIOMemoryTypeUPL |
        memory_object_t pager;
649 | vm_size_t size = ptoa_32(_pages); |
650 | |
        if (!getKernelReserved()) panic("getKernelReserved");
652 | |
653 | reserved->dp.pagerContig = (1 == _rangesCount); |
654 | reserved->dp.memory = this; |
655 | |
656 | pagerFlags = pagerFlagsForCacheMode(cacheMode); |
        if (-1U == pagerFlags) panic("phys is kIODefaultCache");
658 | if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS; |
659 | |
660 | pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved, |
661 | size, pagerFlags); |
662 | assert (pager); |
663 | if (!pager) err = kIOReturnVMError; |
664 | else |
665 | { |
666 | srcAddr = nextAddr; |
667 | entryAddr = trunc_page_64(srcAddr); |
668 | err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/, |
669 | size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry); |
670 | assert (KERN_SUCCESS == err); |
671 | if (KERN_SUCCESS != err) device_pager_deallocate(pager); |
672 | else |
673 | { |
674 | reserved->dp.devicePager = pager; |
675 | entries->entry = entry; |
676 | entries->size = size; |
677 | entries->offset = offset + (entryAddr - srcAddr); |
678 | entries++; |
679 | count++; |
680 | } |
681 | } |
682 | } |
683 | |
684 | ref->count = count; |
685 | ref->prot = prot; |
686 | |
687 | if (_task && (KERN_SUCCESS == err) |
688 | && (kIOMemoryMapCopyOnWrite & _flags) |
689 | && !(kIOMemoryReferenceCOW & options)) |
690 | { |
691 | err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef); |
692 | } |
693 | |
694 | if (KERN_SUCCESS == err) |
695 | { |
696 | if (MAP_MEM_NAMED_REUSE & prot) |
697 | { |
698 | memoryReferenceFree(ref); |
699 | OSIncrementAtomic(&_memRef->refCount); |
700 | ref = _memRef; |
701 | } |
702 | } |
703 | else |
704 | { |
705 | memoryReferenceFree(ref); |
706 | ref = NULL; |
707 | } |
708 | |
709 | *reference = ref; |
710 | |
711 | return (err); |
712 | } |
713 | |
714 | kern_return_t |
715 | IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) |
716 | { |
717 | IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref; |
718 | IOReturn err; |
719 | vm_map_offset_t addr; |
720 | |
721 | addr = ref->mapped; |
722 | |
723 | err = vm_map_enter_mem_object(map, &addr, ref->size, |
724 | (vm_map_offset_t) 0, |
725 | (((ref->options & kIOMapAnywhere) |
726 | ? VM_FLAGS_ANYWHERE |
727 | : VM_FLAGS_FIXED)), |
728 | VM_MAP_KERNEL_FLAGS_NONE, |
729 | ref->tag, |
730 | IPC_PORT_NULL, |
731 | (memory_object_offset_t) 0, |
732 | false, /* copy */ |
733 | ref->prot, |
734 | ref->prot, |
735 | VM_INHERIT_NONE); |
736 | if (KERN_SUCCESS == err) |
737 | { |
738 | ref->mapped = (mach_vm_address_t) addr; |
739 | ref->map = map; |
740 | } |
741 | |
742 | return( err ); |
743 | } |
744 | |
745 | IOReturn |
746 | IOGeneralMemoryDescriptor::memoryReferenceMap( |
747 | IOMemoryReference * ref, |
748 | vm_map_t map, |
749 | mach_vm_size_t inoffset, |
750 | mach_vm_size_t size, |
751 | IOOptionBits options, |
752 | mach_vm_address_t * inaddr) |
753 | { |
754 | IOReturn err; |
755 | int64_t offset = inoffset; |
756 | uint32_t rangeIdx, entryIdx; |
757 | vm_map_offset_t addr, mapAddr; |
758 | vm_map_offset_t pageOffset, entryOffset, remain, chunk; |
759 | |
760 | mach_vm_address_t nextAddr; |
761 | mach_vm_size_t nextLen; |
762 | IOByteCount physLen; |
763 | IOMemoryEntry * entry; |
764 | vm_prot_t prot, memEntryCacheMode; |
765 | IOOptionBits type; |
766 | IOOptionBits cacheMode; |
767 | vm_tag_t tag; |
768 | // for the kIOMapPrefault option. |
769 | upl_page_info_t * pageList = NULL; |
770 | UInt currentPageIndex = 0; |
771 | bool didAlloc; |
772 | |
773 | if (ref->mapRef) |
774 | { |
775 | err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr); |
776 | return (err); |
777 | } |
778 | |
779 | type = _flags & kIOMemoryTypeMask; |
780 | |
781 | prot = VM_PROT_READ; |
782 | if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE; |
783 | prot &= ref->prot; |
784 | |
785 | cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift); |
786 | if (kIODefaultCache != cacheMode) |
787 | { |
788 | // VM system requires write access to update named entry cache mode |
789 | memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode)); |
790 | } |
791 | |
792 | tag = getVMTag(map); |
793 | |
794 | if (_task) |
795 | { |
796 | // Find first range for offset |
797 | if (!_rangesCount) return (kIOReturnBadArgument); |
798 | for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) |
799 | { |
800 | getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); |
801 | if (remain < nextLen) break; |
802 | remain -= nextLen; |
803 | } |
804 | } |
805 | else |
806 | { |
807 | rangeIdx = 0; |
808 | remain = 0; |
809 | nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); |
810 | nextLen = size; |
811 | } |
812 | |
813 | assert(remain < nextLen); |
814 | if (remain >= nextLen) return (kIOReturnBadArgument); |
815 | |
816 | nextAddr += remain; |
817 | nextLen -= remain; |
818 | pageOffset = (page_mask & nextAddr); |
819 | addr = 0; |
820 | didAlloc = false; |
821 | |
822 | if (!(options & kIOMapAnywhere)) |
823 | { |
824 | addr = *inaddr; |
825 | if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned); |
826 | addr -= pageOffset; |
827 | } |
828 | |
829 | // find first entry for offset |
830 | for (entryIdx = 0; |
831 | (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset); |
832 | entryIdx++) {} |
833 | entryIdx--; |
834 | entry = &ref->entries[entryIdx]; |
835 | |
836 | // allocate VM |
837 | size = round_page_64(size + pageOffset); |
838 | if (kIOMapOverwrite & options) |
839 | { |
840 | if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) |
841 | { |
842 | map = IOPageableMapForAddress(addr); |
843 | } |
844 | err = KERN_SUCCESS; |
845 | } |
846 | else |
847 | { |
848 | IOMemoryDescriptorMapAllocRef ref; |
849 | ref.map = map; |
850 | ref.tag = tag; |
851 | ref.options = options; |
852 | ref.size = size; |
853 | ref.prot = prot; |
854 | if (options & kIOMapAnywhere) |
855 | // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE |
856 | ref.mapped = 0; |
857 | else |
858 | ref.mapped = addr; |
859 | if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) |
860 | err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); |
861 | else |
862 | err = IOMemoryDescriptorMapAlloc(ref.map, &ref); |
863 | if (KERN_SUCCESS == err) |
864 | { |
865 | addr = ref.mapped; |
866 | map = ref.map; |
867 | didAlloc = true; |
868 | } |
869 | } |
870 | |
871 | /* |
872 | * If the memory is associated with a device pager but doesn't have a UPL, |
873 | * it will be immediately faulted in through the pager via populateDevicePager(). |
874 | * kIOMapPrefault is redundant in that case, so don't try to use it for UPL |
875 | * operations. |
876 | */ |
877 | if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) |
878 | options &= ~kIOMapPrefault; |
879 | |
880 | /* |
881 | * Prefaulting is only possible if we wired the memory earlier. Check the |
882 | * memory type, and the underlying data. |
883 | */ |
884 | if (options & kIOMapPrefault) |
885 | { |
886 | /* |
887 | * The memory must have been wired by calling ::prepare(), otherwise |
888 | * we don't have the UPL. Without UPLs, pages cannot be pre-faulted |
889 | */ |
890 | assert(_wireCount != 0); |
891 | assert(_memoryEntries != NULL); |
892 | if ((_wireCount == 0) || |
893 | (_memoryEntries == NULL)) |
894 | { |
895 | return kIOReturnBadArgument; |
896 | } |
897 | |
898 | // Get the page list. |
899 | ioGMDData* dataP = getDataP(_memoryEntries); |
900 | ioPLBlock const* ioplList = getIOPLList(dataP); |
901 | pageList = getPageList(dataP); |
902 | |
903 | // Get the number of IOPLs. |
904 | UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); |
905 | |
906 | /* |
907 | * Scan through the IOPL Info Blocks, looking for the first block containing |
908 | * the offset. The research will go past it, so we'll need to go back to the |
909 | * right range at the end. |
910 | */ |
911 | UInt ioplIndex = 0; |
912 | while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) |
913 | ioplIndex++; |
914 | ioplIndex--; |
915 | |
916 | // Retrieve the IOPL info block. |
917 | ioPLBlock ioplInfo = ioplList[ioplIndex]; |
918 | |
919 | /* |
920 | * For external UPLs, the fPageInfo points directly to the UPL's page_info_t |
921 | * array. |
922 | */ |
923 | if (ioplInfo.fFlags & kIOPLExternUPL) |
924 | pageList = (upl_page_info_t*) ioplInfo.fPageInfo; |
925 | else |
926 | pageList = &pageList[ioplInfo.fPageInfo]; |
927 | |
        // Rebase [offset] into the IOPL in order to look up the first page index.
929 | mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset; |
930 | |
931 | // Retrieve the index of the first page corresponding to the offset. |
932 | currentPageIndex = atop_32(offsetInIOPL); |
933 | } |
934 | |
935 | // enter mappings |
936 | remain = size; |
937 | mapAddr = addr; |
938 | addr += pageOffset; |
939 | |
940 | while (remain && (KERN_SUCCESS == err)) |
941 | { |
942 | entryOffset = offset - entry->offset; |
943 | if ((page_mask & entryOffset) != pageOffset) |
944 | { |
945 | err = kIOReturnNotAligned; |
946 | break; |
947 | } |
948 | |
949 | if (kIODefaultCache != cacheMode) |
950 | { |
951 | vm_size_t unused = 0; |
952 | err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/, |
953 | memEntryCacheMode, NULL, entry->entry); |
954 | assert (KERN_SUCCESS == err); |
955 | } |
956 | |
957 | entryOffset -= pageOffset; |
        if (entryOffset >= entry->size) panic("entryOffset");
959 | chunk = entry->size - entryOffset; |
960 | if (chunk) |
961 | { |
962 | vm_map_kernel_flags_t vmk_flags; |
963 | |
964 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; |
965 | vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */ |
966 | |
967 | if (chunk > remain) chunk = remain; |
968 | if (options & kIOMapPrefault) |
969 | { |
970 | UInt nb_pages = round_page(chunk) / PAGE_SIZE; |
971 | |
972 | err = vm_map_enter_mem_object_prefault(map, |
973 | &mapAddr, |
974 | chunk, 0 /* mask */, |
975 | (VM_FLAGS_FIXED |
976 | | VM_FLAGS_OVERWRITE), |
977 | vmk_flags, |
978 | tag, |
979 | entry->entry, |
980 | entryOffset, |
981 | prot, // cur |
982 | prot, // max |
983 | &pageList[currentPageIndex], |
984 | nb_pages); |
985 | |
986 | // Compute the next index in the page list. |
987 | currentPageIndex += nb_pages; |
988 | assert(currentPageIndex <= _pages); |
989 | } |
990 | else |
991 | { |
992 | err = vm_map_enter_mem_object(map, |
993 | &mapAddr, |
994 | chunk, 0 /* mask */, |
995 | (VM_FLAGS_FIXED |
996 | | VM_FLAGS_OVERWRITE), |
997 | vmk_flags, |
998 | tag, |
999 | entry->entry, |
1000 | entryOffset, |
1001 | false, // copy |
1002 | prot, // cur |
1003 | prot, // max |
1004 | VM_INHERIT_NONE); |
1005 | } |
1006 | if (KERN_SUCCESS != err) break; |
1007 | remain -= chunk; |
1008 | if (!remain) break; |
1009 | mapAddr += chunk; |
1010 | offset += chunk - pageOffset; |
1011 | } |
1012 | pageOffset = 0; |
1013 | entry++; |
1014 | entryIdx++; |
1015 | if (entryIdx >= ref->count) |
1016 | { |
1017 | err = kIOReturnOverrun; |
1018 | break; |
1019 | } |
1020 | } |
1021 | |
1022 | if ((KERN_SUCCESS != err) && didAlloc) |
1023 | { |
1024 | (void) mach_vm_deallocate(map, trunc_page_64(addr), size); |
1025 | addr = 0; |
1026 | } |
1027 | *inaddr = addr; |
1028 | |
1029 | return (err); |
1030 | } |
1031 | |
1032 | IOReturn |
1033 | IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts( |
1034 | IOMemoryReference * ref, |
1035 | IOByteCount * residentPageCount, |
1036 | IOByteCount * dirtyPageCount) |
1037 | { |
1038 | IOReturn err; |
1039 | IOMemoryEntry * entries; |
1040 | unsigned int resident, dirty; |
1041 | unsigned int totalResident, totalDirty; |
1042 | |
1043 | totalResident = totalDirty = 0; |
1044 | err = kIOReturnSuccess; |
1045 | entries = ref->entries + ref->count; |
1046 | while (entries > &ref->entries[0]) |
1047 | { |
1048 | entries--; |
1049 | err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty); |
1050 | if (KERN_SUCCESS != err) break; |
1051 | totalResident += resident; |
1052 | totalDirty += dirty; |
1053 | } |
1054 | |
1055 | if (residentPageCount) *residentPageCount = totalResident; |
1056 | if (dirtyPageCount) *dirtyPageCount = totalDirty; |
1057 | return (err); |
1058 | } |
1059 | |
1060 | IOReturn |
1061 | IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable( |
1062 | IOMemoryReference * ref, |
1063 | IOOptionBits newState, |
1064 | IOOptionBits * oldState) |
1065 | { |
1066 | IOReturn err; |
1067 | IOMemoryEntry * entries; |
1068 | vm_purgable_t control; |
1069 | int totalState, state; |
1070 | |
1071 | totalState = kIOMemoryPurgeableNonVolatile; |
1072 | err = kIOReturnSuccess; |
1073 | entries = ref->entries + ref->count; |
1074 | while (entries > &ref->entries[0]) |
1075 | { |
1076 | entries--; |
1077 | |
1078 | err = purgeableControlBits(newState, &control, &state); |
1079 | if (KERN_SUCCESS != err) break; |
1080 | err = memory_entry_purgeable_control_internal(entries->entry, control, &state); |
1081 | if (KERN_SUCCESS != err) break; |
1082 | err = purgeableStateBits(&state); |
1083 | if (KERN_SUCCESS != err) break; |
1084 | |
1085 | if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty; |
1086 | else if (kIOMemoryPurgeableEmpty == totalState) continue; |
1087 | else if (kIOMemoryPurgeableVolatile == totalState) continue; |
1088 | else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile; |
1089 | else totalState = kIOMemoryPurgeableNonVolatile; |
1090 | } |
1091 | |
1092 | if (oldState) *oldState = totalState; |
1093 | return (err); |
1094 | } |
1095 | |
1096 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
1097 | |
1098 | IOMemoryDescriptor * |
1099 | IOMemoryDescriptor::withAddress(void * address, |
1100 | IOByteCount length, |
1101 | IODirection direction) |
1102 | { |
1103 | return IOMemoryDescriptor:: |
1104 | withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task); |
1105 | } |
1106 | |
1107 | #ifndef __LP64__ |
1108 | IOMemoryDescriptor * |
1109 | IOMemoryDescriptor::withAddress(IOVirtualAddress address, |
1110 | IOByteCount length, |
1111 | IODirection direction, |
1112 | task_t task) |
1113 | { |
1114 | IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; |
1115 | if (that) |
1116 | { |
1117 | if (that->initWithAddress(address, length, direction, task)) |
1118 | return that; |
1119 | |
1120 | that->release(); |
1121 | } |
1122 | return 0; |
1123 | } |
1124 | #endif /* !__LP64__ */ |
1125 | |
1126 | IOMemoryDescriptor * |
1127 | IOMemoryDescriptor::withPhysicalAddress( |
1128 | IOPhysicalAddress address, |
1129 | IOByteCount length, |
1130 | IODirection direction ) |
1131 | { |
1132 | return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL)); |
1133 | } |
1134 | |
1135 | #ifndef __LP64__ |
1136 | IOMemoryDescriptor * |
1137 | IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, |
1138 | UInt32 withCount, |
1139 | IODirection direction, |
1140 | task_t task, |
1141 | bool asReference) |
1142 | { |
1143 | IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; |
1144 | if (that) |
1145 | { |
1146 | if (that->initWithRanges(ranges, withCount, direction, task, asReference)) |
1147 | return that; |
1148 | |
1149 | that->release(); |
1150 | } |
1151 | return 0; |
1152 | } |
1153 | #endif /* !__LP64__ */ |
1154 | |
1155 | IOMemoryDescriptor * |
1156 | IOMemoryDescriptor::withAddressRange(mach_vm_address_t address, |
1157 | mach_vm_size_t length, |
1158 | IOOptionBits options, |
1159 | task_t task) |
1160 | { |
1161 | IOAddressRange range = { address, length }; |
1162 | return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task)); |
1163 | } |
1164 | |
1165 | IOMemoryDescriptor * |
1166 | IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges, |
1167 | UInt32 rangeCount, |
1168 | IOOptionBits options, |
1169 | task_t task) |
1170 | { |
1171 | IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; |
1172 | if (that) |
1173 | { |
1174 | if (task) |
1175 | options |= kIOMemoryTypeVirtual64; |
1176 | else |
1177 | options |= kIOMemoryTypePhysical64; |
1178 | |
1179 | if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0)) |
1180 | return that; |
1181 | |
1182 | that->release(); |
1183 | } |
1184 | |
1185 | return 0; |
1186 | } |
1187 | |
1188 | |
1189 | /* |
1190 | * withOptions: |
1191 | * |
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges from a given task.
1194 | * |
1195 | * Passing the ranges as a reference will avoid an extra allocation. |
1196 | */ |
1197 | IOMemoryDescriptor * |
1198 | IOMemoryDescriptor::withOptions(void * buffers, |
1199 | UInt32 count, |
1200 | UInt32 offset, |
1201 | task_t task, |
1202 | IOOptionBits opts, |
1203 | IOMapper * mapper) |
1204 | { |
1205 | IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; |
1206 | |
1207 | if (self |
1208 | && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) |
1209 | { |
1210 | self->release(); |
1211 | return 0; |
1212 | } |
1213 | |
1214 | return self; |
1215 | } |
1216 | |
1217 | bool IOMemoryDescriptor::initWithOptions(void * buffers, |
1218 | UInt32 count, |
1219 | UInt32 offset, |
1220 | task_t task, |
1221 | IOOptionBits options, |
1222 | IOMapper * mapper) |
1223 | { |
1224 | return( false ); |
1225 | } |
1226 | |
1227 | #ifndef __LP64__ |
1228 | IOMemoryDescriptor * |
1229 | IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, |
1230 | UInt32 withCount, |
1231 | IODirection direction, |
1232 | bool asReference) |
1233 | { |
1234 | IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; |
1235 | if (that) |
1236 | { |
1237 | if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) |
1238 | return that; |
1239 | |
1240 | that->release(); |
1241 | } |
1242 | return 0; |
1243 | } |
1244 | |
1245 | IOMemoryDescriptor * |
1246 | IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, |
1247 | IOByteCount offset, |
1248 | IOByteCount length, |
1249 | IODirection direction) |
1250 | { |
1251 | return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction)); |
1252 | } |
1253 | #endif /* !__LP64__ */ |
1254 | |
1255 | IOMemoryDescriptor * |
1256 | IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD) |
1257 | { |
1258 | IOGeneralMemoryDescriptor *origGenMD = |
1259 | OSDynamicCast(IOGeneralMemoryDescriptor, originalMD); |
1260 | |
1261 | if (origGenMD) |
1262 | return IOGeneralMemoryDescriptor:: |
1263 | withPersistentMemoryDescriptor(origGenMD); |
1264 | else |
1265 | return 0; |
1266 | } |
1267 | |
1268 | IOMemoryDescriptor * |
1269 | IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD) |
1270 | { |
1271 | IOMemoryReference * memRef; |
1272 | |
1273 | if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0); |
1274 | |
1275 | if (memRef == originalMD->_memRef) |
1276 | { |
1277 | originalMD->retain(); // Add a new reference to ourselves |
1278 | originalMD->memoryReferenceRelease(memRef); |
1279 | return originalMD; |
1280 | } |
1281 | |
1282 | IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor; |
1283 | IOMDPersistentInitData initData = { originalMD, memRef }; |
1284 | |
1285 | if (self |
1286 | && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) { |
1287 | self->release(); |
1288 | self = 0; |
1289 | } |
1290 | return self; |
1291 | } |
1292 | |
1293 | #ifndef __LP64__ |
1294 | bool |
1295 | IOGeneralMemoryDescriptor::initWithAddress(void * address, |
1296 | IOByteCount withLength, |
1297 | IODirection withDirection) |
1298 | { |
1299 | _singleRange.v.address = (vm_offset_t) address; |
1300 | _singleRange.v.length = withLength; |
1301 | |
1302 | return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true); |
1303 | } |
1304 | |
1305 | bool |
1306 | IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address, |
1307 | IOByteCount withLength, |
1308 | IODirection withDirection, |
1309 | task_t withTask) |
1310 | { |
1311 | _singleRange.v.address = address; |
1312 | _singleRange.v.length = withLength; |
1313 | |
1314 | return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true); |
1315 | } |
1316 | |
1317 | bool |
1318 | IOGeneralMemoryDescriptor::initWithPhysicalAddress( |
1319 | IOPhysicalAddress address, |
1320 | IOByteCount withLength, |
1321 | IODirection withDirection ) |
1322 | { |
1323 | _singleRange.p.address = address; |
1324 | _singleRange.p.length = withLength; |
1325 | |
1326 | return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true); |
1327 | } |
1328 | |
1329 | bool |
1330 | IOGeneralMemoryDescriptor::initWithPhysicalRanges( |
1331 | IOPhysicalRange * ranges, |
1332 | UInt32 count, |
1333 | IODirection direction, |
1334 | bool reference) |
1335 | { |
1336 | IOOptionBits mdOpts = direction | kIOMemoryTypePhysical; |
1337 | |
1338 | if (reference) |
1339 | mdOpts |= kIOMemoryAsReference; |
1340 | |
1341 | return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0); |
1342 | } |
1343 | |
1344 | bool |
1345 | IOGeneralMemoryDescriptor::initWithRanges( |
1346 | IOVirtualRange * ranges, |
1347 | UInt32 count, |
1348 | IODirection direction, |
1349 | task_t task, |
1350 | bool reference) |
1351 | { |
1352 | IOOptionBits mdOpts = direction; |
1353 | |
1354 | if (reference) |
1355 | mdOpts |= kIOMemoryAsReference; |
1356 | |
1357 | if (task) { |
1358 | mdOpts |= kIOMemoryTypeVirtual; |
1359 | |
1360 | // Auto-prepare if this is a kernel memory descriptor as very few |
1361 | // clients bother to prepare() kernel memory. |
1362 | // But it was not enforced so what are you going to do? |
1363 | if (task == kernel_task) |
1364 | mdOpts |= kIOMemoryAutoPrepare; |
1365 | } |
1366 | else |
1367 | mdOpts |= kIOMemoryTypePhysical; |
1368 | |
1369 | return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0); |
1370 | } |
1371 | #endif /* !__LP64__ */ |
1372 | |
1373 | /* |
1374 | * initWithOptions: |
1375 | * |
 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
 * address ranges from a given task, several physical ranges, a UPL from the
 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
1379 | * |
1380 | * Passing the ranges as a reference will avoid an extra allocation. |
1381 | * |
1382 | * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an |
1383 | * existing instance -- note this behavior is not commonly supported in other |
1384 | * I/O Kit classes, although it is supported here. |
1385 | */ |
1386 | |
1387 | bool |
1388 | IOGeneralMemoryDescriptor::initWithOptions(void * buffers, |
1389 | UInt32 count, |
1390 | UInt32 offset, |
1391 | task_t task, |
1392 | IOOptionBits options, |
1393 | IOMapper * mapper) |
1394 | { |
1395 | IOOptionBits type = options & kIOMemoryTypeMask; |
1396 | |
1397 | #ifndef __LP64__ |
1398 | if (task |
1399 | && (kIOMemoryTypeVirtual == type) |
1400 | && vm_map_is_64bit(get_task_map(task)) |
1401 | && ((IOVirtualRange *) buffers)->address) |
1402 | { |
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1404 | return false; |
1405 | } |
1406 | #endif /* !__LP64__ */ |
1407 | |
    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
1410 | if (kIOMemoryTypePersistentMD == type) { |
1411 | |
1412 | IOMDPersistentInitData *initData = (typeof(initData)) buffers; |
1413 | const IOGeneralMemoryDescriptor *orig = initData->fMD; |
1414 | ioGMDData *dataP = getDataP(orig->_memoryEntries); |
1415 | |
1416 | // Only accept persistent memory descriptors with valid dataP data. |
1417 | assert(orig->_rangesCount == 1); |
1418 | if ( !(orig->_flags & kIOMemoryPersistent) || !dataP) |
1419 | return false; |
1420 | |
1421 | _memRef = initData->fMemRef; // Grab the new named entry |
1422 | options = orig->_flags & ~kIOMemoryAsReference; |
1423 | type = options & kIOMemoryTypeMask; |
1424 | buffers = orig->_ranges.v; |
1425 | count = orig->_rangesCount; |
1426 | |
1427 | // Now grab the original task and whatever mapper was previously used |
1428 | task = orig->_task; |
1429 | mapper = dataP->fMapper; |
1430 | |
1431 | // We are ready to go through the original initialisation now |
1432 | } |
1433 | |
1434 | switch (type) { |
1435 | case kIOMemoryTypeUIO: |
1436 | case kIOMemoryTypeVirtual: |
1437 | #ifndef __LP64__ |
1438 | case kIOMemoryTypeVirtual64: |
1439 | #endif /* !__LP64__ */ |
1440 | assert(task); |
1441 | if (!task) |
1442 | return false; |
1443 | break; |
1444 | |
1445 | case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task |
1446 | #ifndef __LP64__ |
1447 | case kIOMemoryTypePhysical64: |
1448 | #endif /* !__LP64__ */ |
1449 | case kIOMemoryTypeUPL: |
1450 | assert(!task); |
1451 | break; |
1452 | default: |
1453 | return false; /* bad argument */ |
1454 | } |
1455 | |
1456 | assert(buffers); |
1457 | assert(count); |
1458 | |
1459 | /* |
1460 | * We can check the _initialized instance variable before having ever set |
1461 | * it to an initial value because I/O Kit guarantees that all our instance |
1462 | * variables are zeroed on an object's allocation. |
1463 | */ |
1464 | |
1465 | if (_initialized) { |
1466 | /* |
1467 | * An existing memory descriptor is being retargeted to point to |
1468 | * somewhere else. Clean up our present state. |
1469 | */ |
1470 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
1471 | if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) |
1472 | { |
1473 | while (_wireCount) |
1474 | complete(); |
1475 | } |
1476 | if (_ranges.v && !(kIOMemoryAsReference & _flags)) |
1477 | { |
1478 | if (kIOMemoryTypeUIO == type) |
1479 | uio_free((uio_t) _ranges.v); |
1480 | #ifndef __LP64__ |
1481 | else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) |
1482 | IODelete(_ranges.v64, IOAddressRange, _rangesCount); |
1483 | #endif /* !__LP64__ */ |
1484 | else |
1485 | IODelete(_ranges.v, IOVirtualRange, _rangesCount); |
1486 | } |
1487 | |
1488 | options |= (kIOMemoryRedirected & _flags); |
1489 | if (!(kIOMemoryRedirected & options)) |
1490 | { |
1491 | if (_memRef) |
1492 | { |
1493 | memoryReferenceRelease(_memRef); |
1494 | _memRef = 0; |
1495 | } |
1496 | if (_mappings) |
1497 | _mappings->flushCollection(); |
1498 | } |
1499 | } |
1500 | else { |
1501 | if (!super::init()) |
1502 | return false; |
1503 | _initialized = true; |
1504 | } |
1505 | |
1506 | // Grab the appropriate mapper |
1507 | if (kIOMemoryHostOrRemote & options) options |= kIOMemoryMapperNone; |
1508 | if (kIOMemoryMapperNone & options) |
1509 | mapper = 0; // No Mapper |
1510 | else if (mapper == kIOMapperSystem) { |
1511 | IOMapper::checkForSystemMapper(); |
1512 | gIOSystemMapper = mapper = IOMapper::gSystem; |
1513 | } |
1514 | |
1515 | // Remove the dynamic internal use flags from the initial setting |
1516 | options &= ~(kIOMemoryPreparedReadOnly); |
1517 | _flags = options; |
1518 | _task = task; |
1519 | |
1520 | #ifndef __LP64__ |
1521 | _direction = (IODirection) (_flags & kIOMemoryDirectionMask); |
1522 | #endif /* !__LP64__ */ |
1523 | |
1524 | _dmaReferences = 0; |
1525 | __iomd_reservedA = 0; |
1526 | __iomd_reservedB = 0; |
1527 | _highestPage = 0; |
1528 | |
1529 | if (kIOMemoryThreadSafe & options) |
1530 | { |
1531 | if (!_prepareLock) |
1532 | _prepareLock = IOLockAlloc(); |
1533 | } |
1534 | else if (_prepareLock) |
1535 | { |
1536 | IOLockFree(_prepareLock); |
1537 | _prepareLock = NULL; |
1538 | } |
1539 | |
1540 | if (kIOMemoryTypeUPL == type) { |
1541 | |
1542 | ioGMDData *dataP; |
1543 | unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1); |
1544 | |
1545 | if (!initMemoryEntries(dataSize, mapper)) return (false); |
1546 | dataP = getDataP(_memoryEntries); |
1547 | dataP->fPageCnt = 0; |
1548 | switch (kIOMemoryDirectionMask & options) |
1549 | { |
1550 | case kIODirectionOut: |
1551 | dataP->fDMAAccess = kIODMAMapReadAccess; |
1552 | break; |
1553 | case kIODirectionIn: |
1554 | dataP->fDMAAccess = kIODMAMapWriteAccess; |
1555 | break; |
1556 | case kIODirectionNone: |
1557 | case kIODirectionOutIn: |
1558 | default: |
1559 | panic("bad dir for upl 0x%x\n" , (int) options); |
1560 | break; |
1561 | } |
1562 | // _wireCount++; // UPLs start out life wired |
1563 | |
1564 | _length = count; |
1565 | _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset); |
1566 | |
1567 | ioPLBlock iopl; |
1568 | iopl.fIOPL = (upl_t) buffers; |
1569 | upl_set_referenced(iopl.fIOPL, true); |
1570 | upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL); |
1571 | |
1572 | if (upl_get_size(iopl.fIOPL) < (count + offset)) |
1573 | panic("short external upl" ); |
1574 | |
1575 | _highestPage = upl_get_highest_page(iopl.fIOPL); |
1576 | |
        // Set the flag kIOPLOnDevice conveniently equal to 1
1578 | iopl.fFlags = pageList->device | kIOPLExternUPL; |
1579 | if (!pageList->device) { |
1580 | // Pre-compute the offset into the UPL's page list |
1581 | pageList = &pageList[atop_32(offset)]; |
1582 | offset &= PAGE_MASK; |
1583 | } |
1584 | iopl.fIOMDOffset = 0; |
1585 | iopl.fMappedPage = 0; |
1586 | iopl.fPageInfo = (vm_address_t) pageList; |
1587 | iopl.fPageOffset = offset; |
1588 | _memoryEntries->appendBytes(&iopl, sizeof(iopl)); |
1589 | } |
1590 | else { |
1591 | // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO |
1592 | // kIOMemoryTypePhysical | kIOMemoryTypePhysical64 |
1593 | |
1594 | // Initialize the memory descriptor |
1595 | if (options & kIOMemoryAsReference) { |
1596 | #ifndef __LP64__ |
1597 | _rangesIsAllocated = false; |
1598 | #endif /* !__LP64__ */ |
1599 | |
1600 | // Hack assignment to get the buffer arg into _ranges. |
1601 | // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't |
1602 | // work, C++ sigh. |
1603 | // This also initialises the uio & physical ranges. |
1604 | _ranges.v = (IOVirtualRange *) buffers; |
1605 | } |
1606 | else { |
1607 | #ifndef __LP64__ |
1608 | _rangesIsAllocated = true; |
1609 | #endif /* !__LP64__ */ |
1610 | switch (type) |
1611 | { |
1612 | case kIOMemoryTypeUIO: |
1613 | _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers); |
1614 | break; |
1615 | |
1616 | #ifndef __LP64__ |
1617 | case kIOMemoryTypeVirtual64: |
1618 | case kIOMemoryTypePhysical64: |
1619 | if (count == 1 |
1620 | #ifndef __arm__ |
1621 | && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL |
1622 | #endif |
1623 | ) { |
1624 | if (kIOMemoryTypeVirtual64 == type) |
1625 | type = kIOMemoryTypeVirtual; |
1626 | else |
1627 | type = kIOMemoryTypePhysical; |
1628 | _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference; |
1629 | _rangesIsAllocated = false; |
1630 | _ranges.v = &_singleRange.v; |
1631 | _singleRange.v.address = ((IOAddressRange *) buffers)->address; |
1632 | _singleRange.v.length = ((IOAddressRange *) buffers)->length; |
1633 | break; |
1634 | } |
1635 | _ranges.v64 = IONew(IOAddressRange, count); |
1636 | if (!_ranges.v64) |
1637 | return false; |
                bcopy(buffers, _ranges.v64, count * sizeof(IOAddressRange));
1639 | break; |
1640 | #endif /* !__LP64__ */ |
1641 | case kIOMemoryTypeVirtual: |
1642 | case kIOMemoryTypePhysical: |
1643 | if (count == 1) { |
1644 | _flags |= kIOMemoryAsReference; |
1645 | #ifndef __LP64__ |
1646 | _rangesIsAllocated = false; |
1647 | #endif /* !__LP64__ */ |
1648 | _ranges.v = &_singleRange.v; |
1649 | } else { |
1650 | _ranges.v = IONew(IOVirtualRange, count); |
1651 | if (!_ranges.v) |
1652 | return false; |
1653 | } |
1654 | bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange)); |
1655 | break; |
1656 | } |
1657 | } |
1658 | _rangesCount = count; |
1659 | |
1660 | // Find starting address within the vector of ranges |
1661 | Ranges vec = _ranges; |
1662 | mach_vm_size_t totalLength = 0; |
1663 | unsigned int ind, pages = 0; |
1664 | for (ind = 0; ind < count; ind++) { |
1665 | mach_vm_address_t addr; |
1666 | mach_vm_address_t endAddr; |
1667 | mach_vm_size_t len; |
1668 | |
1669 | // addr & len are returned by this function |
1670 | getAddrLenForInd(addr, len, type, vec, ind); |
1671 | if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break; |
1672 | if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break; |
1673 | if (os_add_overflow(totalLength, len, &totalLength)) break; |
1674 | if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) |
1675 | { |
1676 | ppnum_t highPage = atop_64(addr + len - 1); |
1677 | if (highPage > _highestPage) |
1678 | _highestPage = highPage; |
1679 | } |
1680 | } |
1681 | if ((ind < count) |
1682 | || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */ |
1683 | |
1684 | _length = totalLength; |
1685 | _pages = pages; |
1686 | |
1687 | // Auto-prepare memory at creation time. |
1688 | // Implied completion when descriptor is free-ed |
1689 | |
1690 | |
1691 | if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) |
1692 | _wireCount++; // Physical MDs are, by definition, wired |
1693 | else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */ |
1694 | ioGMDData *dataP; |
1695 | unsigned dataSize; |
1696 | |
1697 | if (_pages > atop_64(max_mem)) return false; |
1698 | |
1699 | dataSize = computeDataSize(_pages, /* upls */ count * 2); |
1700 | if (!initMemoryEntries(dataSize, mapper)) return false; |
1701 | dataP = getDataP(_memoryEntries); |
1702 | dataP->fPageCnt = _pages; |
1703 | |
1704 | if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags)) |
1705 | && (VM_KERN_MEMORY_NONE == _kernelTag)) |
1706 | { |
1707 | _kernelTag = IOMemoryTag(kernel_map); |
1708 | if (_kernelTag == gIOSurfaceTag) _userTag = VM_MEMORY_IOSURFACE; |
1709 | } |
1710 | |
1711 | if ( (kIOMemoryPersistent & _flags) && !_memRef) |
1712 | { |
1713 | IOReturn |
1714 | err = memoryReferenceCreate(0, &_memRef); |
1715 | if (kIOReturnSuccess != err) return false; |
1716 | } |
1717 | |
1718 | if ((_flags & kIOMemoryAutoPrepare) |
1719 | && prepare() != kIOReturnSuccess) |
1720 | return false; |
1721 | } |
1722 | } |
1723 | |
1724 | return true; |
1725 | } |
1726 | |
1727 | /* |
1728 | * free |
1729 | * |
1730 | * Free resources. |
1731 | */ |
1732 | void IOGeneralMemoryDescriptor::free() |
1733 | { |
1734 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
1735 | |
1736 | if( reserved) |
1737 | { |
1738 | LOCK; |
1739 | reserved->dp.memory = 0; |
1740 | UNLOCK; |
1741 | } |
1742 | if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) |
1743 | { |
1744 | ioGMDData * dataP; |
1745 | if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) |
1746 | { |
1747 | dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength); |
1748 | dataP->fMappedBaseValid = dataP->fMappedBase = 0; |
1749 | } |
1750 | } |
1751 | else |
1752 | { |
1753 | while (_wireCount) complete(); |
1754 | } |
1755 | |
1756 | if (_memoryEntries) _memoryEntries->release(); |
1757 | |
1758 | if (_ranges.v && !(kIOMemoryAsReference & _flags)) |
1759 | { |
1760 | if (kIOMemoryTypeUIO == type) |
1761 | uio_free((uio_t) _ranges.v); |
1762 | #ifndef __LP64__ |
1763 | else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) |
1764 | IODelete(_ranges.v64, IOAddressRange, _rangesCount); |
1765 | #endif /* !__LP64__ */ |
1766 | else |
1767 | IODelete(_ranges.v, IOVirtualRange, _rangesCount); |
1768 | |
1769 | _ranges.v = NULL; |
1770 | } |
1771 | |
1772 | if (reserved) |
1773 | { |
1774 | if (reserved->dp.devicePager) |
1775 | { |
// memEntry holds a ref on the device pager, which owns reserved
// (IOMemoryDescriptorReserved), so do not access reserved after this point
1778 | device_pager_deallocate( (memory_object_t) reserved->dp.devicePager ); |
1779 | } |
1780 | else |
1781 | IODelete(reserved, IOMemoryDescriptorReserved, 1); |
1782 | reserved = NULL; |
1783 | } |
1784 | |
1785 | if (_memRef) memoryReferenceRelease(_memRef); |
1786 | if (_prepareLock) IOLockFree(_prepareLock); |
1787 | |
1788 | super::free(); |
1789 | } |
1790 | |
1791 | #ifndef __LP64__ |
1792 | void IOGeneralMemoryDescriptor::unmapFromKernel() |
1793 | { |
1794 | panic("IOGMD::unmapFromKernel deprecated" ); |
1795 | } |
1796 | |
1797 | void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) |
1798 | { |
1799 | panic("IOGMD::mapIntoKernel deprecated" ); |
1800 | } |
1801 | #endif /* !__LP64__ */ |
1802 | |
1803 | /* |
1804 | * getDirection: |
1805 | * |
1806 | * Get the direction of the transfer. |
1807 | */ |
1808 | IODirection IOMemoryDescriptor::getDirection() const |
1809 | { |
1810 | #ifndef __LP64__ |
1811 | if (_direction) |
1812 | return _direction; |
1813 | #endif /* !__LP64__ */ |
1814 | return (IODirection) (_flags & kIOMemoryDirectionMask); |
1815 | } |
1816 | |
1817 | /* |
1818 | * getLength: |
1819 | * |
1820 | * Get the length of the transfer (over all ranges). |
1821 | */ |
1822 | IOByteCount IOMemoryDescriptor::getLength() const |
1823 | { |
1824 | return _length; |
1825 | } |
1826 | |
1827 | void IOMemoryDescriptor::setTag( IOOptionBits tag ) |
1828 | { |
1829 | _tag = tag; |
1830 | } |
1831 | |
1832 | IOOptionBits IOMemoryDescriptor::getTag( void ) |
1833 | { |
1834 | return( _tag); |
1835 | } |
1836 | |
1837 | uint64_t IOMemoryDescriptor::getFlags(void) |
1838 | { |
1839 | return (_flags); |
1840 | } |
1841 | |
1842 | #ifndef __LP64__ |
1843 | #pragma clang diagnostic push |
1844 | #pragma clang diagnostic ignored "-Wdeprecated-declarations" |
1845 | |
// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1847 | IOPhysicalAddress |
1848 | IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length ) |
1849 | { |
1850 | addr64_t physAddr = 0; |
1851 | |
1852 | if( prepare() == kIOReturnSuccess) { |
1853 | physAddr = getPhysicalSegment64( offset, length ); |
1854 | complete(); |
1855 | } |
1856 | |
1857 | return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used |
1858 | } |
1859 | |
1860 | #pragma clang diagnostic pop |
1861 | |
1862 | #endif /* !__LP64__ */ |
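
/*
 * readBytes
 *
 * Copy bytes out of the descriptor's memory, one physical segment at a
 * time, into a caller-supplied kernel buffer. A minimal usage sketch
 * (hypothetical buffer and descriptor names, not from this file):
 *
 *   uint8_t header[64];
 *   IOByteCount copied = md->readBytes(0, header, sizeof(header));
 *   // copied may be short if a physical segment lookup fails part way
 */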
1863 | |
1864 | IOByteCount IOMemoryDescriptor::readBytes |
1865 | (IOByteCount offset, void *bytes, IOByteCount length) |
1866 | { |
1867 | addr64_t dstAddr = CAST_DOWN(addr64_t, bytes); |
1868 | IOByteCount remaining; |
1869 | |
// Assert that this entire I/O is within the available range
1871 | assert(offset <= _length); |
1872 | assert(offset + length <= _length); |
1873 | if ((offset >= _length) |
1874 | || ((offset + length) > _length)) { |
1875 | return 0; |
1876 | } |
1877 | |
1878 | assert (!(kIOMemoryRemote & _flags)); |
1879 | if (kIOMemoryRemote & _flags) return (0); |
1880 | |
1881 | if (kIOMemoryThreadSafe & _flags) |
1882 | LOCK; |
1883 | |
1884 | remaining = length = min(length, _length - offset); |
1885 | while (remaining) { // (process another target segment?) |
1886 | addr64_t srcAddr64; |
1887 | IOByteCount srcLen; |
1888 | |
1889 | srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone); |
1890 | if (!srcAddr64) |
1891 | break; |
1892 | |
1893 | // Clip segment length to remaining |
1894 | if (srcLen > remaining) |
1895 | srcLen = remaining; |
1896 | |
1897 | copypv(srcAddr64, dstAddr, srcLen, |
1898 | cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); |
1899 | |
1900 | dstAddr += srcLen; |
1901 | offset += srcLen; |
1902 | remaining -= srcLen; |
1903 | } |
1904 | |
1905 | if (kIOMemoryThreadSafe & _flags) |
1906 | UNLOCK; |
1907 | |
1908 | assert(!remaining); |
1909 | |
1910 | return length - remaining; |
1911 | } |
1912 | |
1913 | IOByteCount IOMemoryDescriptor::writeBytes |
1914 | (IOByteCount inoffset, const void *bytes, IOByteCount length) |
1915 | { |
1916 | addr64_t srcAddr = CAST_DOWN(addr64_t, bytes); |
1917 | IOByteCount remaining; |
1918 | IOByteCount offset = inoffset; |
1919 | |
// Assert that this entire I/O is within the available range
1921 | assert(offset <= _length); |
1922 | assert(offset + length <= _length); |
1923 | |
1924 | assert( !(kIOMemoryPreparedReadOnly & _flags) ); |
1925 | |
1926 | if ( (kIOMemoryPreparedReadOnly & _flags) |
1927 | || (offset >= _length) |
1928 | || ((offset + length) > _length)) { |
1929 | return 0; |
1930 | } |
1931 | |
1932 | assert (!(kIOMemoryRemote & _flags)); |
1933 | if (kIOMemoryRemote & _flags) return (0); |
1934 | |
1935 | if (kIOMemoryThreadSafe & _flags) |
1936 | LOCK; |
1937 | |
1938 | remaining = length = min(length, _length - offset); |
1939 | while (remaining) { // (process another target segment?) |
1940 | addr64_t dstAddr64; |
1941 | IOByteCount dstLen; |
1942 | |
1943 | dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); |
1944 | if (!dstAddr64) |
1945 | break; |
1946 | |
1947 | // Clip segment length to remaining |
1948 | if (dstLen > remaining) |
1949 | dstLen = remaining; |
1950 | |
1951 | if (!srcAddr) bzero_phys(dstAddr64, dstLen); |
1952 | else |
1953 | { |
1954 | copypv(srcAddr, (addr64_t) dstAddr64, dstLen, |
1955 | cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); |
1956 | srcAddr += dstLen; |
1957 | } |
1958 | offset += dstLen; |
1959 | remaining -= dstLen; |
1960 | } |
1961 | |
1962 | if (kIOMemoryThreadSafe & _flags) |
1963 | UNLOCK; |
1964 | |
1965 | assert(!remaining); |
1966 | |
1967 | #if defined(__x86_64__) |
1968 | // copypv does not cppvFsnk on intel |
1969 | #else |
1970 | if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length); |
1971 | #endif |
1972 | |
1973 | return length - remaining; |
1974 | } |
1975 | |
1976 | #ifndef __LP64__ |
1977 | void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) |
1978 | { |
1979 | panic("IOGMD::setPosition deprecated" ); |
1980 | } |
1981 | #endif /* !__LP64__ */ |
1982 | |
1983 | static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32); |
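
/*
 * getPreparationID
 *
 * Returns a 64-bit ID, drawn from the gIOMDPreparationID counter above,
 * that identifies the current prepare cycle of this descriptor. A hedged
 * usage sketch for a caller caching per-preparation DMA state
 * (hypothetical cache variable, not from this file):
 *
 *   uint64_t id = md->getPreparationID();
 *   if (id != cachedPrepID) {
 *       // the wiring may have changed; rebuild any cached segments
 *       cachedPrepID = id;
 *   }
 */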
1984 | |
1985 | uint64_t |
1986 | IOGeneralMemoryDescriptor::getPreparationID( void ) |
1987 | { |
1988 | ioGMDData *dataP; |
1989 | |
1990 | if (!_wireCount) |
1991 | return (kIOPreparationIDUnprepared); |
1992 | |
1993 | if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical) |
1994 | || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) |
1995 | { |
1996 | IOMemoryDescriptor::setPreparationID(); |
1997 | return (IOMemoryDescriptor::getPreparationID()); |
1998 | } |
1999 | |
2000 | if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) |
2001 | return (kIOPreparationIDUnprepared); |
2002 | |
2003 | if (kIOPreparationIDUnprepared == dataP->fPreparationID) |
2004 | { |
2005 | dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID); |
2006 | } |
2007 | return (dataP->fPreparationID); |
2008 | } |
2009 | |
2010 | IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void ) |
2011 | { |
2012 | if (!reserved) |
2013 | { |
2014 | reserved = IONew(IOMemoryDescriptorReserved, 1); |
2015 | if (reserved) |
2016 | bzero(reserved, sizeof(IOMemoryDescriptorReserved)); |
2017 | } |
2018 | return (reserved); |
2019 | } |
2020 | |
2021 | void IOMemoryDescriptor::setPreparationID( void ) |
2022 | { |
2023 | if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) |
2024 | { |
2025 | reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID); |
2026 | } |
2027 | } |
2028 | |
2029 | uint64_t IOMemoryDescriptor::getPreparationID( void ) |
2030 | { |
2031 | if (reserved) |
2032 | return (reserved->preparationID); |
2033 | else |
2034 | return (kIOPreparationIDUnsupported); |
2035 | } |
2036 | |
2037 | void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag) |
2038 | { |
2039 | _kernelTag = kernelTag; |
2040 | _userTag = userTag; |
2041 | } |
2042 | |
2043 | vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map) |
2044 | { |
2045 | if (vm_kernel_map_is_kernel(map)) |
2046 | { |
2047 | if (VM_KERN_MEMORY_NONE != _kernelTag) return (_kernelTag); |
2048 | } |
2049 | else |
2050 | { |
2051 | if (VM_KERN_MEMORY_NONE != _userTag) return (_userTag); |
2052 | } |
2053 | return (IOMemoryTag(map)); |
2054 | } |
2055 | |
2056 | IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const |
2057 | { |
2058 | IOReturn err = kIOReturnSuccess; |
2059 | DMACommandOps params; |
2060 | IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this); |
2061 | ioGMDData *dataP; |
2062 | |
2063 | params = (op & ~kIOMDDMACommandOperationMask & op); |
2064 | op &= kIOMDDMACommandOperationMask; |
2065 | |
2066 | if (kIOMDDMAMap == op) |
2067 | { |
2068 | if (dataSize < sizeof(IOMDDMAMapArgs)) |
2069 | return kIOReturnUnderrun; |
2070 | |
2071 | IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; |
2072 | |
2073 | if (!_memoryEntries |
2074 | && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); |
2075 | |
2076 | if (_memoryEntries && data->fMapper) |
2077 | { |
2078 | bool remap, keepMap; |
2079 | dataP = getDataP(_memoryEntries); |
2080 | |
2081 | if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits; |
2082 | if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment; |
2083 | |
2084 | keepMap = (data->fMapper == gIOSystemMapper); |
2085 | keepMap &= ((data->fOffset == 0) && (data->fLength == _length)); |
2086 | |
2087 | if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockLock(_prepareLock); |
2088 | |
2089 | remap = (!keepMap); |
2090 | remap |= (dataP->fDMAMapNumAddressBits < 64) |
2091 | && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits)); |
2092 | remap |= (dataP->fDMAMapAlignment > page_size); |
2093 | |
2094 | if (remap || !dataP->fMappedBaseValid) |
2095 | { |
2096 | // if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params); |
2097 | err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); |
2098 | if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) |
2099 | { |
2100 | dataP->fMappedBase = data->fAlloc; |
2101 | dataP->fMappedBaseValid = true; |
2102 | dataP->fMappedLength = data->fAllocLength; |
2103 | data->fAllocLength = 0; // IOMD owns the alloc now |
2104 | } |
2105 | } |
2106 | else |
2107 | { |
2108 | data->fAlloc = dataP->fMappedBase; |
2109 | data->fAllocLength = 0; // give out IOMD map |
2110 | md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength); |
2111 | } |
2112 | data->fMapContig = !dataP->fDiscontig; |
2113 | |
2114 | if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockUnlock(_prepareLock); |
2115 | } |
2116 | return (err); |
2117 | } |
2118 | if (kIOMDDMAUnmap == op) |
2119 | { |
2120 | if (dataSize < sizeof(IOMDDMAMapArgs)) |
2121 | return kIOReturnUnderrun; |
2122 | IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; |
2123 | |
2124 | err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength); |
2125 | |
2126 | return kIOReturnSuccess; |
2127 | } |
2128 | |
2129 | if (kIOMDAddDMAMapSpec == op) |
2130 | { |
2131 | if (dataSize < sizeof(IODMAMapSpecification)) |
2132 | return kIOReturnUnderrun; |
2133 | |
2134 | IODMAMapSpecification * data = (IODMAMapSpecification *) vData; |
2135 | |
2136 | if (!_memoryEntries |
2137 | && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); |
2138 | |
2139 | if (_memoryEntries) |
2140 | { |
2141 | dataP = getDataP(_memoryEntries); |
2142 | if (data->numAddressBits < dataP->fDMAMapNumAddressBits) |
2143 | dataP->fDMAMapNumAddressBits = data->numAddressBits; |
2144 | if (data->alignment > dataP->fDMAMapAlignment) |
2145 | dataP->fDMAMapAlignment = data->alignment; |
2146 | } |
2147 | return kIOReturnSuccess; |
2148 | } |
2149 | |
2150 | if (kIOMDGetCharacteristics == op) { |
2151 | |
2152 | if (dataSize < sizeof(IOMDDMACharacteristics)) |
2153 | return kIOReturnUnderrun; |
2154 | |
2155 | IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData; |
2156 | data->fLength = _length; |
2157 | data->fSGCount = _rangesCount; |
2158 | data->fPages = _pages; |
2159 | data->fDirection = getDirection(); |
2160 | if (!_wireCount) |
2161 | data->fIsPrepared = false; |
2162 | else { |
2163 | data->fIsPrepared = true; |
2164 | data->fHighestPage = _highestPage; |
2165 | if (_memoryEntries) |
2166 | { |
2167 | dataP = getDataP(_memoryEntries); |
2168 | ioPLBlock *ioplList = getIOPLList(dataP); |
2169 | UInt count = getNumIOPL(_memoryEntries, dataP); |
2170 | if (count == 1) |
2171 | data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK; |
2172 | } |
2173 | } |
2174 | |
2175 | return kIOReturnSuccess; |
2176 | } |
2177 | |
2178 | else if (kIOMDDMAActive == op) |
2179 | { |
2180 | if (params) |
2181 | { |
2182 | int16_t prior; |
2183 | prior = OSAddAtomic16(1, &md->_dmaReferences); |
2184 | if (!prior) md->_mapName = NULL; |
2185 | } |
2186 | else |
2187 | { |
2188 | if (md->_dmaReferences) OSAddAtomic16(-1, &md->_dmaReferences); |
else panic("_dmaReferences underflow");
2190 | } |
2191 | } |
2192 | else if (kIOMDWalkSegments != op) |
2193 | return kIOReturnBadArgument; |
2194 | |
2195 | // Get the next segment |
2196 | struct InternalState { |
2197 | IOMDDMAWalkSegmentArgs fIO; |
2198 | UInt fOffset2Index; |
2199 | UInt fIndex; |
2200 | UInt fNextOffset; |
2201 | } *isP; |
2202 | |
2203 | // Find the next segment |
2204 | if (dataSize < sizeof(*isP)) |
2205 | return kIOReturnUnderrun; |
2206 | |
2207 | isP = (InternalState *) vData; |
2208 | UInt offset = isP->fIO.fOffset; |
2209 | uint8_t mapped = isP->fIO.fMapped; |
2210 | uint64_t mappedBase; |
2211 | |
2212 | if (mapped && (kIOMemoryRemote & _flags)) return (kIOReturnNotAttached); |
2213 | |
2214 | if (IOMapper::gSystem && mapped |
2215 | && (!(kIOMemoryHostOnly & _flags)) |
2216 | && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) |
2217 | // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid)) |
2218 | { |
2219 | if (!_memoryEntries |
2220 | && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); |
2221 | |
2222 | dataP = getDataP(_memoryEntries); |
2223 | if (dataP->fMapper) |
2224 | { |
2225 | IODMAMapSpecification mapSpec; |
2226 | bzero(&mapSpec, sizeof(mapSpec)); |
2227 | mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; |
2228 | mapSpec.alignment = dataP->fDMAMapAlignment; |
2229 | err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength); |
2230 | if (kIOReturnSuccess != err) return (err); |
2231 | dataP->fMappedBaseValid = true; |
2232 | } |
2233 | } |
2234 | |
2235 | if (kIOMDDMAWalkMappedLocal == mapped) mappedBase = isP->fIO.fMappedBase; |
2236 | else if (mapped) |
2237 | { |
2238 | if (IOMapper::gSystem |
2239 | && (!(kIOMemoryHostOnly & _flags)) |
2240 | && _memoryEntries |
2241 | && (dataP = getDataP(_memoryEntries)) |
2242 | && dataP->fMappedBaseValid) |
2243 | { |
2244 | mappedBase = dataP->fMappedBase; |
2245 | } |
2246 | else mapped = 0; |
2247 | } |
2248 | |
2249 | if (offset >= _length) |
2250 | return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError; |
2251 | |
2252 | // Validate the previous offset |
2253 | UInt ind, off2Ind = isP->fOffset2Index; |
2254 | if (!params |
2255 | && offset |
2256 | && (offset == isP->fNextOffset || off2Ind <= offset)) |
2257 | ind = isP->fIndex; |
2258 | else |
2259 | ind = off2Ind = 0; // Start from beginning |
2260 | |
2261 | UInt length; |
2262 | UInt64 address; |
2263 | |
2264 | if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { |
2265 | |
2266 | // Physical address based memory descriptor |
2267 | const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0]; |
2268 | |
2269 | // Find the range after the one that contains the offset |
2270 | mach_vm_size_t len; |
2271 | for (len = 0; off2Ind <= offset; ind++) { |
2272 | len = physP[ind].length; |
2273 | off2Ind += len; |
2274 | } |
2275 | |
2276 | // Calculate length within range and starting address |
2277 | length = off2Ind - offset; |
2278 | address = physP[ind - 1].address + len - length; |
2279 | |
if (mapped)
2281 | { |
2282 | address = mappedBase + offset; |
2283 | } |
2284 | else |
2285 | { |
2286 | // see how far we can coalesce ranges |
2287 | while (ind < _rangesCount && address + length == physP[ind].address) { |
2288 | len = physP[ind].length; |
2289 | length += len; |
2290 | off2Ind += len; |
2291 | ind++; |
2292 | } |
2293 | } |
2294 | |
2295 | // correct contiguous check overshoot |
2296 | ind--; |
2297 | off2Ind -= len; |
2298 | } |
2299 | #ifndef __LP64__ |
2300 | else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) { |
2301 | |
2302 | // Physical address based memory descriptor |
2303 | const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0]; |
2304 | |
2305 | // Find the range after the one that contains the offset |
2306 | mach_vm_size_t len; |
2307 | for (len = 0; off2Ind <= offset; ind++) { |
2308 | len = physP[ind].length; |
2309 | off2Ind += len; |
2310 | } |
2311 | |
2312 | // Calculate length within range and starting address |
2313 | length = off2Ind - offset; |
2314 | address = physP[ind - 1].address + len - length; |
2315 | |
if (mapped)
2317 | { |
2318 | address = mappedBase + offset; |
2319 | } |
2320 | else |
2321 | { |
2322 | // see how far we can coalesce ranges |
2323 | while (ind < _rangesCount && address + length == physP[ind].address) { |
2324 | len = physP[ind].length; |
2325 | length += len; |
2326 | off2Ind += len; |
2327 | ind++; |
2328 | } |
2329 | } |
2330 | // correct contiguous check overshoot |
2331 | ind--; |
2332 | off2Ind -= len; |
2333 | } |
2334 | #endif /* !__LP64__ */ |
2335 | else do { |
2336 | if (!_wireCount) |
2337 | panic("IOGMD: not wired for the IODMACommand" ); |
2338 | |
2339 | assert(_memoryEntries); |
2340 | |
2341 | dataP = getDataP(_memoryEntries); |
2342 | const ioPLBlock *ioplList = getIOPLList(dataP); |
2343 | UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); |
2344 | upl_page_info_t *pageList = getPageList(dataP); |
2345 | |
2346 | assert(numIOPLs > 0); |
2347 | |
2348 | // Scan through iopl info blocks looking for block containing offset |
2349 | while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) |
2350 | ind++; |
2351 | |
2352 | // Go back to actual range as search goes past it |
2353 | ioPLBlock ioplInfo = ioplList[ind - 1]; |
2354 | off2Ind = ioplInfo.fIOMDOffset; |
2355 | |
2356 | if (ind < numIOPLs) |
2357 | length = ioplList[ind].fIOMDOffset; |
2358 | else |
2359 | length = _length; |
2360 | length -= offset; // Remainder within iopl |
2361 | |
2362 | // Subtract offset till this iopl in total list |
2363 | offset -= off2Ind; |
2364 | |
2365 | // If a mapped address is requested and this is a pre-mapped IOPL |
// then we just need to compute an offset relative to the mapped base.
2367 | if (mapped) { |
2368 | offset += (ioplInfo.fPageOffset & PAGE_MASK); |
2369 | address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset; |
continue; // Done; leave the do/while(false) now
2371 | } |
2372 | |
2373 | // The offset is rebased into the current iopl. |
2374 | // Now add the iopl 1st page offset. |
2375 | offset += ioplInfo.fPageOffset; |
2376 | |
2377 | // For external UPLs the fPageInfo field points directly to |
2378 | // the upl's upl_page_info_t array. |
2379 | if (ioplInfo.fFlags & kIOPLExternUPL) |
2380 | pageList = (upl_page_info_t *) ioplInfo.fPageInfo; |
2381 | else |
2382 | pageList = &pageList[ioplInfo.fPageInfo]; |
2383 | |
2384 | // Check for direct device non-paged memory |
2385 | if ( ioplInfo.fFlags & kIOPLOnDevice ) { |
2386 | address = ptoa_64(pageList->phys_addr) + offset; |
continue; // Done; leave the do/while(false) now
2388 | } |
2389 | |
// Now we need to compute the index into the pageList
2391 | UInt pageInd = atop_32(offset); |
2392 | offset &= PAGE_MASK; |
2393 | |
2394 | // Compute the starting address of this segment |
2395 | IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr; |
2396 | if (!pageAddr) { |
2397 | panic("!pageList phys_addr" ); |
2398 | } |
2399 | |
2400 | address = ptoa_64(pageAddr) + offset; |
2401 | |
// length is currently set to the length of the remainder of the iopl.
// We need to check that the remainder of the iopl is contiguous.
// This is indicated by pageList[pageInd].phys_addr being sequential.
2405 | IOByteCount contigLength = PAGE_SIZE - offset; |
2406 | while (contigLength < length |
2407 | && ++pageAddr == pageList[++pageInd].phys_addr) |
2408 | { |
2409 | contigLength += PAGE_SIZE; |
2410 | } |
2411 | |
2412 | if (contigLength < length) |
2413 | length = contigLength; |
2414 | |
2415 | |
2416 | assert(address); |
2417 | assert(length); |
2418 | |
2419 | } while (false); |
2420 | |
2421 | // Update return values and state |
2422 | isP->fIO.fIOVMAddr = address; |
2423 | isP->fIO.fLength = length; |
2424 | isP->fIndex = ind; |
2425 | isP->fOffset2Index = off2Ind; |
2426 | isP->fNextOffset = isP->fIO.fOffset + length; |
2427 | |
2428 | return kIOReturnSuccess; |
2429 | } |
2430 | |
2431 | addr64_t |
2432 | IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) |
2433 | { |
2434 | IOReturn ret; |
2435 | mach_vm_address_t address = 0; |
2436 | mach_vm_size_t length = 0; |
2437 | IOMapper * mapper = gIOSystemMapper; |
2438 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
2439 | |
2440 | if (lengthOfSegment) |
2441 | *lengthOfSegment = 0; |
2442 | |
2443 | if (offset >= _length) |
2444 | return 0; |
2445 | |
2446 | // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must |
2447 | // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use |
2448 | // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation |
2449 | // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up |
2450 | |
2451 | if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) |
2452 | { |
2453 | unsigned rangesIndex = 0; |
2454 | Ranges vec = _ranges; |
2455 | mach_vm_address_t addr; |
2456 | |
2457 | // Find starting address within the vector of ranges |
2458 | for (;;) { |
2459 | getAddrLenForInd(addr, length, type, vec, rangesIndex); |
2460 | if (offset < length) |
2461 | break; |
2462 | offset -= length; // (make offset relative) |
2463 | rangesIndex++; |
2464 | } |
2465 | |
2466 | // Now that we have the starting range, |
2467 | // lets find the last contiguous range |
2468 | addr += offset; |
2469 | length -= offset; |
2470 | |
2471 | for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) { |
2472 | mach_vm_address_t newAddr; |
2473 | mach_vm_size_t newLen; |
2474 | |
2475 | getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex); |
2476 | if (addr + length != newAddr) |
2477 | break; |
2478 | length += newLen; |
2479 | } |
2480 | if (addr) |
2481 | address = (IOPhysicalAddress) addr; // Truncate address to 32bit |
2482 | } |
2483 | else |
2484 | { |
2485 | IOMDDMAWalkSegmentState _state; |
2486 | IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state; |
2487 | |
2488 | state->fOffset = offset; |
2489 | state->fLength = _length - offset; |
2490 | state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote); |
2491 | |
2492 | ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state)); |
2493 | |
2494 | if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) |
2495 | DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n" , |
2496 | ret, this, state->fOffset, |
2497 | state->fIOVMAddr, state->fLength); |
2498 | if (kIOReturnSuccess == ret) |
2499 | { |
2500 | address = state->fIOVMAddr; |
2501 | length = state->fLength; |
2502 | } |
2503 | |
2504 | // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even |
2505 | // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up |
2506 | |
2507 | if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) |
2508 | { |
2509 | if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) |
2510 | { |
2511 | addr64_t origAddr = address; |
2512 | IOByteCount origLen = length; |
2513 | |
2514 | address = mapper->mapToPhysicalAddress(origAddr); |
2515 | length = page_size - (address & (page_size - 1)); |
2516 | while ((length < origLen) |
2517 | && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) |
2518 | length += page_size; |
2519 | if (length > origLen) |
2520 | length = origLen; |
2521 | } |
2522 | } |
2523 | } |
2524 | |
2525 | if (!address) |
2526 | length = 0; |
2527 | |
2528 | if (lengthOfSegment) |
2529 | *lengthOfSegment = length; |
2530 | |
2531 | return (address); |
2532 | } |
2533 | |
2534 | #ifndef __LP64__ |
2535 | #pragma clang diagnostic push |
2536 | #pragma clang diagnostic ignored "-Wdeprecated-declarations" |
2537 | |
2538 | addr64_t |
2539 | IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) |
2540 | { |
2541 | addr64_t address = 0; |
2542 | |
2543 | if (options & _kIOMemorySourceSegment) |
2544 | { |
2545 | address = getSourceSegment(offset, lengthOfSegment); |
2546 | } |
2547 | else if (options & kIOMemoryMapperNone) |
2548 | { |
2549 | address = getPhysicalSegment64(offset, lengthOfSegment); |
2550 | } |
2551 | else |
2552 | { |
2553 | address = getPhysicalSegment(offset, lengthOfSegment); |
2554 | } |
2555 | |
2556 | return (address); |
2557 | } |
2558 | #pragma clang diagnostic pop |
2559 | |
2560 | addr64_t |
2561 | IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) |
2562 | { |
2563 | return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone)); |
2564 | } |
2565 | |
2566 | IOPhysicalAddress |
2567 | IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment) |
2568 | { |
2569 | addr64_t address = 0; |
2570 | IOByteCount length = 0; |
2571 | |
2572 | address = getPhysicalSegment(offset, lengthOfSegment, 0); |
2573 | |
2574 | if (lengthOfSegment) |
2575 | length = *lengthOfSegment; |
2576 | |
2577 | if ((address + length) > 0x100000000ULL) |
2578 | { |
2579 | panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s" , |
2580 | address, (long) length, (getMetaClass())->getClassName()); |
2581 | } |
2582 | |
2583 | return ((IOPhysicalAddress) address); |
2584 | } |
2585 | |
2586 | addr64_t |
2587 | IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) |
2588 | { |
2589 | IOPhysicalAddress phys32; |
2590 | IOByteCount length; |
2591 | addr64_t phys64; |
2592 | IOMapper * mapper = 0; |
2593 | |
2594 | phys32 = getPhysicalSegment(offset, lengthOfSegment); |
2595 | if (!phys32) |
2596 | return 0; |
2597 | |
2598 | if (gIOSystemMapper) |
2599 | mapper = gIOSystemMapper; |
2600 | |
2601 | if (mapper) |
2602 | { |
2603 | IOByteCount origLen; |
2604 | |
2605 | phys64 = mapper->mapToPhysicalAddress(phys32); |
2606 | origLen = *lengthOfSegment; |
2607 | length = page_size - (phys64 & (page_size - 1)); |
2608 | while ((length < origLen) |
2609 | && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) |
2610 | length += page_size; |
2611 | if (length > origLen) |
2612 | length = origLen; |
2613 | |
2614 | *lengthOfSegment = length; |
2615 | } |
2616 | else |
2617 | phys64 = (addr64_t) phys32; |
2618 | |
2619 | return phys64; |
2620 | } |
2621 | |
2622 | IOPhysicalAddress |
2623 | IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment) |
2624 | { |
2625 | return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0)); |
2626 | } |
2627 | |
2628 | IOPhysicalAddress |
2629 | IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment) |
2630 | { |
2631 | return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment)); |
2632 | } |
2633 | |
2634 | #pragma clang diagnostic push |
2635 | #pragma clang diagnostic ignored "-Wdeprecated-declarations" |
2636 | |
2637 | void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, |
2638 | IOByteCount * lengthOfSegment) |
2639 | { |
2640 | if (_task == kernel_task) |
2641 | return (void *) getSourceSegment(offset, lengthOfSegment); |
2642 | else |
2643 | panic("IOGMD::getVirtualSegment deprecated" ); |
2644 | |
2645 | return 0; |
2646 | } |
2647 | #pragma clang diagnostic pop |
2648 | #endif /* !__LP64__ */ |
2649 | |
2650 | IOReturn |
2651 | IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const |
2652 | { |
2653 | IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this); |
2654 | DMACommandOps params; |
2655 | IOReturn err; |
2656 | |
2657 | params = (op & ~kIOMDDMACommandOperationMask & op); |
2658 | op &= kIOMDDMACommandOperationMask; |
2659 | |
2660 | if (kIOMDGetCharacteristics == op) { |
2661 | if (dataSize < sizeof(IOMDDMACharacteristics)) |
2662 | return kIOReturnUnderrun; |
2663 | |
2664 | IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData; |
2665 | data->fLength = getLength(); |
2666 | data->fSGCount = 0; |
2667 | data->fDirection = getDirection(); |
2668 | data->fIsPrepared = true; // Assume prepared - fails safe |
2669 | } |
2670 | else if (kIOMDWalkSegments == op) { |
2671 | if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) |
2672 | return kIOReturnUnderrun; |
2673 | |
2674 | IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData; |
2675 | IOByteCount offset = (IOByteCount) data->fOffset; |
2676 | |
2677 | IOPhysicalLength length; |
2678 | if (data->fMapped && IOMapper::gSystem) |
2679 | data->fIOVMAddr = md->getPhysicalSegment(offset, &length); |
2680 | else |
2681 | data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone); |
2682 | data->fLength = length; |
2683 | } |
2684 | else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported; |
2685 | else if (kIOMDDMAMap == op) |
2686 | { |
2687 | if (dataSize < sizeof(IOMDDMAMapArgs)) |
2688 | return kIOReturnUnderrun; |
2689 | IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; |
2690 | |
if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2692 | |
2693 | data->fMapContig = true; |
2694 | err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); |
2695 | |
2696 | return (err); |
2697 | } |
2698 | else if (kIOMDDMAUnmap == op) |
2699 | { |
2700 | if (dataSize < sizeof(IOMDDMAMapArgs)) |
2701 | return kIOReturnUnderrun; |
2702 | IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; |
2703 | |
2704 | err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength); |
2705 | |
2706 | return (kIOReturnSuccess); |
2707 | } |
2708 | else return kIOReturnBadArgument; |
2709 | |
2710 | return kIOReturnSuccess; |
2711 | } |
2712 | |
2713 | IOReturn |
2714 | IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, |
2715 | IOOptionBits * oldState ) |
2716 | { |
2717 | IOReturn err = kIOReturnSuccess; |
2718 | |
2719 | vm_purgable_t control; |
2720 | int state; |
2721 | |
2722 | assert (!(kIOMemoryRemote & _flags)); |
2723 | if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); |
2724 | |
2725 | if (_memRef) |
2726 | { |
2727 | err = super::setPurgeable(newState, oldState); |
2728 | } |
2729 | else |
2730 | { |
2731 | if (kIOMemoryThreadSafe & _flags) |
2732 | LOCK; |
2733 | do |
2734 | { |
2735 | // Find the appropriate vm_map for the given task |
2736 | vm_map_t curMap; |
2737 | if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) |
2738 | { |
2739 | err = kIOReturnNotReady; |
2740 | break; |
2741 | } |
2742 | else if (!_task) |
2743 | { |
2744 | err = kIOReturnUnsupported; |
2745 | break; |
2746 | } |
2747 | else |
2748 | { |
2749 | curMap = get_task_map(_task); |
2750 | if (NULL == curMap) |
2751 | { |
2752 | err = KERN_INVALID_ARGUMENT; |
2753 | break; |
2754 | } |
2755 | } |
2756 | |
2757 | // can only do one range |
2758 | Ranges vec = _ranges; |
2759 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
2760 | mach_vm_address_t addr; |
2761 | mach_vm_size_t len; |
2762 | getAddrLenForInd(addr, len, type, vec, 0); |
2763 | |
2764 | err = purgeableControlBits(newState, &control, &state); |
2765 | if (kIOReturnSuccess != err) |
2766 | break; |
2767 | err = vm_map_purgable_control(curMap, addr, control, &state); |
2768 | if (oldState) |
2769 | { |
2770 | if (kIOReturnSuccess == err) |
2771 | { |
2772 | err = purgeableStateBits(&state); |
2773 | *oldState = state; |
2774 | } |
2775 | } |
2776 | } |
2777 | while (false); |
2778 | if (kIOMemoryThreadSafe & _flags) |
2779 | UNLOCK; |
2780 | } |
2781 | |
2782 | return (err); |
2783 | } |
2784 | |
2785 | IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState, |
2786 | IOOptionBits * oldState ) |
2787 | { |
2788 | IOReturn err = kIOReturnNotReady; |
2789 | |
2790 | if (kIOMemoryThreadSafe & _flags) LOCK; |
2791 | if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState); |
2792 | if (kIOMemoryThreadSafe & _flags) UNLOCK; |
2793 | |
2794 | return (err); |
2795 | } |
2796 | |
2797 | IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount, |
2798 | IOByteCount * dirtyPageCount ) |
2799 | { |
2800 | IOReturn err = kIOReturnNotReady; |
2801 | |
2802 | assert (!(kIOMemoryRemote & _flags)); |
2803 | if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); |
2804 | |
2805 | if (kIOMemoryThreadSafe & _flags) LOCK; |
2806 | if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount); |
2807 | else |
2808 | { |
2809 | IOMultiMemoryDescriptor * mmd; |
2810 | IOSubMemoryDescriptor * smd; |
2811 | if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) |
2812 | { |
2813 | err = smd->getPageCounts(residentPageCount, dirtyPageCount); |
2814 | } |
2815 | else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) |
2816 | { |
2817 | err = mmd->getPageCounts(residentPageCount, dirtyPageCount); |
2818 | } |
2819 | } |
2820 | if (kIOMemoryThreadSafe & _flags) UNLOCK; |
2821 | |
2822 | return (err); |
2823 | } |
2824 | |
2825 | |
2826 | #if defined(__arm__) || defined(__arm64__) |
2827 | extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res); |
2828 | extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res); |
2829 | #else /* defined(__arm__) || defined(__arm64__) */ |
2830 | extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count); |
2831 | extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count); |
2832 | #endif /* defined(__arm__) || defined(__arm64__) */ |
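
// SetEncryptOp / ClearEncryptOp round the start up and truncate the end
// down, so only pages wholly contained in [pa, pa + count) have their
// pmap no-encrypt attribute changed; partial pages at either end are
// left untouched.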
2833 | |
2834 | static void SetEncryptOp(addr64_t pa, unsigned int count) |
2835 | { |
2836 | ppnum_t page, end; |
2837 | |
2838 | page = atop_64(round_page_64(pa)); |
2839 | end = atop_64(trunc_page_64(pa + count)); |
2840 | for (; page < end; page++) |
2841 | { |
2842 | pmap_clear_noencrypt(page); |
2843 | } |
2844 | } |
2845 | |
2846 | static void ClearEncryptOp(addr64_t pa, unsigned int count) |
2847 | { |
2848 | ppnum_t page, end; |
2849 | |
2850 | page = atop_64(round_page_64(pa)); |
2851 | end = atop_64(trunc_page_64(pa + count)); |
2852 | for (; page < end; page++) |
2853 | { |
2854 | pmap_set_noencrypt(page); |
2855 | } |
2856 | } |
2857 | |
2858 | IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, |
2859 | IOByteCount offset, IOByteCount length ) |
2860 | { |
2861 | IOByteCount remaining; |
2862 | unsigned int res; |
2863 | void (*func)(addr64_t pa, unsigned int count) = 0; |
2864 | #if defined(__arm__) || defined(__arm64__) |
2865 | void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0; |
2866 | #endif |
2867 | |
2868 | assert (!(kIOMemoryRemote & _flags)); |
2869 | if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); |
2870 | |
2871 | switch (options) |
2872 | { |
2873 | case kIOMemoryIncoherentIOFlush: |
2874 | #if defined(__arm__) || defined(__arm64__) |
2875 | func_ext = &dcache_incoherent_io_flush64; |
2876 | #if __ARM_COHERENT_IO__ |
2877 | func_ext(0, 0, 0, &res); |
2878 | return kIOReturnSuccess; |
2879 | #else /* __ARM_COHERENT_IO__ */ |
2880 | break; |
2881 | #endif /* __ARM_COHERENT_IO__ */ |
2882 | #else /* defined(__arm__) || defined(__arm64__) */ |
2883 | func = &dcache_incoherent_io_flush64; |
2884 | break; |
2885 | #endif /* defined(__arm__) || defined(__arm64__) */ |
2886 | case kIOMemoryIncoherentIOStore: |
2887 | #if defined(__arm__) || defined(__arm64__) |
2888 | func_ext = &dcache_incoherent_io_store64; |
2889 | #if __ARM_COHERENT_IO__ |
2890 | func_ext(0, 0, 0, &res); |
2891 | return kIOReturnSuccess; |
2892 | #else /* __ARM_COHERENT_IO__ */ |
2893 | break; |
2894 | #endif /* __ARM_COHERENT_IO__ */ |
2895 | #else /* defined(__arm__) || defined(__arm64__) */ |
2896 | func = &dcache_incoherent_io_store64; |
2897 | break; |
2898 | #endif /* defined(__arm__) || defined(__arm64__) */ |
2899 | |
2900 | case kIOMemorySetEncrypted: |
2901 | func = &SetEncryptOp; |
2902 | break; |
2903 | case kIOMemoryClearEncrypted: |
2904 | func = &ClearEncryptOp; |
2905 | break; |
2906 | } |
2907 | |
2908 | #if defined(__arm__) || defined(__arm64__) |
2909 | if ((func == 0) && (func_ext == 0)) |
2910 | return (kIOReturnUnsupported); |
2911 | #else /* defined(__arm__) || defined(__arm64__) */ |
2912 | if (!func) |
2913 | return (kIOReturnUnsupported); |
2914 | #endif /* defined(__arm__) || defined(__arm64__) */ |
2915 | |
2916 | if (kIOMemoryThreadSafe & _flags) |
2917 | LOCK; |
2918 | |
2919 | res = 0x0UL; |
2920 | remaining = length = min(length, getLength() - offset); |
2921 | while (remaining) |
2922 | // (process another target segment?) |
2923 | { |
2924 | addr64_t dstAddr64; |
2925 | IOByteCount dstLen; |
2926 | |
2927 | dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); |
2928 | if (!dstAddr64) |
2929 | break; |
2930 | |
2931 | // Clip segment length to remaining |
2932 | if (dstLen > remaining) |
2933 | dstLen = remaining; |
2934 | |
2935 | #if defined(__arm__) || defined(__arm64__) |
2936 | if (func) |
2937 | (*func)(dstAddr64, dstLen); |
2938 | if (func_ext) { |
2939 | (*func_ext)(dstAddr64, dstLen, remaining, &res); |
2940 | if (res != 0x0UL) { |
2941 | remaining = 0; |
2942 | break; |
2943 | } |
2944 | } |
2945 | #else /* defined(__arm__) || defined(__arm64__) */ |
2946 | (*func)(dstAddr64, dstLen); |
2947 | #endif /* defined(__arm__) || defined(__arm64__) */ |
2948 | |
2949 | offset += dstLen; |
2950 | remaining -= dstLen; |
2951 | } |
2952 | |
2953 | if (kIOMemoryThreadSafe & _flags) |
2954 | UNLOCK; |
2955 | |
2956 | return (remaining ? kIOReturnUnderrun : kIOReturnSuccess); |
2957 | } |
2958 | |
2959 | /* |
2960 | * |
2961 | */ |
2962 | |
2963 | #if defined(__i386__) || defined(__x86_64__) |
2964 | |
2965 | #define io_kernel_static_start vm_kernel_stext |
2966 | #define io_kernel_static_end vm_kernel_etext |
2967 | |
2968 | #elif defined(__arm__) || defined(__arm64__) |
2969 | |
2970 | extern vm_offset_t static_memory_end; |
2971 | |
2972 | #if defined(__arm64__) |
2973 | #define io_kernel_static_start vm_kext_base |
2974 | #else /* defined(__arm64__) */ |
2975 | #define io_kernel_static_start vm_kernel_stext |
2976 | #endif /* defined(__arm64__) */ |
2977 | |
2978 | #define io_kernel_static_end static_memory_end |
2979 | |
2980 | #else |
2981 | #error io_kernel_static_end is undefined for this architecture |
2982 | #endif |
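
// io_get_kernel_static_upl: static kernel memory is wired by definition,
// so no UPL is created (*upl is returned NULL); the page list is filled in
// directly from the kernel pmap via pmap_find_phys().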
2983 | |
2984 | static kern_return_t |
2985 | io_get_kernel_static_upl( |
2986 | vm_map_t /* map */, |
2987 | uintptr_t offset, |
2988 | upl_size_t *upl_size, |
2989 | upl_t *upl, |
2990 | upl_page_info_array_t page_list, |
2991 | unsigned int *count, |
2992 | ppnum_t *highest_page) |
2993 | { |
2994 | unsigned int pageCount, page; |
2995 | ppnum_t phys; |
2996 | ppnum_t highestPage = 0; |
2997 | |
2998 | pageCount = atop_32(*upl_size); |
2999 | if (pageCount > *count) |
3000 | pageCount = *count; |
3001 | |
3002 | *upl = NULL; |
3003 | |
3004 | for (page = 0; page < pageCount; page++) |
3005 | { |
3006 | phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page)); |
3007 | if (!phys) |
3008 | break; |
3009 | page_list[page].phys_addr = phys; |
3010 | page_list[page].free_when_done = 0; |
3011 | page_list[page].absent = 0; |
3012 | page_list[page].dirty = 0; |
3013 | page_list[page].precious = 0; |
3014 | page_list[page].device = 0; |
3015 | if (phys > highestPage) |
3016 | highestPage = phys; |
3017 | } |
3018 | |
3019 | *highest_page = highestPage; |
3020 | |
3021 | return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError); |
3022 | } |
3023 | |
3024 | IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) |
3025 | { |
3026 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
3027 | IOReturn error = kIOReturnSuccess; |
3028 | ioGMDData *dataP; |
3029 | upl_page_info_array_t pageInfo; |
3030 | ppnum_t mapBase; |
3031 | vm_tag_t tag = VM_KERN_MEMORY_NONE; |
3032 | |
3033 | assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type); |
3034 | |
3035 | if ((kIODirectionOutIn & forDirection) == kIODirectionNone) |
3036 | forDirection = (IODirection) (forDirection | getDirection()); |
3037 | |
3038 | dataP = getDataP(_memoryEntries); |
3039 | upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation |
3040 | switch (kIODirectionOutIn & forDirection) |
3041 | { |
3042 | case kIODirectionOut: |
3043 | // Pages do not need to be marked as dirty on commit |
3044 | uplFlags = UPL_COPYOUT_FROM; |
3045 | dataP->fDMAAccess = kIODMAMapReadAccess; |
3046 | break; |
3047 | |
3048 | case kIODirectionIn: |
3049 | dataP->fDMAAccess = kIODMAMapWriteAccess; |
3050 | uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM |
3051 | break; |
3052 | |
3053 | default: |
3054 | dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess; |
3055 | uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM |
3056 | break; |
3057 | } |
3058 | |
3059 | if (_wireCount) |
3060 | { |
3061 | if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) |
3062 | { |
OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3064 | error = kIOReturnNotWritable; |
3065 | } |
3066 | } |
3067 | else |
3068 | { |
3069 | IOMapper *mapper; |
3070 | |
3071 | mapper = dataP->fMapper; |
3072 | dataP->fMappedBaseValid = dataP->fMappedBase = 0; |
3073 | |
3074 | uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; |
3075 | tag = _kernelTag; |
3076 | if (VM_KERN_MEMORY_NONE == tag) tag = IOMemoryTag(kernel_map); |
3077 | |
3078 | if (kIODirectionPrepareToPhys32 & forDirection) |
3079 | { |
3080 | if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR; |
3081 | if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32; |
3082 | } |
3083 | if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT; |
3084 | if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO; |
3085 | if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY; |
3086 | |
3087 | mapBase = 0; |
3088 | |
3089 | // Note that appendBytes(NULL) zeros the data up to the desired length |
3090 | // and the length parameter is an unsigned int |
3091 | size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t); |
3092 | if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory); |
3093 | if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory); |
3094 | dataP = 0; |
3095 | |
3096 | // Find the appropriate vm_map for the given task |
3097 | vm_map_t curMap; |
3098 | if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0; |
3099 | else curMap = get_task_map(_task); |
3100 | |
3101 | // Iterate over the vector of virtual ranges |
3102 | Ranges vec = _ranges; |
3103 | unsigned int pageIndex = 0; |
3104 | IOByteCount mdOffset = 0; |
3105 | ppnum_t highestPage = 0; |
3106 | |
3107 | IOMemoryEntry * memRefEntry = 0; |
3108 | if (_memRef) memRefEntry = &_memRef->entries[0]; |
3109 | |
3110 | for (UInt range = 0; range < _rangesCount; range++) { |
3111 | ioPLBlock iopl; |
3112 | mach_vm_address_t startPage, startPageOffset; |
3113 | mach_vm_size_t numBytes; |
3114 | ppnum_t highPage = 0; |
3115 | |
3116 | // Get the startPage address and length of vec[range] |
3117 | getAddrLenForInd(startPage, numBytes, type, vec, range); |
3118 | startPageOffset = startPage & PAGE_MASK; |
3119 | iopl.fPageOffset = startPageOffset; |
3120 | numBytes += startPageOffset; |
3121 | startPage = trunc_page_64(startPage); |
3122 | |
3123 | if (mapper) |
3124 | iopl.fMappedPage = mapBase + pageIndex; |
3125 | else |
3126 | iopl.fMappedPage = 0; |
3127 | |
3128 | // Iterate over the current range, creating UPLs |
3129 | while (numBytes) { |
3130 | vm_address_t kernelStart = (vm_address_t) startPage; |
3131 | vm_map_t theMap; |
3132 | if (curMap) theMap = curMap; |
3133 | else if (_memRef) |
3134 | { |
3135 | theMap = NULL; |
3136 | } |
3137 | else |
3138 | { |
3139 | assert(_task == kernel_task); |
3140 | theMap = IOPageableMapForAddress(kernelStart); |
3141 | } |
3142 | |
3143 | // ioplFlags is an in/out parameter |
3144 | upl_control_flags_t ioplFlags = uplFlags; |
3145 | dataP = getDataP(_memoryEntries); |
3146 | pageInfo = getPageList(dataP); |
3147 | upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; |
3148 | |
3149 | mach_vm_size_t _ioplSize = round_page(numBytes); |
3150 | upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES; |
3151 | unsigned int numPageInfo = atop_32(ioplSize); |
3152 | |
3153 | if ((theMap == kernel_map) |
3154 | && (kernelStart >= io_kernel_static_start) |
3155 | && (kernelStart < io_kernel_static_end)) { |
3156 | error = io_get_kernel_static_upl(theMap, |
3157 | kernelStart, |
3158 | &ioplSize, |
3159 | &iopl.fIOPL, |
3160 | baseInfo, |
3161 | &numPageInfo, |
3162 | &highPage); |
3163 | } |
3164 | else if (_memRef) { |
3165 | memory_object_offset_t entryOffset; |
3166 | |
3167 | entryOffset = mdOffset; |
3168 | entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset); |
3169 | if (entryOffset >= memRefEntry->size) { |
3170 | memRefEntry++; |
if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
3172 | entryOffset = 0; |
3173 | } |
3174 | if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset); |
3175 | error = memory_object_iopl_request(memRefEntry->entry, |
3176 | entryOffset, |
3177 | &ioplSize, |
3178 | &iopl.fIOPL, |
3179 | baseInfo, |
3180 | &numPageInfo, |
3181 | &ioplFlags, |
3182 | tag); |
3183 | } |
3184 | else { |
3185 | assert(theMap); |
3186 | error = vm_map_create_upl(theMap, |
3187 | startPage, |
3188 | (upl_size_t*)&ioplSize, |
3189 | &iopl.fIOPL, |
3190 | baseInfo, |
3191 | &numPageInfo, |
3192 | &ioplFlags, |
3193 | tag); |
3194 | } |
3195 | |
3196 | if (error != KERN_SUCCESS) goto abortExit; |
3197 | |
3198 | assert(ioplSize); |
3199 | |
3200 | if (iopl.fIOPL) |
3201 | highPage = upl_get_highest_page(iopl.fIOPL); |
3202 | if (highPage > highestPage) |
3203 | highestPage = highPage; |
3204 | |
3205 | if (baseInfo->device) { |
3206 | numPageInfo = 1; |
3207 | iopl.fFlags = kIOPLOnDevice; |
3208 | } |
3209 | else { |
3210 | iopl.fFlags = 0; |
3211 | } |
3212 | |
3213 | iopl.fIOMDOffset = mdOffset; |
3214 | iopl.fPageInfo = pageIndex; |
3215 | if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) dataP->fDiscontig = true; |
3216 | |
3217 | if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { |
// Clean up partially created and unsaved iopl
3219 | if (iopl.fIOPL) { |
3220 | upl_abort(iopl.fIOPL, 0); |
3221 | upl_deallocate(iopl.fIOPL); |
3222 | } |
3223 | goto abortExit; |
3224 | } |
3225 | dataP = 0; |
3226 | |
// Check for multiple iopls in one virtual range
3228 | pageIndex += numPageInfo; |
3229 | mdOffset -= iopl.fPageOffset; |
3230 | if (ioplSize < numBytes) { |
3231 | numBytes -= ioplSize; |
3232 | startPage += ioplSize; |
3233 | mdOffset += ioplSize; |
3234 | iopl.fPageOffset = 0; |
3235 | if (mapper) iopl.fMappedPage = mapBase + pageIndex; |
3236 | } |
3237 | else { |
3238 | mdOffset += numBytes; |
3239 | break; |
3240 | } |
3241 | } |
3242 | } |
3243 | |
3244 | _highestPage = highestPage; |
3245 | |
3246 | if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly; |
3247 | } |
3248 | |
3249 | #if IOTRACKING |
3250 | if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) |
3251 | { |
3252 | dataP = getDataP(_memoryEntries); |
3253 | if (!dataP->fWireTracking.link.next) |
3254 | { |
3255 | IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag); |
3256 | } |
3257 | } |
3258 | #endif /* IOTRACKING */ |
3259 | |
3260 | return (error); |
3261 | |
3262 | abortExit: |
3263 | { |
3264 | dataP = getDataP(_memoryEntries); |
3265 | UInt done = getNumIOPL(_memoryEntries, dataP); |
3266 | ioPLBlock *ioplList = getIOPLList(dataP); |
3267 | |
3268 | for (UInt range = 0; range < done; range++) |
3269 | { |
3270 | if (ioplList[range].fIOPL) { |
3271 | upl_abort(ioplList[range].fIOPL, 0); |
3272 | upl_deallocate(ioplList[range].fIOPL); |
3273 | } |
3274 | } |
3275 | (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() |
3276 | } |
3277 | |
3278 | if (error == KERN_FAILURE) |
3279 | error = kIOReturnCannotWire; |
3280 | else if (error == KERN_MEMORY_ERROR) |
3281 | error = kIOReturnNoResources; |
3282 | |
3283 | return error; |
3284 | } |
3285 | |
3286 | bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper) |
3287 | { |
3288 | ioGMDData * dataP; |
3289 | unsigned dataSize = size; |
3290 | |
3291 | if (!_memoryEntries) { |
3292 | _memoryEntries = OSData::withCapacity(dataSize); |
3293 | if (!_memoryEntries) |
3294 | return false; |
3295 | } |
3296 | else if (!_memoryEntries->initWithCapacity(dataSize)) |
3297 | return false; |
3298 | |
3299 | _memoryEntries->appendBytes(0, computeDataSize(0, 0)); |
3300 | dataP = getDataP(_memoryEntries); |
3301 | |
3302 | if (mapper == kIOMapperWaitSystem) { |
3303 | IOMapper::checkForSystemMapper(); |
3304 | mapper = IOMapper::gSystem; |
3305 | } |
3306 | dataP->fMapper = mapper; |
3307 | dataP->fPageCnt = 0; |
3308 | dataP->fMappedBase = 0; |
3309 | dataP->fDMAMapNumAddressBits = 64; |
3310 | dataP->fDMAMapAlignment = 0; |
3311 | dataP->fPreparationID = kIOPreparationIDUnprepared; |
3312 | dataP->fDiscontig = false; |
3313 | dataP->fCompletionError = false; |
3314 | dataP->fMappedBaseValid = false; |
3315 | |
3316 | return (true); |
3317 | } |
3318 | |
3319 | IOReturn IOMemoryDescriptor::dmaMap( |
3320 | IOMapper * mapper, |
3321 | IODMACommand * command, |
3322 | const IODMAMapSpecification * mapSpec, |
3323 | uint64_t offset, |
3324 | uint64_t length, |
3325 | uint64_t * mapAddress, |
3326 | uint64_t * mapLength) |
3327 | { |
3328 | IOReturn err; |
3329 | uint32_t mapOptions; |
3330 | |
3331 | mapOptions = 0; |
3332 | mapOptions |= kIODMAMapReadAccess; |
3333 | if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; |
3334 | |
3335 | err = mapper->iovmMapMemory(this, offset, length, mapOptions, |
3336 | mapSpec, command, NULL, mapAddress, mapLength); |
3337 | |
3338 | if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength); |
3339 | |
3340 | return (err); |
3341 | } |
3342 | |
3343 | void IOMemoryDescriptor::dmaMapRecord( |
3344 | IOMapper * mapper, |
3345 | IODMACommand * command, |
3346 | uint64_t mapLength) |
3347 | { |
3348 | kern_allocation_name_t alloc; |
3349 | int16_t prior; |
3350 | |
3351 | if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) |
3352 | { |
3353 | kern_allocation_update_size(mapper->fAllocName, mapLength); |
3354 | } |
3355 | |
3356 | if (!command) return; |
3357 | prior = OSAddAtomic16(1, &_dmaReferences); |
3358 | if (!prior) |
3359 | { |
3360 | if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) |
3361 | { |
3362 | _mapName = alloc; |
3363 | mapLength = _length; |
3364 | kern_allocation_update_subtotal(alloc, _kernelTag, mapLength); |
3365 | } |
3366 | else _mapName = NULL; |
3367 | } |
3368 | } |
3369 | |
3370 | IOReturn IOMemoryDescriptor::dmaUnmap( |
3371 | IOMapper * mapper, |
3372 | IODMACommand * command, |
3373 | uint64_t offset, |
3374 | uint64_t mapAddress, |
3375 | uint64_t mapLength) |
3376 | { |
3377 | IOReturn ret; |
3378 | kern_allocation_name_t alloc; |
3379 | kern_allocation_name_t mapName; |
3380 | int16_t prior; |
3381 | |
3382 | mapName = 0; |
3383 | prior = 0; |
3384 | if (command) |
3385 | { |
3386 | mapName = _mapName; |
3387 | if (_dmaReferences) prior = OSAddAtomic16(-1, &_dmaReferences); |
else panic("_dmaReferences underflow");
3389 | } |
3390 | |
3391 | if (!mapLength) return (kIOReturnSuccess); |
3392 | |
3393 | ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength); |
3394 | |
3395 | if ((alloc = mapper->fAllocName)) |
3396 | { |
3397 | kern_allocation_update_size(alloc, -mapLength); |
3398 | if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) |
3399 | { |
3400 | mapLength = _length; |
3401 | kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength); |
3402 | } |
3403 | } |
3404 | |
3405 | return (ret); |
3406 | } |
3407 | |
3408 | IOReturn IOGeneralMemoryDescriptor::dmaMap( |
3409 | IOMapper * mapper, |
3410 | IODMACommand * command, |
3411 | const IODMAMapSpecification * mapSpec, |
3412 | uint64_t offset, |
3413 | uint64_t length, |
3414 | uint64_t * mapAddress, |
3415 | uint64_t * mapLength) |
3416 | { |
3417 | IOReturn err = kIOReturnSuccess; |
3418 | ioGMDData * dataP; |
3419 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
3420 | |
3421 | *mapAddress = 0; |
3422 | if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess); |
3423 | if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); |
3424 | |
3425 | if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64) |
3426 | || offset || (length != _length)) |
3427 | { |
3428 | err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength); |
3429 | } |
3430 | else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) |
3431 | { |
3432 | const ioPLBlock * ioplList = getIOPLList(dataP); |
3433 | upl_page_info_t * pageList; |
3434 | uint32_t mapOptions = 0; |
3435 | |
3436 | IODMAMapSpecification mapSpec; |
3437 | bzero(&mapSpec, sizeof(mapSpec)); |
3438 | mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; |
3439 | mapSpec.alignment = dataP->fDMAMapAlignment; |
3440 | |
3441 | // For external UPLs the fPageInfo field points directly to |
3442 | // the upl's upl_page_info_t array. |
3443 | if (ioplList->fFlags & kIOPLExternUPL) |
3444 | { |
3445 | pageList = (upl_page_info_t *) ioplList->fPageInfo; |
3446 | mapOptions |= kIODMAMapPagingPath; |
3447 | } |
3448 | else pageList = getPageList(dataP); |
3449 | |
3450 | if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) |
3451 | { |
3452 | mapOptions |= kIODMAMapPageListFullyOccupied; |
3453 | } |
3454 | |
3455 | assert(dataP->fDMAAccess); |
3456 | mapOptions |= dataP->fDMAAccess; |
3457 | |
3458 | // Check for direct device non-paged memory |
3459 | if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous; |
3460 | |
3461 | IODMAMapPageList dmaPageList = |
3462 | { |
3463 | .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask), |
3464 | .pageListCount = _pages, |
3465 | .pageList = &pageList[0] |
3466 | }; |
3467 | err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec, |
3468 | command, &dmaPageList, mapAddress, mapLength); |
3469 | |
3470 | if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength); |
3471 | } |
3472 | |
3473 | return (err); |
3474 | } |
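
/*
 * Illustrative only (compiled out): a minimal sketch of how a driver's
 * IODMACommand typically exercises the dmaMap()/dmaUnmap() paths above.
 * The specification values (32-bit device addressing, 4K max segment)
 * and the helper name are example assumptions, not part of this file.
 */
#if 0
static IOReturn ExampleDMAMap(IOMemoryDescriptor * md)
{
    IOReturn       err;
    IODMACommand * cmd;

    cmd = IODMACommand::withSpecification(kIODMACommandOutputHost64,
                                          32,        // device address bits
                                          4096);     // max segment size
    if (!cmd) return (kIOReturnNoMemory);

    // setMemoryDescriptor() auto-prepares the descriptor and, when a
    // system mapper is present, drives the dmaMap() path for the transfer.
    err = cmd->setMemoryDescriptor(md);
    if (kIOReturnSuccess == err)
    {
        UInt64                  offset = 0;
        IODMACommand::Segment64 seg;
        UInt32                  numSeg = 1;

        err = cmd->genIOVMSegments(&offset, &seg, &numSeg);
        // ... program the device with seg.fIOVMAddr / seg.fLength ...

        cmd->clearMemoryDescriptor();   // completes and unmaps via dmaUnmap()
    }
    cmd->release();
    return (err);
}
#endif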
3475 | |
3476 | /* |
3477 | * prepare |
3478 | * |
3479 | * Prepare the memory for an I/O transfer. This involves paging in |
3480 | * the memory, if necessary, and wiring it down for the duration of |
3481 | * the transfer. The complete() method completes the processing of |
 * the memory after the I/O transfer finishes. This method needn't be
 * called for non-pageable memory.
3484 | */ |
3485 | |
3486 | IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) |
3487 | { |
3488 | IOReturn error = kIOReturnSuccess; |
3489 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
3490 | |
3491 | if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) |
3492 | return kIOReturnSuccess; |
3493 | |
3494 | assert (!(kIOMemoryRemote & _flags)); |
3495 | if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); |
3496 | |
3497 | if (_prepareLock) IOLockLock(_prepareLock); |
3498 | |
3499 | if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) |
3500 | { |
3501 | error = wireVirtual(forDirection); |
3502 | } |
3503 | |
3504 | if (kIOReturnSuccess == error) |
3505 | { |
3506 | if (1 == ++_wireCount) |
3507 | { |
3508 | if (kIOMemoryClearEncrypt & _flags) |
3509 | { |
3510 | performOperation(kIOMemoryClearEncrypted, 0, _length); |
3511 | } |
3512 | } |
3513 | } |
3514 | |
3515 | if (_prepareLock) IOLockUnlock(_prepareLock); |
3516 | |
3517 | return error; |
3518 | } |
3519 | |
3520 | /* |
3521 | * complete |
3522 | * |
3523 | * Complete processing of the memory after an I/O transfer finishes. |
 * This method should not be called unless a prepare() was previously
 * issued; prepare() and complete() must occur in pairs, before and
 * after an I/O transfer involving pageable memory.
3527 | */ |
3528 | |
3529 | IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection) |
3530 | { |
3531 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
3532 | ioGMDData * dataP; |
3533 | |
3534 | if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) |
3535 | return kIOReturnSuccess; |
3536 | |
3537 | assert (!(kIOMemoryRemote & _flags)); |
3538 | if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); |
3539 | |
3540 | if (_prepareLock) IOLockLock(_prepareLock); |
3541 | do |
3542 | { |
3543 | assert(_wireCount); |
3544 | if (!_wireCount) break; |
3545 | dataP = getDataP(_memoryEntries); |
3546 | if (!dataP) break; |
3547 | |
3548 | if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true; |
3549 | |
3550 | if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) |
3551 | { |
3552 | performOperation(kIOMemorySetEncrypted, 0, _length); |
3553 | } |
3554 | |
3555 | _wireCount--; |
3556 | if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) |
3557 | { |
3558 | ioPLBlock *ioplList = getIOPLList(dataP); |
3559 | UInt ind, count = getNumIOPL(_memoryEntries, dataP); |
3560 | |
3561 | if (_wireCount) |
3562 | { |
3563 | // kIODirectionCompleteWithDataValid & forDirection |
3564 | if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) |
3565 | { |
3566 | vm_tag_t tag; |
3567 | tag = getVMTag(kernel_map); |
3568 | for (ind = 0; ind < count; ind++) |
3569 | { |
3570 | if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL, tag); |
3571 | } |
3572 | } |
3573 | } |
3574 | else |
3575 | { |
        if (_dmaReferences) panic("complete() while dma active");
3577 | |
3578 | if (dataP->fMappedBaseValid) { |
3579 | dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength); |
3580 | dataP->fMappedBaseValid = dataP->fMappedBase = 0; |
3581 | } |
3582 | #if IOTRACKING |
3583 | if (dataP->fWireTracking.link.next) IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages)); |
3584 | #endif /* IOTRACKING */ |
            // Only complete the IOPLs that we created, which are the TypeVirtual ones
3586 | if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) |
3587 | { |
3588 | for (ind = 0; ind < count; ind++) |
3589 | if (ioplList[ind].fIOPL) { |
3590 | if (dataP->fCompletionError) |
3591 | upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/); |
3592 | else |
3593 | upl_commit(ioplList[ind].fIOPL, 0, 0); |
3594 | upl_deallocate(ioplList[ind].fIOPL); |
3595 | } |
3596 | } else if (kIOMemoryTypeUPL == type) { |
3597 | upl_set_referenced(ioplList[0].fIOPL, false); |
3598 | } |
3599 | |
3600 | (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() |
3601 | |
3602 | dataP->fPreparationID = kIOPreparationIDUnprepared; |
3603 | _flags &= ~kIOMemoryPreparedReadOnly; |
3604 | } |
3605 | } |
3606 | } |
3607 | while (false); |
3608 | |
3609 | if (_prepareLock) IOLockUnlock(_prepareLock); |
3610 | |
3611 | return kIOReturnSuccess; |
3612 | } |
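
/*
 * Illustrative only (compiled out): a minimal sketch of the prepare()/
 * complete() pairing described above. The helper name and address-range
 * parameters are example assumptions.
 */
#if 0
static IOReturn ExampleWiredIO(task_t task, mach_vm_address_t addr, mach_vm_size_t len)
{
    IOReturn             err;
    IOMemoryDescriptor * md;

    md = IOMemoryDescriptor::withAddressRange(addr, len, kIODirectionOutIn, task);
    if (!md) return (kIOReturnNoMemory);

    err = md->prepare(kIODirectionOutIn);    // pages in and wires the memory
    if (kIOReturnSuccess == err)
    {
        // ... perform the I/O transfer against the wired pages ...
        md->complete(kIODirectionOutIn);     // must balance the prepare()
    }
    md->release();
    return (err);
}
#endif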
3613 | |
3614 | IOReturn IOGeneralMemoryDescriptor::doMap( |
3615 | vm_map_t __addressMap, |
3616 | IOVirtualAddress * __address, |
3617 | IOOptionBits options, |
3618 | IOByteCount __offset, |
3619 | IOByteCount __length ) |
3620 | { |
3621 | #ifndef __LP64__ |
    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
3623 | #endif /* !__LP64__ */ |
3624 | |
3625 | kern_return_t err; |
3626 | |
3627 | IOMemoryMap * mapping = (IOMemoryMap *) *__address; |
3628 | mach_vm_size_t offset = mapping->fOffset + __offset; |
3629 | mach_vm_size_t length = mapping->fLength; |
3630 | |
3631 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
3632 | Ranges vec = _ranges; |
3633 | |
3634 | mach_vm_address_t range0Addr = 0; |
3635 | mach_vm_size_t range0Len = 0; |
3636 | |
3637 | if ((offset >= _length) || ((offset + length) > _length)) |
3638 | return( kIOReturnBadArgument ); |
3639 | |
3640 | assert (!(kIOMemoryRemote & _flags)); |
3641 | if (kIOMemoryRemote & _flags) return (0); |
3642 | |
3643 | if (vec.v) |
3644 | getAddrLenForInd(range0Addr, range0Len, type, vec, 0); |
3645 | |
3646 | // mapping source == dest? (could be much better) |
3647 | if (_task |
3648 | && (mapping->fAddressTask == _task) |
3649 | && (mapping->fAddressMap == get_task_map(_task)) |
3650 | && (options & kIOMapAnywhere) |
3651 | && (!(kIOMapUnique & options)) |
3652 | && (1 == _rangesCount) |
3653 | && (0 == offset) |
3654 | && range0Addr |
3655 | && (length <= range0Len)) |
3656 | { |
3657 | mapping->fAddress = range0Addr; |
3658 | mapping->fOptions |= kIOMapStatic; |
3659 | |
3660 | return( kIOReturnSuccess ); |
3661 | } |
3662 | |
3663 | if (!_memRef) |
3664 | { |
3665 | IOOptionBits createOptions = 0; |
3666 | if (!(kIOMapReadOnly & options)) |
3667 | { |
3668 | createOptions |= kIOMemoryReferenceWrite; |
3669 | #if DEVELOPMENT || DEBUG |
3670 | if (kIODirectionOut == (kIODirectionOutIn & _flags)) |
3671 | { |
3672 | OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction" ); |
3673 | } |
3674 | #endif |
3675 | } |
3676 | err = memoryReferenceCreate(createOptions, &_memRef); |
3677 | if (kIOReturnSuccess != err) return (err); |
3678 | } |
3679 | |
    memory_object_t pager;
3681 | pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0); |
3682 | |
3683 | // <upl_transpose // |
3684 | if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options)) |
3685 | { |
3686 | do |
3687 | { |
3688 | upl_t redirUPL2; |
3689 | upl_size_t size; |
3690 | upl_control_flags_t flags; |
3691 | unsigned int lock_count; |
3692 | |
3693 | if (!_memRef || (1 != _memRef->count)) |
3694 | { |
3695 | err = kIOReturnNotReadable; |
3696 | break; |
3697 | } |
3698 | |
3699 | size = round_page(mapping->fLength); |
3700 | flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL |
3701 | | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; |
3702 | |
3703 | if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2, |
3704 | NULL, NULL, |
3705 | &flags, getVMTag(kernel_map))) |
3706 | redirUPL2 = NULL; |
3707 | |
3708 | for (lock_count = 0; |
3709 | IORecursiveLockHaveLock(gIOMemoryLock); |
3710 | lock_count++) { |
3711 | UNLOCK; |
3712 | } |
3713 | err = upl_transpose(redirUPL2, mapping->fRedirUPL); |
3714 | for (; |
3715 | lock_count; |
3716 | lock_count--) { |
3717 | LOCK; |
3718 | } |
3719 | |
3720 | if (kIOReturnSuccess != err) |
3721 | { |
3722 | IOLog("upl_transpose(%x)\n" , err); |
3723 | err = kIOReturnSuccess; |
3724 | } |
3725 | |
3726 | if (redirUPL2) |
3727 | { |
3728 | upl_commit(redirUPL2, NULL, 0); |
3729 | upl_deallocate(redirUPL2); |
3730 | redirUPL2 = 0; |
3731 | } |
3732 | { |
3733 | // swap the memEntries since they now refer to different vm_objects |
3734 | IOMemoryReference * me = _memRef; |
3735 | _memRef = mapping->fMemory->_memRef; |
3736 | mapping->fMemory->_memRef = me; |
3737 | } |
3738 | if (pager) |
3739 | err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options ); |
3740 | } |
3741 | while (false); |
3742 | } |
3743 | // upl_transpose> // |
3744 | else |
3745 | { |
3746 | err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress); |
3747 | #if IOTRACKING |
3748 | if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) |
3749 | { |
            // only dram maps are tracked in the default-on development case
3751 | IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength); |
3752 | } |
3753 | #endif /* IOTRACKING */ |
3754 | if ((err == KERN_SUCCESS) && pager) |
3755 | { |
3756 | err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options); |
3757 | |
3758 | if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0); |
3759 | else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) |
3760 | { |
3761 | mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); |
3762 | } |
3763 | } |
3764 | } |
3765 | |
3766 | return (err); |
3767 | } |
3768 | |
3769 | #if IOTRACKING |
3770 | IOReturn |
3771 | IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task, |
3772 | mach_vm_address_t * address, mach_vm_size_t * size) |
3773 | { |
3774 | #define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field)) |
3775 | |
3776 | IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking)); |
3777 | |
3778 | if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady); |
3779 | |
3780 | *task = map->fAddressTask; |
3781 | *address = map->fAddress; |
3782 | *size = map->fLength; |
3783 | |
3784 | return (kIOReturnSuccess); |
3785 | } |
3786 | #endif /* IOTRACKING */ |
3787 | |
3788 | IOReturn IOGeneralMemoryDescriptor::doUnmap( |
3789 | vm_map_t addressMap, |
3790 | IOVirtualAddress __address, |
3791 | IOByteCount __length ) |
3792 | { |
3793 | return (super::doUnmap(addressMap, __address, __length)); |
3794 | } |
3795 | |
3796 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
3797 | |
3798 | #undef super |
3799 | #define super OSObject |
3800 | |
3801 | OSDefineMetaClassAndStructors( IOMemoryMap, OSObject ) |
3802 | |
3803 | OSMetaClassDefineReservedUnused(IOMemoryMap, 0); |
3804 | OSMetaClassDefineReservedUnused(IOMemoryMap, 1); |
3805 | OSMetaClassDefineReservedUnused(IOMemoryMap, 2); |
3806 | OSMetaClassDefineReservedUnused(IOMemoryMap, 3); |
3807 | OSMetaClassDefineReservedUnused(IOMemoryMap, 4); |
3808 | OSMetaClassDefineReservedUnused(IOMemoryMap, 5); |
3809 | OSMetaClassDefineReservedUnused(IOMemoryMap, 6); |
3810 | OSMetaClassDefineReservedUnused(IOMemoryMap, 7); |
3811 | |
3812 | /* ex-inline function implementation */ |
3813 | IOPhysicalAddress IOMemoryMap::getPhysicalAddress() |
3814 | { return( getPhysicalSegment( 0, 0 )); } |
3815 | |
3816 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
3817 | |
3818 | bool IOMemoryMap::init( |
3819 | task_t intoTask, |
3820 | mach_vm_address_t toAddress, |
3821 | IOOptionBits _options, |
3822 | mach_vm_size_t _offset, |
3823 | mach_vm_size_t _length ) |
3824 | { |
3825 | if (!intoTask) |
3826 | return( false); |
3827 | |
3828 | if (!super::init()) |
3829 | return(false); |
3830 | |
3831 | fAddressMap = get_task_map(intoTask); |
3832 | if (!fAddressMap) |
3833 | return(false); |
3834 | vm_map_reference(fAddressMap); |
3835 | |
3836 | fAddressTask = intoTask; |
3837 | fOptions = _options; |
3838 | fLength = _length; |
3839 | fOffset = _offset; |
3840 | fAddress = toAddress; |
3841 | |
3842 | return (true); |
3843 | } |
3844 | |
3845 | bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset) |
3846 | { |
3847 | if (!_memory) |
3848 | return(false); |
3849 | |
3850 | if (!fSuperMap) |
3851 | { |
3852 | if( (_offset + fLength) > _memory->getLength()) |
3853 | return( false); |
3854 | fOffset = _offset; |
3855 | } |
3856 | |
3857 | _memory->retain(); |
3858 | if (fMemory) |
3859 | { |
3860 | if (fMemory != _memory) |
3861 | fMemory->removeMapping(this); |
3862 | fMemory->release(); |
3863 | } |
3864 | fMemory = _memory; |
3865 | |
3866 | return( true ); |
3867 | } |
3868 | |
3869 | IOReturn IOMemoryDescriptor::doMap( |
3870 | vm_map_t __addressMap, |
3871 | IOVirtualAddress * __address, |
3872 | IOOptionBits options, |
3873 | IOByteCount __offset, |
3874 | IOByteCount __length ) |
3875 | { |
3876 | return (kIOReturnUnsupported); |
3877 | } |
3878 | |
3879 | IOReturn IOMemoryDescriptor::handleFault( |
    void * _pager,
3881 | mach_vm_size_t sourceOffset, |
3882 | mach_vm_size_t length) |
3883 | { |
3884 | if( kIOMemoryRedirected & _flags) |
3885 | { |
3886 | #if DEBUG |
3887 | IOLog("sleep mem redirect %p, %qx\n" , this, sourceOffset); |
3888 | #endif |
3889 | do { |
3890 | SLEEP; |
3891 | } while( kIOMemoryRedirected & _flags ); |
3892 | } |
3893 | return (kIOReturnSuccess); |
3894 | } |
3895 | |
IOReturn IOMemoryDescriptor::populateDevicePager(
    void * _pager,
3898 | vm_map_t addressMap, |
3899 | mach_vm_address_t address, |
3900 | mach_vm_size_t sourceOffset, |
3901 | mach_vm_size_t length, |
3902 | IOOptionBits options ) |
3903 | { |
3904 | IOReturn err = kIOReturnSuccess; |
    memory_object_t pager = (memory_object_t) _pager;
    mach_vm_size_t  size;
    mach_vm_size_t  bytes;
    mach_vm_size_t  page;
    mach_vm_size_t  pageOffset;
    mach_vm_size_t  pagerOffset;
3911 | IOPhysicalLength segLen, chunk; |
3912 | addr64_t physAddr; |
3913 | IOOptionBits type; |
3914 | |
3915 | type = _flags & kIOMemoryTypeMask; |
3916 | |
3917 | if (reserved->dp.pagerContig) |
3918 | { |
3919 | sourceOffset = 0; |
3920 | pagerOffset = 0; |
3921 | } |
3922 | |
3923 | physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ); |
3924 | assert( physAddr ); |
3925 | pageOffset = physAddr - trunc_page_64( physAddr ); |
3926 | pagerOffset = sourceOffset; |
3927 | |
3928 | size = length + pageOffset; |
3929 | physAddr -= pageOffset; |
3930 | |
3931 | segLen += pageOffset; |
3932 | bytes = size; |
3933 | do |
3934 | { |
3935 | // in the middle of the loop only map whole pages |
3936 | if( segLen >= bytes) segLen = bytes; |
3937 | else if (segLen != trunc_page(segLen)) err = kIOReturnVMError; |
3938 | if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument; |
3939 | |
3940 | if (kIOReturnSuccess != err) break; |
3941 | |
3942 | #if DEBUG || DEVELOPMENT |
3943 | if ((kIOMemoryTypeUPL != type) |
3944 | && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) |
3945 | { |
3946 | OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx" , physAddr, segLen); |
3947 | } |
3948 | #endif /* DEBUG || DEVELOPMENT */ |
3949 | |
3950 | chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size); |
3951 | for (page = 0; |
3952 | (page < segLen) && (KERN_SUCCESS == err); |
3953 | page += chunk) |
3954 | { |
3955 | err = device_pager_populate_object(pager, pagerOffset, |
3956 | (ppnum_t)(atop_64(physAddr + page)), chunk); |
3957 | pagerOffset += chunk; |
3958 | } |
3959 | |
3960 | assert (KERN_SUCCESS == err); |
3961 | if (err) break; |
3962 | |
3963 | // This call to vm_fault causes an early pmap level resolution |
3964 | // of the mappings created above for kernel mappings, since |
3965 | // faulting in later can't take place from interrupt level. |
3966 | if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) |
3967 | { |
3968 | err = vm_fault(addressMap, |
3969 | (vm_map_offset_t)trunc_page_64(address), |
3970 | options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE, |
3971 | FALSE, VM_KERN_MEMORY_NONE, |
3972 | THREAD_UNINT, NULL, |
3973 | (vm_map_offset_t)0); |
3974 | |
3975 | if (KERN_SUCCESS != err) break; |
3976 | } |
3977 | |
3978 | sourceOffset += segLen - pageOffset; |
3979 | address += segLen; |
3980 | bytes -= segLen; |
3981 | pageOffset = 0; |
3982 | } |
3983 | while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ))); |
3984 | |
3985 | if (bytes) |
3986 | err = kIOReturnBadArgument; |
3987 | |
3988 | return (err); |
3989 | } |
3990 | |
3991 | IOReturn IOMemoryDescriptor::doUnmap( |
3992 | vm_map_t addressMap, |
3993 | IOVirtualAddress __address, |
3994 | IOByteCount __length ) |
3995 | { |
3996 | IOReturn err; |
3997 | IOMemoryMap * mapping; |
3998 | mach_vm_address_t address; |
3999 | mach_vm_size_t length; |
4000 | |
    if (__length) panic("doUnmap");
4002 | |
4003 | mapping = (IOMemoryMap *) __address; |
4004 | addressMap = mapping->fAddressMap; |
4005 | address = mapping->fAddress; |
4006 | length = mapping->fLength; |
4007 | |
4008 | if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS; |
4009 | else |
4010 | { |
4011 | if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) |
4012 | addressMap = IOPageableMapForAddress( address ); |
4013 | #if DEBUG |
        if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
                                               addressMap, address, length );
4016 | #endif |
4017 | err = mach_vm_deallocate( addressMap, address, length ); |
4018 | } |
4019 | |
4020 | #if IOTRACKING |
4021 | IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking); |
4022 | #endif /* IOTRACKING */ |
4023 | |
4024 | return (err); |
4025 | } |
4026 | |
4027 | IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) |
4028 | { |
4029 | IOReturn err = kIOReturnSuccess; |
4030 | IOMemoryMap * mapping = 0; |
4031 | OSIterator * iter; |
4032 | |
4033 | LOCK; |
4034 | |
4035 | if( doRedirect) |
4036 | _flags |= kIOMemoryRedirected; |
4037 | else |
4038 | _flags &= ~kIOMemoryRedirected; |
4039 | |
4040 | do { |
4041 | if( (iter = OSCollectionIterator::withCollection( _mappings))) { |
4042 | |
            memory_object_t pager;
4044 | |
4045 | if( reserved) |
4046 | pager = (memory_object_t) reserved->dp.devicePager; |
4047 | else |
4048 | pager = MACH_PORT_NULL; |
4049 | |
4050 | while( (mapping = (IOMemoryMap *) iter->getNextObject())) |
4051 | { |
4052 | mapping->redirect( safeTask, doRedirect ); |
4053 | if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) |
4054 | { |
4055 | err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache ); |
4056 | } |
4057 | } |
4058 | |
4059 | iter->release(); |
4060 | } |
4061 | } while( false ); |
4062 | |
4063 | if (!doRedirect) |
4064 | { |
4065 | WAKEUP; |
4066 | } |
4067 | |
4068 | UNLOCK; |
4069 | |
4070 | #ifndef __LP64__ |
4071 | // temporary binary compatibility |
4072 | IOSubMemoryDescriptor * subMem; |
4073 | if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) |
4074 | err = subMem->redirect( safeTask, doRedirect ); |
4075 | else |
4076 | err = kIOReturnSuccess; |
4077 | #endif /* !__LP64__ */ |
4078 | |
4079 | return( err ); |
4080 | } |
4081 | |
4082 | IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect ) |
4083 | { |
4084 | IOReturn err = kIOReturnSuccess; |
4085 | |
4086 | if( fSuperMap) { |
4087 | // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect ); |
4088 | } else { |
4089 | |
4090 | LOCK; |
4091 | |
4092 | do |
4093 | { |
4094 | if (!fAddress) |
4095 | break; |
4096 | if (!fAddressMap) |
4097 | break; |
4098 | |
4099 | if ((!safeTask || (get_task_map(safeTask) != fAddressMap)) |
4100 | && (0 == (fOptions & kIOMapStatic))) |
4101 | { |
4102 | IOUnmapPages( fAddressMap, fAddress, fLength ); |
4103 | err = kIOReturnSuccess; |
4104 | #if DEBUG |
4105 | IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n" , doRedirect, this, fAddress, fLength, fAddressMap); |
4106 | #endif |
4107 | } |
4108 | else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) |
4109 | { |
4110 | IOOptionBits newMode; |
4111 | newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache); |
4112 | IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode); |
4113 | } |
4114 | } |
4115 | while (false); |
4116 | UNLOCK; |
4117 | } |
4118 | |
4119 | if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) |
4120 | || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) |
4121 | && safeTask |
4122 | && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) |
4123 | fMemory->redirect(safeTask, doRedirect); |
4124 | |
4125 | return( err ); |
4126 | } |
4127 | |
4128 | IOReturn IOMemoryMap::unmap( void ) |
4129 | { |
4130 | IOReturn err; |
4131 | |
4132 | LOCK; |
4133 | |
4134 | if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory |
4135 | && (0 == (kIOMapStatic & fOptions))) { |
4136 | |
4137 | err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0); |
4138 | |
4139 | } else |
4140 | err = kIOReturnSuccess; |
4141 | |
4142 | if (fAddressMap) |
4143 | { |
4144 | vm_map_deallocate(fAddressMap); |
4145 | fAddressMap = 0; |
4146 | } |
4147 | |
4148 | fAddress = 0; |
4149 | |
4150 | UNLOCK; |
4151 | |
4152 | return( err ); |
4153 | } |
4154 | |
4155 | void IOMemoryMap::taskDied( void ) |
4156 | { |
4157 | LOCK; |
4158 | if (fUserClientUnmap) unmap(); |
4159 | #if IOTRACKING |
4160 | else IOTrackingRemoveUser(gIOMapTracking, &fTracking); |
4161 | #endif /* IOTRACKING */ |
4162 | |
4163 | if( fAddressMap) { |
4164 | vm_map_deallocate(fAddressMap); |
4165 | fAddressMap = 0; |
4166 | } |
4167 | fAddressTask = 0; |
4168 | fAddress = 0; |
4169 | UNLOCK; |
4170 | } |
4171 | |
4172 | IOReturn IOMemoryMap::userClientUnmap( void ) |
4173 | { |
4174 | fUserClientUnmap = true; |
4175 | return (kIOReturnSuccess); |
4176 | } |
4177 | |
// Overload the release mechanism. All mappings must be a member
// of a memory descriptor's _mappings set. This means that we
// always have 2 references on a mapping. When either of these
// references is released we need to free ourselves.
4182 | void IOMemoryMap::taggedRelease(const void *tag) const |
4183 | { |
4184 | LOCK; |
4185 | super::taggedRelease(tag, 2); |
4186 | UNLOCK; |
4187 | } |
4188 | |
4189 | void IOMemoryMap::free() |
4190 | { |
4191 | unmap(); |
4192 | |
4193 | if (fMemory) |
4194 | { |
4195 | LOCK; |
4196 | fMemory->removeMapping(this); |
4197 | UNLOCK; |
4198 | fMemory->release(); |
4199 | } |
4200 | |
4201 | if (fOwner && (fOwner != fMemory)) |
4202 | { |
4203 | LOCK; |
4204 | fOwner->removeMapping(this); |
4205 | UNLOCK; |
4206 | } |
4207 | |
4208 | if (fSuperMap) |
4209 | fSuperMap->release(); |
4210 | |
4211 | if (fRedirUPL) { |
4212 | upl_commit(fRedirUPL, NULL, 0); |
4213 | upl_deallocate(fRedirUPL); |
4214 | } |
4215 | |
4216 | super::free(); |
4217 | } |
4218 | |
4219 | IOByteCount IOMemoryMap::getLength() |
4220 | { |
4221 | return( fLength ); |
4222 | } |
4223 | |
4224 | IOVirtualAddress IOMemoryMap::getVirtualAddress() |
4225 | { |
4226 | #ifndef __LP64__ |
4227 | if (fSuperMap) |
4228 | fSuperMap->getVirtualAddress(); |
4229 | else if (fAddressMap |
4230 | && vm_map_is_64bit(fAddressMap) |
4231 | && (sizeof(IOVirtualAddress) < 8)) |
4232 | { |
4233 | OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()" , fAddress); |
4234 | } |
4235 | #endif /* !__LP64__ */ |
4236 | |
4237 | return (fAddress); |
4238 | } |
4239 | |
4240 | #ifndef __LP64__ |
4241 | mach_vm_address_t IOMemoryMap::getAddress() |
4242 | { |
4243 | return( fAddress); |
4244 | } |
4245 | |
4246 | mach_vm_size_t IOMemoryMap::getSize() |
4247 | { |
4248 | return( fLength ); |
4249 | } |
4250 | #endif /* !__LP64__ */ |
4251 | |
4252 | |
4253 | task_t IOMemoryMap::getAddressTask() |
4254 | { |
4255 | if( fSuperMap) |
4256 | return( fSuperMap->getAddressTask()); |
4257 | else |
4258 | return( fAddressTask); |
4259 | } |
4260 | |
4261 | IOOptionBits IOMemoryMap::getMapOptions() |
4262 | { |
4263 | return( fOptions); |
4264 | } |
4265 | |
4266 | IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor() |
4267 | { |
4268 | return( fMemory ); |
4269 | } |
4270 | |
4271 | IOMemoryMap * IOMemoryMap::copyCompatible( |
4272 | IOMemoryMap * newMapping ) |
4273 | { |
4274 | task_t task = newMapping->getAddressTask(); |
4275 | mach_vm_address_t toAddress = newMapping->fAddress; |
4276 | IOOptionBits _options = newMapping->fOptions; |
4277 | mach_vm_size_t _offset = newMapping->fOffset; |
4278 | mach_vm_size_t _length = newMapping->fLength; |
4279 | |
4280 | if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) |
4281 | return( 0 ); |
4282 | if( (fOptions ^ _options) & kIOMapReadOnly) |
4283 | return( 0 ); |
4284 | if( (kIOMapDefaultCache != (_options & kIOMapCacheMask)) |
4285 | && ((fOptions ^ _options) & kIOMapCacheMask)) |
4286 | return( 0 ); |
4287 | |
4288 | if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) |
4289 | return( 0 ); |
4290 | |
4291 | if( _offset < fOffset) |
4292 | return( 0 ); |
4293 | |
4294 | _offset -= fOffset; |
4295 | |
4296 | if( (_offset + _length) > fLength) |
4297 | return( 0 ); |
4298 | |
4299 | retain(); |
4300 | if( (fLength == _length) && (!_offset)) |
4301 | { |
4302 | newMapping = this; |
4303 | } |
4304 | else |
4305 | { |
4306 | newMapping->fSuperMap = this; |
4307 | newMapping->fOffset = fOffset + _offset; |
4308 | newMapping->fAddress = fAddress + _offset; |
4309 | } |
4310 | |
4311 | return( newMapping ); |
4312 | } |
4313 | |
4314 | IOReturn IOMemoryMap::wireRange( |
4315 | uint32_t options, |
4316 | mach_vm_size_t offset, |
4317 | mach_vm_size_t length) |
4318 | { |
4319 | IOReturn kr; |
4320 | mach_vm_address_t start = trunc_page_64(fAddress + offset); |
4321 | mach_vm_address_t end = round_page_64(fAddress + offset + length); |
4322 | vm_prot_t prot; |
4323 | |
4324 | prot = (kIODirectionOutIn & options); |
4325 | if (prot) |
4326 | { |
4327 | kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE); |
4328 | } |
4329 | else |
4330 | { |
4331 | kr = vm_map_unwire(fAddressMap, start, end, FALSE); |
4332 | } |
4333 | |
4334 | return (kr); |
4335 | } |
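
/*
 * Illustrative only (compiled out): wireRange() reuses the direction bits
 * as the wire protection, so a non-zero direction wires and a zero options
 * value unwires. "map" below is an assumed existing IOMemoryMap.
 */
#if 0
map->wireRange(kIODirectionOutIn, 0, page_size);  // wire first page read/write
// ... access the pages without faulting ...
map->wireRange(0, 0, page_size);                  // unwire the same range
#endif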
4336 | |
4337 | |
4338 | IOPhysicalAddress |
4339 | #ifdef __LP64__ |
4340 | IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options) |
4341 | #else /* !__LP64__ */ |
4342 | IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length) |
4343 | #endif /* !__LP64__ */ |
4344 | { |
4345 | IOPhysicalAddress address; |
4346 | |
4347 | LOCK; |
4348 | #ifdef __LP64__ |
4349 | address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options ); |
4350 | #else /* !__LP64__ */ |
4351 | address = fMemory->getPhysicalSegment( fOffset + _offset, _length ); |
4352 | #endif /* !__LP64__ */ |
4353 | UNLOCK; |
4354 | |
4355 | return( address ); |
4356 | } |
4357 | |
4358 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
4359 | |
4360 | #undef super |
4361 | #define super OSObject |
4362 | |
4363 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
4364 | |
4365 | void IOMemoryDescriptor::initialize( void ) |
4366 | { |
4367 | if( 0 == gIOMemoryLock) |
4368 | gIOMemoryLock = IORecursiveLockAlloc(); |
4369 | |
4370 | gIOLastPage = IOGetLastPageNumber(); |
4371 | } |
4372 | |
4373 | void IOMemoryDescriptor::free( void ) |
4374 | { |
4375 | if( _mappings) _mappings->release(); |
4376 | |
4377 | if (reserved) |
4378 | { |
4379 | IODelete(reserved, IOMemoryDescriptorReserved, 1); |
4380 | reserved = NULL; |
4381 | } |
4382 | super::free(); |
4383 | } |
4384 | |
4385 | IOMemoryMap * IOMemoryDescriptor::setMapping( |
4386 | task_t intoTask, |
4387 | IOVirtualAddress mapAddress, |
4388 | IOOptionBits options ) |
4389 | { |
4390 | return (createMappingInTask( intoTask, mapAddress, |
4391 | options | kIOMapStatic, |
4392 | 0, getLength() )); |
4393 | } |
4394 | |
4395 | IOMemoryMap * IOMemoryDescriptor::map( |
4396 | IOOptionBits options ) |
4397 | { |
4398 | return (createMappingInTask( kernel_task, 0, |
4399 | options | kIOMapAnywhere, |
4400 | 0, getLength() )); |
4401 | } |
4402 | |
4403 | #ifndef __LP64__ |
4404 | IOMemoryMap * IOMemoryDescriptor::map( |
4405 | task_t intoTask, |
4406 | IOVirtualAddress atAddress, |
4407 | IOOptionBits options, |
4408 | IOByteCount offset, |
4409 | IOByteCount length ) |
4410 | { |
4411 | if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) |
4412 | { |
4413 | OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()" ); |
4414 | return (0); |
4415 | } |
4416 | |
4417 | return (createMappingInTask(intoTask, atAddress, |
4418 | options, offset, length)); |
4419 | } |
4420 | #endif /* !__LP64__ */ |
4421 | |
4422 | IOMemoryMap * IOMemoryDescriptor::createMappingInTask( |
4423 | task_t intoTask, |
4424 | mach_vm_address_t atAddress, |
4425 | IOOptionBits options, |
4426 | mach_vm_size_t offset, |
4427 | mach_vm_size_t length) |
4428 | { |
4429 | IOMemoryMap * result; |
4430 | IOMemoryMap * mapping; |
4431 | |
4432 | if (0 == length) |
4433 | length = getLength(); |
4434 | |
4435 | mapping = new IOMemoryMap; |
4436 | |
4437 | if( mapping |
4438 | && !mapping->init( intoTask, atAddress, |
4439 | options, offset, length )) { |
4440 | mapping->release(); |
4441 | mapping = 0; |
4442 | } |
4443 | |
4444 | if (mapping) |
4445 | result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0); |
4446 | else |
4447 | result = 0; |
4448 | |
4449 | #if DEBUG |
4450 | if (!result) |
4451 | IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n" , |
4452 | this, atAddress, (uint32_t) options, offset, length); |
4453 | #endif |
4454 | |
4455 | return (result); |
4456 | } |
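
/*
 * Illustrative only (compiled out): a minimal sketch of mapping a
 * descriptor into the kernel task with createMappingInTask(). "md" is an
 * assumed existing descriptor; a zero length maps the whole descriptor.
 */
#if 0
IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
                                            kIOMapAnywhere | kIOMapReadOnly,
                                            0, 0);
if (map)
{
    IOVirtualAddress va = map->getVirtualAddress();
    // ... read through va ...
    map->release();   // unmaps unless the mapping was made kIOMapStatic
}
#endif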
4457 | |
4458 | #ifndef __LP64__ // there is only a 64 bit version for LP64 |
4459 | IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, |
4460 | IOOptionBits options, |
4461 | IOByteCount offset) |
4462 | { |
4463 | return (redirect(newBackingMemory, options, (mach_vm_size_t)offset)); |
4464 | } |
4465 | #endif |
4466 | |
4467 | IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, |
4468 | IOOptionBits options, |
4469 | mach_vm_size_t offset) |
4470 | { |
4471 | IOReturn err = kIOReturnSuccess; |
4472 | IOMemoryDescriptor * physMem = 0; |
4473 | |
4474 | LOCK; |
4475 | |
4476 | if (fAddress && fAddressMap) do |
4477 | { |
4478 | if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) |
4479 | || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) |
4480 | { |
4481 | physMem = fMemory; |
4482 | physMem->retain(); |
4483 | } |
4484 | |
4485 | if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) |
4486 | { |
4487 | upl_size_t size = round_page(fLength); |
4488 | upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL |
4489 | | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; |
4490 | if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL, |
4491 | NULL, NULL, |
4492 | &flags, fMemory->getVMTag(kernel_map))) |
4493 | fRedirUPL = 0; |
4494 | |
4495 | if (physMem) |
4496 | { |
4497 | IOUnmapPages( fAddressMap, fAddress, fLength ); |
4498 | if ((false)) |
4499 | physMem->redirect(0, true); |
4500 | } |
4501 | } |
4502 | |
4503 | if (newBackingMemory) |
4504 | { |
4505 | if (newBackingMemory != fMemory) |
4506 | { |
4507 | fOffset = 0; |
4508 | if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this, |
4509 | options | kIOMapUnique | kIOMapReference | kIOMap64Bit, |
4510 | offset, fLength)) |
4511 | err = kIOReturnError; |
4512 | } |
4513 | if (fRedirUPL) |
4514 | { |
4515 | upl_commit(fRedirUPL, NULL, 0); |
4516 | upl_deallocate(fRedirUPL); |
4517 | fRedirUPL = 0; |
4518 | } |
4519 | if ((false) && physMem) |
4520 | physMem->redirect(0, false); |
4521 | } |
4522 | } |
4523 | while (false); |
4524 | |
4525 | UNLOCK; |
4526 | |
4527 | if (physMem) |
4528 | physMem->release(); |
4529 | |
4530 | return (err); |
4531 | } |
4532 | |
4533 | IOMemoryMap * IOMemoryDescriptor::makeMapping( |
4534 | IOMemoryDescriptor * owner, |
4535 | task_t __intoTask, |
4536 | IOVirtualAddress __address, |
4537 | IOOptionBits options, |
4538 | IOByteCount __offset, |
4539 | IOByteCount __length ) |
4540 | { |
4541 | #ifndef __LP64__ |
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4543 | #endif /* !__LP64__ */ |
4544 | |
4545 | IOMemoryDescriptor * mapDesc = 0; |
4546 | __block IOMemoryMap * result = 0; |
4547 | |
4548 | IOMemoryMap * mapping = (IOMemoryMap *) __address; |
4549 | mach_vm_size_t offset = mapping->fOffset + __offset; |
4550 | mach_vm_size_t length = mapping->fLength; |
4551 | |
4552 | mapping->fOffset = offset; |
4553 | |
4554 | LOCK; |
4555 | |
4556 | do |
4557 | { |
4558 | if (kIOMapStatic & options) |
4559 | { |
4560 | result = mapping; |
4561 | addMapping(mapping); |
4562 | mapping->setMemoryDescriptor(this, 0); |
4563 | continue; |
4564 | } |
4565 | |
4566 | if (kIOMapUnique & options) |
4567 | { |
4568 | addr64_t phys; |
4569 | IOByteCount physLen; |
4570 | |
4571 | // if (owner != this) continue; |
4572 | |
4573 | if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) |
4574 | || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) |
4575 | { |
4576 | phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); |
4577 | if (!phys || (physLen < length)) |
4578 | continue; |
4579 | |
4580 | mapDesc = IOMemoryDescriptor::withAddressRange( |
4581 | phys, length, getDirection() | kIOMemoryMapperNone, NULL); |
4582 | if (!mapDesc) |
4583 | continue; |
4584 | offset = 0; |
4585 | mapping->fOffset = offset; |
4586 | } |
4587 | } |
4588 | else |
4589 | { |
4590 | // look for a compatible existing mapping |
4591 | if (_mappings) _mappings->iterateObjects(^(OSObject * object) |
4592 | { |
4593 | IOMemoryMap * lookMapping = (IOMemoryMap *) object; |
4594 | if ((result = lookMapping->copyCompatible(mapping))) |
4595 | { |
4596 | addMapping(result); |
4597 | result->setMemoryDescriptor(this, offset); |
4598 | return (true); |
4599 | } |
4600 | return (false); |
4601 | }); |
4602 | if (result || (options & kIOMapReference)) |
4603 | { |
4604 | if (result != mapping) |
4605 | { |
4606 | mapping->release(); |
4607 | mapping = NULL; |
4608 | } |
4609 | continue; |
4610 | } |
4611 | } |
4612 | |
4613 | if (!mapDesc) |
4614 | { |
4615 | mapDesc = this; |
4616 | mapDesc->retain(); |
4617 | } |
4618 | IOReturn |
4619 | kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 ); |
4620 | if (kIOReturnSuccess == kr) |
4621 | { |
4622 | result = mapping; |
4623 | mapDesc->addMapping(result); |
4624 | result->setMemoryDescriptor(mapDesc, offset); |
4625 | } |
4626 | else |
4627 | { |
4628 | mapping->release(); |
4629 | mapping = NULL; |
4630 | } |
4631 | } |
4632 | while( false ); |
4633 | |
4634 | UNLOCK; |
4635 | |
4636 | if (mapDesc) |
4637 | mapDesc->release(); |
4638 | |
4639 | return (result); |
4640 | } |
4641 | |
4642 | void IOMemoryDescriptor::addMapping( |
4643 | IOMemoryMap * mapping ) |
4644 | { |
4645 | if( mapping) |
4646 | { |
4647 | if( 0 == _mappings) |
4648 | _mappings = OSSet::withCapacity(1); |
4649 | if( _mappings ) |
4650 | _mappings->setObject( mapping ); |
4651 | } |
4652 | } |
4653 | |
4654 | void IOMemoryDescriptor::removeMapping( |
4655 | IOMemoryMap * mapping ) |
4656 | { |
4657 | if( _mappings) |
4658 | _mappings->removeObject( mapping); |
4659 | } |
4660 | |
4661 | #ifndef __LP64__ |
4662 | // obsolete initializers |
4663 | // - initWithOptions is the designated initializer |
4664 | bool |
4665 | IOMemoryDescriptor::initWithAddress(void * address, |
4666 | IOByteCount length, |
4667 | IODirection direction) |
4668 | { |
4669 | return( false ); |
4670 | } |
4671 | |
4672 | bool |
4673 | IOMemoryDescriptor::initWithAddress(IOVirtualAddress address, |
4674 | IOByteCount length, |
4675 | IODirection direction, |
4676 | task_t task) |
4677 | { |
4678 | return( false ); |
4679 | } |
4680 | |
4681 | bool |
4682 | IOMemoryDescriptor::initWithPhysicalAddress( |
4683 | IOPhysicalAddress address, |
4684 | IOByteCount length, |
4685 | IODirection direction ) |
4686 | { |
4687 | return( false ); |
4688 | } |
4689 | |
4690 | bool |
4691 | IOMemoryDescriptor::initWithRanges( |
4692 | IOVirtualRange * ranges, |
4693 | UInt32 withCount, |
4694 | IODirection direction, |
4695 | task_t task, |
4696 | bool asReference) |
4697 | { |
4698 | return( false ); |
4699 | } |
4700 | |
4701 | bool |
4702 | IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, |
4703 | UInt32 withCount, |
4704 | IODirection direction, |
4705 | bool asReference) |
4706 | { |
4707 | return( false ); |
4708 | } |
4709 | |
4710 | void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset, |
4711 | IOByteCount * lengthOfSegment) |
4712 | { |
4713 | return( 0 ); |
4714 | } |
4715 | #endif /* !__LP64__ */ |
4716 | |
4717 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
4718 | |
4719 | bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const |
4720 | { |
4721 | OSSymbol const *keys[2] = {0}; |
4722 | OSObject *values[2] = {0}; |
4723 | OSArray * array; |
4724 | vm_size_t vcopy_size; |
4725 | |
4726 | struct SerData { |
4727 | user_addr_t address; |
4728 | user_size_t length; |
4729 | } *vcopy = NULL; |
4730 | unsigned int index, nRanges; |
4731 | bool result = false; |
4732 | |
4733 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
4734 | |
4735 | if (s == NULL) return false; |
4736 | |
4737 | array = OSArray::withCapacity(4); |
4738 | if (!array) return (false); |
4739 | |
4740 | nRanges = _rangesCount; |
4741 | if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) { |
4742 | result = false; |
4743 | goto bail; |
4744 | } |
4745 | vcopy = (SerData *) IOMalloc(vcopy_size); |
4746 | if (vcopy == 0) { |
4747 | result = false; |
4748 | goto bail; |
4749 | } |
4750 | |
    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");
4753 | |
4754 | // Copy the volatile data so we don't have to allocate memory |
4755 | // while the lock is held. |
4756 | LOCK; |
4757 | if (nRanges == _rangesCount) { |
4758 | Ranges vec = _ranges; |
4759 | for (index = 0; index < nRanges; index++) { |
4760 | mach_vm_address_t addr; mach_vm_size_t len; |
4761 | getAddrLenForInd(addr, len, type, vec, index); |
4762 | vcopy[index].address = addr; |
4763 | vcopy[index].length = len; |
4764 | } |
4765 | } else { |
4766 | // The descriptor changed out from under us. Give up. |
4767 | UNLOCK; |
4768 | result = false; |
4769 | goto bail; |
4770 | } |
4771 | UNLOCK; |
4772 | |
4773 | for (index = 0; index < nRanges; index++) |
4774 | { |
4775 | user_addr_t addr = vcopy[index].address; |
4776 | IOByteCount len = (IOByteCount) vcopy[index].length; |
4777 | values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8); |
4778 | if (values[0] == 0) { |
4779 | result = false; |
4780 | goto bail; |
4781 | } |
4782 | values[1] = OSNumber::withNumber(len, sizeof(len) * 8); |
4783 | if (values[1] == 0) { |
4784 | result = false; |
4785 | goto bail; |
4786 | } |
4787 | OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2); |
4788 | if (dict == 0) { |
4789 | result = false; |
4790 | goto bail; |
4791 | } |
4792 | array->setObject(dict); |
4793 | dict->release(); |
4794 | values[0]->release(); |
4795 | values[1]->release(); |
4796 | values[0] = values[1] = 0; |
4797 | } |
4798 | |
4799 | result = array->serialize(s); |
4800 | |
4801 | bail: |
4802 | if (array) |
4803 | array->release(); |
4804 | if (values[0]) |
4805 | values[0]->release(); |
4806 | if (values[1]) |
4807 | values[1]->release(); |
4808 | if (keys[0]) |
4809 | keys[0]->release(); |
4810 | if (keys[1]) |
4811 | keys[1]->release(); |
4812 | if (vcopy) |
4813 | IOFree(vcopy, vcopy_size); |
4814 | |
4815 | return result; |
4816 | } |
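
/*
 * Illustrative only (compiled out): serialize() above emits an array of
 * {address, length} dictionaries. "md" is an assumed existing descriptor.
 */
#if 0
OSSerialize * s = OSSerialize::withCapacity(4096);
if (s)
{
    if (md->serialize(s)) IOLog("%s\n", s->text());
    s->release();
}
#endif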
4817 | |
4818 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
4819 | |
4820 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0); |
4821 | #ifdef __LP64__ |
4822 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1); |
4823 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2); |
4824 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3); |
4825 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4); |
4826 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5); |
4827 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6); |
4828 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7); |
4829 | #else /* !__LP64__ */ |
4830 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1); |
4831 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2); |
4832 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3); |
4833 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4); |
4834 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5); |
4835 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6); |
4836 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7); |
4837 | #endif /* !__LP64__ */ |
4838 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8); |
4839 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9); |
4840 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10); |
4841 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11); |
4842 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12); |
4843 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13); |
4844 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14); |
4845 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15); |
4846 | |
4847 | /* ex-inline function implementation */ |
4848 | IOPhysicalAddress |
4849 | IOMemoryDescriptor::getPhysicalAddress() |
4850 | { return( getPhysicalSegment( 0, 0 )); } |
4851 | |