/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <os/log_private.h>
#include <sys/msgbuf.h>
#include <console/serial_protos.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */


#define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))


extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char *fmt,
    va_list argp,
    void (*putc)(int, void *),
    void *arg,
    int radix,
    int is_log);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;

#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    IOTrackingInit();
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
                                             kIOTrackingQueueTypeAlloc,
                                             37);
    gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
                                          kIOTrackingQueueTypeDefaultOn
                                          | kIOTrackingQueueTypeMap
                                          | kIOTrackingQueueTypeUser,
                                          0);
#endif

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        VM_MAP_KERNEL_FLAGS_NONE,
                        VM_KERN_MEMORY_IOKIT,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE/64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);


    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

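// log2up - smallest n such that (1 << n) >= size; returns 0 for size <= 1.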
static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING
struct IOLibMallocHeader
{
    IOTrackingAddress tracking;
};
#endif

#if IOTRACKING
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

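/*
 * IOMalloc / IOFree
 *
 * General-purpose kernel allocation backed by kalloc. When allocation
 * tracking is enabled (TRACK_ALLOC), an IOLibMallocHeader is prepended to
 * the block and the returned pointer is offset past it; IOFree undoes the
 * offset before calling kfree with the same adjusted size.
 */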
void * IOMalloc(vm_size_t size)
{
    void *    address;
    vm_size_t allocSize;

    allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
    if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL);  // overflow
#endif
    address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);

    if ( address ) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr)) address;
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size = size;
            IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
        }
#endif
        address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * inAddress, vm_size_t size)
{
    void * address;

    if ((address = inAddress))
    {
        address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            IOLibMallocHeader * hdr;
            struct ptr_reference{ void * ptr; };
            volatile struct ptr_reference ptr;

            // we're about to block in IOTrackingRemove(), make sure the original pointer
            // exists in memory or a register for leak scanning to find
            ptr.ptr = inAddress;

            hdr = (typeof(hdr)) address;
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
            ptr.ptr = NULL;
        }
#endif

        kfree(address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
        OSAddAtomic(-size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

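// Pick a VM tag for an allocation: for kernel maps use the caller's tag from
// the backtrace, falling back to VM_KERN_MEMORY_IOKIT; non-kernel maps always
// get VM_MEMORY_IOKIT.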
vm_tag_t
IOMemoryTag(vm_map_t map)
{
    vm_tag_t tag;

    if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);

    tag = vm_tag_bt();
    if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;

    return (tag);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibPageMallocHeader
{
    mach_vm_size_t    allocationSize;
    mach_vm_address_t allocationAddress;
#if IOTRACKING
    IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

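/*
 * IOMallocAligned / IOFreeAligned
 *
 * Allocations of at least a page go straight to kernel_memory_allocate with
 * the requested alignment mask. Smaller allocations are over-allocated
 * (alignment slack plus an IOLibPageMallocHeader); the header stored just
 * below the aligned address records the underlying allocation's address and
 * size so IOFreeAligned can release it.
 *
 * Illustrative use: buf = IOMallocAligned(len, 4096); ... IOFreeAligned(buf, len);
 */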
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t           kr;
    vm_offset_t             address;
    vm_offset_t             allocationAddress;
    vm_size_t               adjustedSize;
    uintptr_t               alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return 0;

    alignment = (1UL << log2up(alignment));
    alignMask = alignment - 1;
    adjustedSize = size + sizeofIOLibPageMallocHeader;

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0, IOMemoryTag(kernel_map));
        if (KERN_SUCCESS != kr) address = 0;
#if IOTRACKING
        else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0, IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != kr) allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                      & (~alignMask);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if( address) {
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        } else {
            kfree((void *)allocationAddress, adjustedSize);
        }
    }

#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
#endif
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif
}

#if __arm__ || __arm64__
extern unsigned long gPhysBase, gPhysSize;
#endif

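/*
 * IOKernelAllocateWithPhysicalRestrict
 *
 * Allocates wired memory that may be physically contiguous and/or constrained
 * to lie below maxPhys. Constrained or contiguous requests use
 * kmem_alloc_contig / kernel_memory_allocate (KMA_LOMEM for requests that fit
 * under 4GB); small unconstrained requests fall back to kalloc with an
 * IOLibPageMallocHeader, mirroring IOMallocAligned. Pair with
 * IOKernelFreePhysical. IOMallocContiguous uses this path when no physical
 * address is requested.
 */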
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t           kr;
    mach_vm_address_t       address;
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    mach_vm_address_t       alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;

    if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) return (0);

    contiguous = (contiguous && (adjustedSize > page_size))
                 || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                     || (alignment > page_size);

        if (!contiguous)
        {
#if __arm__ || __arm64__
            if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize))
            {
                maxPhys = 0;
            }
            else
#endif
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr)
        {
            address = virt;
#if IOTRACKING
            if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif
        }
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        if (adjustedSize < size) return (0);
        allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {


            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                      & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
    }

    return (address);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

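/*
 * IOMallocContiguous / IOFreeContiguous
 *
 * Without a physicalAddress out-parameter this is a thin wrapper around
 * IOKernelAllocateWithPhysicalRestrict. With one, the memory comes from an
 * IOBufferMemoryDescriptor (32-bit physical mask derived from the requested
 * alignment) and is remembered on gIOMallocContiguousEntries so that
 * IOFreeContiguous can later find and release the descriptor.
 */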
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

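/*
 * IOIteratePageableMaps
 *
 * Runs the callback against the existing pageable submaps, starting at the
 * hint, and grows gIOKitPageableSpace with a new kmem_suballoc'd submap
 * (up to kIOMaxPageableMaps) whenever every existing map reports
 * KERN_NO_SPACE.
 */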
kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if (KERN_NO_SPACE != kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           VM_MAP_KERNEL_FLAGS_NONE,
                           VM_KERN_MEMORY_IOKIT,
                           &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
    vm_tag_t    tag;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    ref.tag = tag;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
}

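// Requests close to a page or larger go directly to the pageable submaps;
// smaller ones are carved out of shared pages by the iopa chunk allocator.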
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
    else                                                addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*gIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

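// Claim 'count' contiguous free chunks within one page, subject to the
// alignment mask: 'avail' holds one bit per gIOPageAllocChunkBytes chunk
// (bit 63 = chunk 0), and the shift-and-AND loop collapses it so that a
// surviving set bit marks a chunk that begins a run of at least 'count'
// free chunks.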
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}

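/*
 * iopa_alloc
 *
 * Sub-page chunk allocator: each backing page is split into 64 chunks of
 * gIOPageAllocChunkBytes, tracked by a free bitmap in an iopa_page_t header
 * that occupies the page's last chunk (hence the initial avail value of
 * -2ULL). align_masks[] restricts the permitted starting chunks so the
 * returned address honours the requested byte alignment. If no page on the
 * list has room, 'alloc' supplies a fresh page.
 */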
uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            a->pagecount++;
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}

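/*
 * iopa_free
 *
 * Returns chunks to their page's bitmap. A page that had been full is put
 * back on the allocator's list; a page that becomes completely free
 * (avail == -2ULL again) is unlinked and its base address is returned to
 * the caller so the backing page itself can be released (see IOFreePageable).
 */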
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep (block the calling thread) for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Sleep for the indicated number of milliseconds, plus up to an additional
 * number of milliseconds bounded by the leeway value.
 */
void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}

/*
 * Spin for the indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for the indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1,0);

__attribute__((noinline,not_tail_called))
void IOLog(const char *format, ...)
{
    void *caller = __builtin_return_address(0);
    va_list ap;

    va_start(ap, format);
    _IOLogv(format, ap, caller);
    va_end(ap);
}

__attribute__((noinline,not_tail_called))
void IOLogv(const char *format, va_list ap)
{
    void *caller = __builtin_return_address(0);
    _IOLogv(format, ap, caller);
}

void _IOLogv(const char *format, va_list ap, void *caller)
{
    va_list ap2;
    struct console_printbuf_state info_data;
    console_printbuf_state_init(&info_data, TRUE, TRUE);

    va_copy(ap2, ap);

    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

    __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
    console_printbuf_clear(&info_data);
    va_end(ap2);

    assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");
}

#if !__LP64__
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

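// Hex-dump up to 4096 bytes of 'buffer' through 'output', 16 bytes per line
// with an ASCII column on the right (non-printable bytes shown as spaces).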
void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
                          void (*output)(const char *format, ...))
{
    uint8_t c, chars[17];
    size_t  idx;

    output("%s(0x%x):\n", title, size);
    if (size > 4096) size = 4096;
    chars[16] = idx = 0;
    while (true) {
        if (!(idx & 15)) {
            if (idx) output(" |%s|\n", chars);
            if (idx >= size) break;
            output("%04x: ", idx);
        }
        else if (!(idx & 7)) output(" ");

        c = ((char *)buffer)[idx];
        output("%02x ", c);
        chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';

        idx++;
        if ((idx == size) && (idx & 15)) {
            chars[idx & 15] = 0;
            while (idx & 15) {
                idx++;
                output(" ");
            }
        }
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];        // scratch buffer for the fallback string; not thread-safe

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

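// Convert between a byte count and an IOAlignment (a power-of-two exponent):
// IOSizeToAlignment returns floor(log2(size)) (0 if size is 0), the largest
// alignment no bigger than 'size'; IOAlignmentToSize returns 1 << align.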
IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}

} /* extern "C" */


