1/*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <sys/sysctl.h>
30extern "C" {
31#include <vm/vm_kern.h>
32#include <kern/task.h>
33#include <kern/debug.h>
34}
35
36#include <libkern/c++/OSContainers.h>
37#include <libkern/OSDebug.h>
38#include <libkern/c++/OSCPPDebug.h>
39#include <kern/backtrace.h>
40#include <kern/btlog.h>
41
42#include <IOKit/IOKitDebug.h>
43#include <IOKit/IOLib.h>
44#include <IOKit/assert.h>
45#include <IOKit/IODeviceTreeSupport.h>
46#include <IOKit/IOService.h>
47
48#include "IOKitKernelInternal.h"
49
// IOKit debug flags, seeded from the "io" boot-arg (see IOKitDebug.h for bit meanings).
TUNABLE_WRITEABLE(SInt64, gIOKitDebug, "io", DEBUG_INIT_VALUE);
// IOKit tracing flags, seeded from the "iotrace" boot-arg; writable only on DEV kernels.
TUNABLE_DEV_WRITEABLE(SInt64, gIOKitTrace, "iotrace", 0);

// The debug sysctls are writable on DEVELOPMENT/DEBUG kernels, read-only on RELEASE.
#if DEVELOPMENT || DEBUG
#define IODEBUG_CTLFLAGS CTLFLAG_RW
#else
#define IODEBUG_CTLFLAGS CTLFLAG_RD
#endif

// debug.iotrace: exposes gIOKitTrace directly.
SYSCTL_QUAD(_debug, OID_AUTO, iotrace, IODEBUG_CTLFLAGS | CTLFLAG_LOCKED, &gIOKitTrace, "trace io");
60
61static int
62sysctl_debug_iokit
63(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
64{
65 SInt64 newValue;
66 int changed, error = sysctl_io_number(req, bigValue: gIOKitDebug, valueSize: sizeof(gIOKitDebug), pValue: &newValue, changed: &changed);
67 if (changed) {
68 gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions));
69 }
70 return error;
71}
72
// debug.iokit: quad-typed sysctl backed by sysctl_debug_iokit above.
SYSCTL_PROC(_debug, OID_AUTO, iokit,
    CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io");

// If set, called with kIOTrackingLeakScanStart/End around each leak scan pass.
void (*gIOTrackingLeakScanCallback)(uint32_t notification) = NULL;

// Running byte counters for the libkern/IOKit allocators, reported by
// IOPrintMemory() and IOKitDiagnostics::serialize().
size_t debug_malloc_size;
size_t debug_iomalloc_size;

vm_size_t debug_iomallocpageable_size;
size_t debug_container_malloc_size;
// int debug_ivars_size; // in OSObject.cpp

extern "C" {
// DEBG output goes to IOLog by default; flip the #if to route to kprintf.
#if 0
#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) { IOLog(fmt, ## args); }
#endif
92
93void
94IOPrintPlane( const IORegistryPlane * plane )
95{
96 IORegistryEntry * next;
97 IORegistryIterator * iter;
98 OSOrderedSet * all;
99 IOService * service;
100
101 iter = IORegistryIterator::iterateOver( plane );
102 assert( iter );
103 all = iter->iterateAll();
104 if (all) {
105 DEBG("Count %d\n", all->getCount());
106 all->release();
107 } else {
108 DEBG("Empty\n");
109 }
110
111 iter->reset();
112 while ((next = iter->getNextObjectRecursive())) {
113 DEBG( "%*s\033[33m%s", 2 * next->getDepth( plane ), "", next->getName( plane ));
114 if ((next->getLocation( plane ))) {
115 DEBG("@%s", next->getLocation( plane ));
116 }
117 DEBG("\033[0m <class %s", next->getMetaClass()->getClassName());
118 if ((service = OSDynamicCast(IOService, next))) {
119 DEBG(", busy %ld", (long) service->getBusyState());
120 }
121 DEBG( ">\n");
122// IOSleep(250);
123 }
124 iter->release();
125
126#undef IOPrintPlaneFormat
127}
128
// Intentionally-empty stub; apparently retained for legacy debugger
// entry-point compatibility — TODO confirm any remaining callers.
void
db_piokjunk(void)
{
}
133
// Intentionally-empty stub; the plane argument is unused.
void
db_dumpiojunk( const IORegistryPlane * plane __unused )
{
}
138
139void
140IOPrintMemory( void )
141{
142// OSMetaClass::printInstanceCounts();
143
144 IOLog(format: "\n"
145 "ivar kalloc() 0x%08lx\n"
146 "malloc() 0x%08lx\n"
147 "containers kalloc() 0x%08lx\n"
148 "IOMalloc() 0x%08lx\n"
149 "----------------------------------------\n",
150 debug_ivars_size,
151 debug_malloc_size,
152 debug_container_malloc_size,
153 debug_iomalloc_size
154 );
155}
156} /* extern "C" */
157
158/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
159
// IOKitDiagnostics is a plain OSObject subclass whose only job is to
// serialize the allocation counters (see serialize() below).
#define super OSObject
OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject)
162
163/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
164
165OSObject * IOKitDiagnostics::diagnostics( void )
166{
167 IOKitDiagnostics * diags;
168
169 diags = new IOKitDiagnostics;
170 if (diags && !diags->init()) {
171 diags->release();
172 diags = NULL;
173 }
174
175 return diags;
176}
177
178void
179IOKitDiagnostics::updateOffset( OSDictionary * dict,
180 UInt64 value, const char * name )
181{
182 OSNumber * off;
183
184 off = OSNumber::withNumber( value, numberOfBits: 64 );
185 if (!off) {
186 return;
187 }
188
189 dict->setObject( aKey: name, anObject: off );
190 off->release();
191}
192
193bool
194IOKitDiagnostics::serialize(OSSerialize *s) const
195{
196 OSDictionary * dict;
197 bool ok;
198
199 dict = OSDictionary::withCapacity( capacity: 5 );
200 if (!dict) {
201 return false;
202 }
203
204 updateOffset( dict, value: debug_ivars_size, name: "Instance allocation" );
205 updateOffset( dict, value: debug_container_malloc_size, name: "Container allocation" );
206 updateOffset( dict, value: debug_iomalloc_size, name: "IOMalloc allocation" );
207 updateOffset( dict, value: debug_iomallocpageable_size, name: "Pageable allocation" );
208
209 OSMetaClass::serializeClassDictionary(dict);
210
211 ok = dict->serialize( serializer: s );
212
213 dict->release();
214
215 return ok;
216}
217
218/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
219
#if IOTRACKING

#include <libkern/c++/OSCPPDebug.h>
#include <libkern/c++/OSKext.h>
#include <kern/zalloc.h>

// Forward declaration of the kernel's qsort (no prototype in a public
// kernel header).  NOTE(review): `__private_extern__ "C"` only compiles
// if __private_extern__ expands to `extern` in C++ builds — verify
// against sys/cdefs.h; plain `extern "C"` may be intended.
__private_extern__ "C" void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

// pmap helpers used by the leak scanner to validate physical pages.
extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern "C" ppnum_t pmap_valid_page(ppnum_t pn);
234
235/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
236
/*
 * Recursive mutex: 'thread' is the owning thread while held, and
 * 'count' is the nesting depth (0 when unowned).  See
 * IOTRecursiveLockLock/Unlock below.
 */
struct IOTRecursiveLock {
	lck_mtx_t * mutex;
	thread_t thread;   // current owner, NULL when unlocked
	UInt32 count;      // recursion depth held by 'thread'
};
242
/*
 * One tracking domain (e.g. all IOMalloc allocations).  Call sites are
 * kept in 'numSiteQs' hash buckets ('sites[]', keyed by backtrace CRC).
 */
struct IOTrackingQueue {
	queue_chain_t link;        // membership in global gIOTrackingQ
	IOTRecursiveLock lock;     // guards sites[] and the counters below
	const char * name;
	uintptr_t btEntry;         // substitute entry-point frame for backtraces
	size_t allocSize;          // fixed allocation size, 0 if variable
	size_t minCaptureSize;     // allocations smaller than this are ignored
	uint32_t siteCount;        // number of live call sites / user records
	uint32_t type;             // kIOTrackingQueueType* flags
	uint32_t numSiteQs;        // hash bucket count (>= 1)
	uint8_t captureOn;         // nonzero when capture is enabled
	queue_head_t sites[];      // flexible array of hash buckets
};
256
257
/*
 * Optional per-site user-space backtrace, appended to an
 * IOTrackingCallSite when the queue has kIOTrackingQueueTypeUser set.
 * bt[] holds 32- or 64-bit frames depending on 'user32'.
 */
struct IOTrackingCallSiteUser {
	pid_t pid;
	uint8_t user32;     // nonzero: bt[] frames are 32-bit values
	uint8_t userCount;  // number of valid frames in bt[]
	uintptr_t bt[kIOTrackingCallSiteBTs];
};

/*
 * One unique (backtrace CRC, tag[, pid]) call site; all live trackings
 * captured from this site hang off 'instances'.
 */
struct IOTrackingCallSite {
	queue_chain_t link;          // membership in a queue->sites[] bucket
	queue_head_t instances;      // live IOTracking records for this site
	IOTrackingQueue * queue;
	IOTracking ** addresses;     // lazily-allocated per-bucket address index (see IOTrackingAdd)
	size_t size[2];              // [0] captured bytes, [1] accumulated extra (IOTrackingAccumSize)
	uint32_t crc;                // hash of the (kernel [+ user]) backtrace
	uint32_t count;              // number of entries on 'instances'

	vm_tag_t tag;
	uint8_t user32;
	uint8_t userCount;
	pid_t btPID;

	uintptr_t bt[kIOTrackingCallSiteBTs];
	IOTrackingCallSiteUser user[0];  // zero-length tail; present only for user-type queues
};

/*
 * Concrete layout used when the trailing user record is present; the
 * allocator chooses between this and a bare IOTrackingCallSite.
 */
struct IOTrackingCallSiteWithUser {
	struct IOTrackingCallSite site;
	struct IOTrackingCallSiteUser user;
};
287
// Frees a call site (and its address index); forward-declared because it
// is used by IOTrackingRemoveInternal before its definition.
static void IOTrackingFreeCallSite(uint32_t type, IOTrackingCallSite ** site);

// Shared state for a leak-scan pass (see IOTrackingLeakScan).
struct IOTrackingLeaksRef {
	uintptr_t * instances;  // sorted candidate instance pointers
	uint32_t zoneSize;      // fixed element size for zone scans, 0 for tracking scans
	uint32_t count;         // number of entries in 'instances'
	uint32_t found;         // instances seen referenced so far
	uint32_t foundzlen;     // referenced instances that had zero recorded size
	size_t bytes;           // bytes of memory scanned this pass
};

// Global registry of all IOTrackingQueues, protected by gIOTrackingLock.
lck_mtx_t * gIOTrackingLock;
queue_head_t gIOTrackingQ;

enum{
	kTrackingAddressFlagAllocated = 0x00000001
};

// The flags word lives in different places depending on pointer width;
// this hides the difference for IOTrackingAddress records.
#if defined(__LP64__)
#define IOTrackingAddressFlags(ptr) (ptr->flags)
#else
#define IOTrackingAddressFlags(ptr) (ptr->tracking.flags)
#endif
311
312/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
313
314static void
315IOTRecursiveLockLock(IOTRecursiveLock * lock)
316{
317 if (lock->thread == current_thread()) {
318 lock->count++;
319 } else {
320 lck_mtx_lock(lock->mutex);
321 assert(lock->thread == NULL);
322 assert(lock->count == 0);
323 lock->thread = current_thread();
324 lock->count = 1;
325 }
326}
327
328static void
329IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
330{
331 assert(lock->thread == current_thread());
332 if (0 == (--lock->count)) {
333 lock->thread = NULL;
334 lck_mtx_unlock(lock->mutex);
335 }
336}
337
338/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
339
// One-time initialization of the global queue registry and its lock.
void
IOTrackingInit(void)
{
	queue_init(&gIOTrackingQ);
	gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}
346
347/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
348
/*
 * Allocate and register a tracking queue.
 *   btEntry        - frame substituted into copied-out backtraces (may be 0)
 *   allocSize      - fixed element size, or 0 for variable-size trackings
 *   minCaptureSize - allocations below this size are not captured
 *   numSiteQs      - hash bucket count (forced to at least 1)
 * Capture starts enabled if both kIOTracking and kIOTrackingBoot are set
 * in gIOKitDebug, or if the queue type requests default-on.
 */
IOTrackingQueue *
IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
    size_t allocSize, size_t minCaptureSize,
    uint32_t type, uint32_t numSiteQs)
{
	IOTrackingQueue * queue;
	uint32_t idx;

	if (!numSiteQs) {
		numSiteQs = 1;
	}
	// Variable-size allocation: header plus numSiteQs trailing queue_heads.
	queue = kalloc_type(IOTrackingQueue, queue_head_t, numSiteQs, Z_WAITOK_ZERO);
	queue->name = name;
	queue->btEntry = btEntry;
	queue->allocSize = allocSize;
	queue->minCaptureSize = minCaptureSize;
	queue->lock.mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	queue->numSiteQs = numSiteQs;
	queue->type = type;
	enum { kFlags = (kIOTracking | kIOTrackingBoot) };
	queue->captureOn = (kFlags == (kFlags & gIOKitDebug))
	    || (kIOTrackingQueueTypeDefaultOn & type);

	for (idx = 0; idx < numSiteQs; idx++) {
		queue_init(&queue->sites[idx]);
	}

	// Publish on the global registry.
	lck_mtx_lock(gIOTrackingLock);
	queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
	lck_mtx_unlock(gIOTrackingLock);

	return queue;
};
382
// Mark a queue as collecting user-space backtraces.  Must be called
// before any site is captured (asserted), since user sites are larger.
void
IOTrackingQueueCollectUser(IOTrackingQueue * queue)
{
	assert(0 == queue->siteCount);
	queue->type |= kIOTrackingQueueTypeUser;
}
389
390/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391
// Tear down a queue: drop all captured sites, unlink from the global
// registry, and free the lock and the queue itself.
void
IOTrackingQueueFree(IOTrackingQueue * queue)
{
	lck_mtx_lock(gIOTrackingLock);
	IOTrackingReset(queue);
	remque(&queue->link);
	lck_mtx_unlock(gIOTrackingLock);

	lck_mtx_free(queue->lock.mutex, IOLockGroup);

	kfree_type(IOTrackingQueue, queue_head_t, queue->numSiteQs, queue);
};
404
405/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
406
407/* fasthash
408 * The MIT License
409 *
410 * Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com)
411 *
412 * Permission is hereby granted, free of charge, to any person
413 * obtaining a copy of this software and associated documentation
414 * files (the "Software"), to deal in the Software without
415 * restriction, including without limitation the rights to use, copy,
416 * modify, merge, publish, distribute, sublicense, and/or sell copies
417 * of the Software, and to permit persons to whom the Software is
418 * furnished to do so, subject to the following conditions:
419 *
420 * The above copyright notice and this permission notice shall be
421 * included in all copies or substantial portions of the Software.
422 *
423 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
424 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
425 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
426 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
427 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
428 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
429 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
430 * SOFTWARE.
431 */
432
433
434// Compression function for Merkle-Damgard construction.
435// This function is generated using the framework provided.
436#define mix(h) ({ \
437 (h) ^= (h) >> 23; \
438 (h) *= 0x2127599bf4325c37ULL; \
439 (h) ^= (h) >> 47; })
440
441static uint64_t
442fasthash64(const void *buf, size_t len, uint64_t seed)
443{
444 const uint64_t m = 0x880355f21e6d1965ULL;
445 const uint64_t *pos = (const uint64_t *)buf;
446 const uint64_t *end = pos + (len / 8);
447 const unsigned char *pos2;
448 uint64_t h = seed ^ (len * m);
449 uint64_t v;
450
451 while (pos != end) {
452 v = *pos++;
453 h ^= mix(v);
454 h *= m;
455 }
456
457 pos2 = (const unsigned char*)pos;
458 v = 0;
459
460 switch (len & 7) {
461 case 7: v ^= (uint64_t)pos2[6] << 48;
462 [[clang::fallthrough]];
463 case 6: v ^= (uint64_t)pos2[5] << 40;
464 [[clang::fallthrough]];
465 case 5: v ^= (uint64_t)pos2[4] << 32;
466 [[clang::fallthrough]];
467 case 4: v ^= (uint64_t)pos2[3] << 24;
468 [[clang::fallthrough]];
469 case 3: v ^= (uint64_t)pos2[2] << 16;
470 [[clang::fallthrough]];
471 case 2: v ^= (uint64_t)pos2[1] << 8;
472 [[clang::fallthrough]];
473 case 1: v ^= (uint64_t)pos2[0];
474 h ^= mix(v);
475 h *= m;
476 }
477
478 return mix(h);
479}
480
481/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
482
483static uint32_t
484fasthash32(const void *buf, size_t len, uint32_t seed)
485{
486 // the following trick converts the 64-bit hashcode to Fermat
487 // residue, which shall retain information from both the higher
488 // and lower parts of hashcode.
489 uint64_t h = fasthash64(buf, len, seed);
490 return (uint32_t) (h - (h >> 32));
491}
492
493/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
494
/*
 * Capture a user-mode tracking record (map-type queues): kernel and,
 * when applicable, user backtraces are stored directly in 'mem' and the
 * record is appended to the queue's first bucket.
 */
void
IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
{
	uint32_t num;
	int pid;

	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	assert(!mem->link.next);

	// Fill the kernel backtrace; the frame count itself is not kept.
	num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs, NULL, NULL);
	// Reset: 'num' is reused below as the *user* frame count, which
	// stays 0 for kernel-task callers.
	num = 0;
	if ((kernel_task != current_task()) && (pid = proc_selfpid())) {
		struct backtrace_user_info btinfo = BTUINFO_INIT;
		mem->btPID = pid;
		// NOTE(review): capacity kIOTrackingCallSiteBTs - 1 — presumably
		// reserves one slot; confirm against IOTrackingUser::btUser size.
		num = backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1,
		    NULL, &btinfo);
		mem->user32 = !(btinfo.btui_info & BTI_64_BIT);
	}
	assert(num <= kIOTrackingCallSiteBTs);
	static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX);
	mem->userCount = ((uint8_t) num);

	IOTRecursiveLockLock(&queue->lock);
	queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link);
	queue->siteCount++;
	IOTRecursiveLockUnlock(&queue->lock);
}
528
/*
 * Unlink a user-mode tracking record.  The unlocked link.next test is a
 * cheap fast-path for records never (or already) enqueued; the state is
 * re-checked under the queue lock before unlinking.
 */
void
IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
{
	if (!mem->link.next) {
		return;
	}

	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {
		remque(&mem->link);
		assert(queue->siteCount);
		queue->siteCount--;
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
544
// Accumulated time spent in IOTrackingAdd (not updated in this view —
// presumably maintained elsewhere or vestigial; confirm).
uint64_t gIOTrackingAddTime;

/*
 * Capture one tracking record: find (or create) the call site matching
 * the current backtrace, then attach 'mem' to it.
 *   address - true for IOTrackingAddress records, which are additionally
 *             indexed per-bucket in site->addresses for fast free lookup.
 *   tag     - VM tag; sites are unique per (crc, tag[, pid]).
 */
void
IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag)
{
	IOTrackingCallSite * site;
	uint32_t crc, num;
	uintptr_t bt[kIOTrackingCallSiteBTs + 1];
	uintptr_t btUser[kIOTrackingCallSiteBTs];
	queue_head_t * que;
	bool user;
	int pid;
	int userCount;

	if (mem->site) {
		return;               // already captured
	}
	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	user = (0 != (kIOTrackingQueueTypeUser & queue->type));

	assert(!mem->link.next);

	// Capture one extra frame and drop bt[0] (this function itself).
	num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1, NULL, NULL);
	if (!num) {
		return;
	}
	num--;
	crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);

	userCount = 0;
	pid = 0;
	backtrace_info_t btinfo = BTI_NONE;
	if (user) {
		if ((kernel_task != current_task()) && (pid = proc_selfpid())) {
			struct backtrace_user_info btuinfo = BTUINFO_INIT;
			userCount = backtrace_user(&btUser[0], kIOTrackingCallSiteBTs,
			    NULL, &btuinfo);
			assert(userCount <= kIOTrackingCallSiteBTs);
			btinfo = btuinfo.btui_info;
			// Fold the user backtrace into the site hash too.
			crc = fasthash32(&btUser[0], userCount * sizeof(bt[0]), crc);
		}
	}

	IOTRecursiveLockLock(&queue->lock);
	// Search the hash bucket for an existing matching site.
	que = &queue->sites[crc % queue->numSiteQs];
	queue_iterate(que, site, IOTrackingCallSite *, link)
	{
		if (tag != site->tag) {
			continue;
		}
		if (user && (pid != site->user[0].pid)) {
			continue;
		}
		if (crc == site->crc) {
			break;
		}
	}

	// queue_end true => no match found; create a new site.
	if (queue_end(que, (queue_entry_t) site)) {
		if (user) {
			// User-type queues need the trailing user record.
			site = &kalloc_type(IOTrackingCallSiteWithUser,
			    Z_WAITOK_ZERO_NOFAIL)->site;
		} else {
			site = kalloc_type(IOTrackingCallSite,
			    Z_WAITOK_ZERO_NOFAIL);
		}

		queue_init(&site->instances);
		site->addresses = NULL;
		site->queue = queue;
		site->crc = crc;
		site->count = 0;
		site->tag = tag;
		memset(&site->size[0], 0, sizeof(site->size));
		bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
		assert(num <= kIOTrackingCallSiteBTs);
		bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));
		if (user) {
			bcopy(&btUser[0], &site->user[0].bt[0], userCount * sizeof(site->user[0].bt[0]));
			assert(userCount <= kIOTrackingCallSiteBTs);
			bzero(&site->user[0].bt[userCount], (kIOTrackingCallSiteBTs - userCount) * sizeof(site->user[0].bt[0]));
			site->user[0].pid = pid;
			site->user[0].user32 = !(btinfo & BTI_64_BIT);
			static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX);
			site->user[0].userCount = ((uint8_t) userCount);
		}
		queue_enter_first(que, site, IOTrackingCallSite *, link);
		queue->siteCount++;
	}

	if (address) {
		IOTrackingAddress * memAddr = (typeof(memAddr))mem;
		uint32_t hashIdx;

		// Lazily build the per-site address index; each slot initially
		// points at the list head sentinel (meaning "bucket empty").
		if (NULL == site->addresses) {
			site->addresses = kalloc_type(IOTracking *, queue->numSiteQs, Z_WAITOK_ZERO_NOFAIL);
			for (hashIdx = 0; hashIdx < queue->numSiteQs; hashIdx++) {
				site->addresses[hashIdx] = (IOTracking *) &site->instances;
			}
		}
		// Insert so records sharing a bucket stay adjacent, with the
		// index slot pointing at the first record of that bucket.
		hashIdx = atop(memAddr->address) % queue->numSiteQs;
		if (queue_end(&site->instances, (queue_entry_t)site->addresses[hashIdx])) {
			queue_enter/*last*/ (&site->instances, mem, IOTracking *, link);
		} else {
			queue_insert_before(&site->instances, mem, site->addresses[hashIdx], IOTracking *, link);
		}
		site->addresses[hashIdx] = mem;
	} else {
		queue_enter_first(&site->instances, mem, IOTracking *, link);
	}

	mem->site = site;
	site->size[0] += size;
	site->count++;

	IOTRecursiveLockUnlock(&queue->lock);
}
668
669/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
670
/*
 * Detach a tracking record from its site and, when the site goes empty,
 * free the site.  'addressIdx' is the record's bucket in the site's
 * address index, or -1U when the record is not address-indexed (or the
 * caller has already fixed the index up, as IOTrackingFree does).
 */
static void
IOTrackingRemoveInternal(IOTrackingQueue * queue, IOTracking * mem, size_t size, uint32_t addressIdx)
{
	IOTrackingCallSite * site;
	IOTrackingAddress * nextAddress;

	// Unlocked fast path; re-checked under the lock.
	if (!mem->link.next) {
		return;
	}

	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {
		assert(mem->site);
		site = mem->site;

		// If this record is the bucket's index entry, advance the index
		// to the next record — or back to the head sentinel if the next
		// record belongs to a different bucket (or is the list end).
		if ((-1U != addressIdx) && (mem == site->addresses[addressIdx])) {
			nextAddress = (IOTrackingAddress *) queue_next(&mem->link);
			if (!queue_end(&site->instances, &nextAddress->tracking.link)
			    && (addressIdx != (atop(nextAddress->address) % queue->numSiteQs))) {
				nextAddress = (IOTrackingAddress *) &site->instances;
			}
			site->addresses[addressIdx] = &nextAddress->tracking;
		}

		remque(&mem->link);
		assert(site->count);
		site->count--;
		assert(site->size[0] >= size);
		site->size[0] -= size;
		// Last record gone: release the now-empty call site.
		if (!site->count) {
			assert(queue_empty(&site->instances));
			assert(!site->size[0]);
			assert(!site->size[1]);

			remque(&site->link);
			assert(queue->siteCount);
			queue->siteCount--;
			IOTrackingFreeCallSite(queue->type, &site);
		}
		mem->site = NULL;
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
714
715/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
716
717void
718IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
719{
720 return IOTrackingRemoveInternal(queue, mem, size, -1U);
721}
722
723/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
724
725void
726IOTrackingRemoveAddress(IOTrackingQueue * queue, IOTrackingAddress * mem, size_t size)
727{
728 uint32_t addressIdx;
729 uint64_t address;
730
731 address = mem->address;
732 addressIdx = atop(address) % queue->numSiteQs;
733
734 return IOTrackingRemoveInternal(queue, &mem->tracking, size, addressIdx);
735}
736
737/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
738
739void
740IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
741{
742 IOTrackingAddress * tracking;
743
744 if (!queue->captureOn) {
745 return;
746 }
747 if (size < queue->minCaptureSize) {
748 return;
749 }
750
751 address = ~address;
752 tracking = kalloc_type(IOTrackingAddress, (zalloc_flags_t)(Z_WAITOK | Z_ZERO));
753 IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated;
754 tracking->address = address;
755 tracking->size = size;
756
757 IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE);
758}
759
760/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
761
/*
 * Untrack an allocation by address: search every site's address-index
 * bucket for the (complemented) address, unlink the matching record via
 * IOTrackingRemoveInternal (index already repaired here, so -1U), and
 * free the record.
 */
void
IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
	IOTrackingCallSite * site;
	IOTrackingAddress * tracking;
	IOTrackingAddress * nextAddress;
	uint32_t idx, hashIdx;
	bool done;

	// Addresses are stored complemented (see IOTrackingAlloc).
	address = ~address;
	IOTRecursiveLockLock(&queue->lock);

	hashIdx = atop(address) % queue->numSiteQs;

	done = false;
	for (idx = 0; idx < queue->numSiteQs; idx++) {
		queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
		{
			if (!site->addresses) {
				continue;
			}
			// Walk only this bucket's run of records: stop when the
			// next record is the list end or belongs to another bucket.
			tracking = (IOTrackingAddress *) site->addresses[hashIdx];
			while (!queue_end(&site->instances, &tracking->tracking.link)) {
				nextAddress = (IOTrackingAddress *) queue_next(&tracking->tracking.link);
				if (!queue_end(&site->instances, &nextAddress->tracking.link)
				    && (hashIdx != (atop(nextAddress->address) % queue->numSiteQs))) {
					nextAddress = (IOTrackingAddress *) &site->instances;
				}
				if ((done = (address == tracking->address))) {
					// Repair the bucket index before unlinking.
					if (tracking == (IOTrackingAddress *) site->addresses[hashIdx]) {
						site->addresses[hashIdx] = &nextAddress->tracking;
					}
					IOTrackingRemoveInternal(queue, &tracking->tracking, size, -1U);
					kfree_type(IOTrackingAddress, tracking);
					break;
				}
				tracking = nextAddress;
			}
			if (done) {
				break;
			}
		}
		if (done) {
			break;
		}
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
810
811static void
812IOTrackingFreeCallSite(uint32_t type, IOTrackingCallSite ** pSite)
813{
814 IOTrackingCallSite * site;
815 void ** ptr;
816
817 site = *pSite;
818 kfree_type(IOTracking *, site->queue->numSiteQs, site->addresses);
819
820 ptr = reinterpret_cast<void **>(pSite);
821 if (kIOTrackingQueueTypeUser & type) {
822 kfree_type(IOTrackingCallSiteWithUser, *ptr);
823 } else {
824 kfree_type(IOTrackingCallSite, *ptr);
825 }
826}
827
828/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
829
830void
831IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size)
832{
833 IOTRecursiveLockLock(&queue->lock);
834 if (mem->link.next) {
835 assert(mem->site);
836 assert((size > 0) || (mem->site->size[1] >= -size));
837 mem->site->size[1] += size;
838 }
839 ;
840 IOTRecursiveLockUnlock(&queue->lock);
841}
842
843/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
844
/*
 * Drop every captured record from a queue.  Map-type queues hold
 * IOTrackingUser records, which are simply unlinked; other queues hold
 * call sites whose instance lists are drained, freeing any records this
 * module itself allocated (address records flagged Allocated).
 */
void
IOTrackingReset(IOTrackingQueue * queue)
{
	IOTrackingCallSite * site;
	IOTrackingUser * user;
	IOTracking * tracking;
	IOTrackingAddress * trackingAddress;
	uint32_t idx, hashIdx;
	bool addresses;

	IOTRecursiveLockLock(&queue->lock);
	for (idx = 0; idx < queue->numSiteQs; idx++) {
		while (!queue_empty(&queue->sites[idx])) {
			if (kIOTrackingQueueTypeMap & queue->type) {
				queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
				user->link.next = user->link.prev = NULL;
			} else {
				queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
				// 'addresses' latches once the walk reaches the first
				// record present in the site's address index; from then
				// on records are IOTrackingAddress allocations to free.
				addresses = false;
				while (!queue_empty(&site->instances)) {
					queue_remove_first(&site->instances, tracking, IOTracking *, link);
					if (site->addresses) {
						for (hashIdx = 0; !addresses && (hashIdx < queue->numSiteQs); hashIdx++) {
							if (tracking == site->addresses[hashIdx]) {
								addresses = true;
							}
						}
					}
					if (addresses) {
						trackingAddress = (typeof(trackingAddress))tracking;
						if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) {
							kfree_type(IOTrackingAddress, trackingAddress);
						}
					}
				}
				IOTrackingFreeCallSite(queue->type, &site);
			}
		}
	}
	queue->siteCount = 0;
	IOTRecursiveLockUnlock(&queue->lock);
}
887
888/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
889
890static int
891IOTrackingCallSiteInfoCompare(const void * left, const void * right)
892{
893 IOTrackingCallSiteInfo * l = (typeof(l))left;
894 IOTrackingCallSiteInfo * r = (typeof(r))right;
895 size_t lsize, rsize;
896
897 rsize = r->size[0] + r->size[1];
898 lsize = l->size[0] + l->size[1];
899
900 return (rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1);
901}
902
903/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
904
/*
 * qsort comparator for candidate instance pointers: order ascending by
 * the tracked payload address.  Address records store the address
 * complemented; other records' payload starts right after the header.
 */
static int
IOTrackingAddressCompare(const void * left, const void * right)
{
	IOTracking * instance;
	uintptr_t inst, laddr, raddr;

	inst = ((typeof(inst) *)left)[0];
	instance = (typeof(instance))INSTANCE_GET(inst);
	if (kInstanceFlagAddress & inst) {
		laddr = ~((IOTrackingAddress *)instance)->address;
	} else {
		laddr = (uintptr_t) (instance + 1);
	}

	inst = ((typeof(inst) *)right)[0];
	// NOTE(review): right side masks flags inline while the left uses
	// INSTANCE_GET — presumably equivalent; confirm INSTANCE_GET's definition.
	instance = (typeof(instance))(inst & ~kInstanceFlags);
	if (kInstanceFlagAddress & inst) {
		raddr = ~((IOTrackingAddress *)instance)->address;
	} else {
		raddr = (uintptr_t) (instance + 1);
	}

	return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
}
929
930
931static int
932IOTrackingZoneElementCompare(const void * left, const void * right)
933{
934 uintptr_t inst, laddr, raddr;
935
936 inst = ((typeof(inst) *)left)[0];
937 laddr = INSTANCE_PUT(inst);
938 inst = ((typeof(inst) *)right)[0];
939 raddr = INSTANCE_PUT(inst);
940
941 return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
942}
943
944/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
945
946static void
947CopyOutBacktraces(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
948{
949 uint32_t j;
950 mach_vm_address_t bt, btEntry;
951
952 btEntry = site->queue->btEntry;
953 for (j = 0; j < kIOTrackingCallSiteBTs; j++) {
954 bt = site->bt[j];
955 if (btEntry
956 && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) {
957 bt = btEntry;
958 btEntry = 0;
959 }
960 siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
961 }
962
963 siteInfo->btPID = 0;
964 if (kIOTrackingQueueTypeUser & site->queue->type) {
965 siteInfo->btPID = site->user[0].pid;
966 uint32_t * bt32 = (typeof(bt32))((void *) &site->user[0].bt[0]);
967 uint64_t * bt64 = (typeof(bt64))((void *) &site->user[0].bt[0]);
968 for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
969 if (j >= site->user[0].userCount) {
970 siteInfo->bt[1][j] = 0;
971 } else if (site->user[0].user32) {
972 siteInfo->bt[1][j] = bt32[j];
973 } else {
974 siteInfo->bt[1][j] = bt64[j];
975 }
976 }
977 }
978}
979
980/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
981
/*
 * Scan all mapped kernel memory for pointers into the candidate
 * instances in ref->instances (which must be sorted ascending — see the
 * comparators above).  Runs with interrupts disabled in 10ms slices,
 * briefly re-enabling them between slices.  Instances found referenced
 * are flagged kInstanceFlagReferenced in place and counted in
 * ref->found (ref->foundzlen for zero-length records).
 */
static void
IOTrackingLeakScan(void * refcon)
{
	IOTrackingLeaksRef * ref = (typeof(ref))refcon;
	uintptr_t * instances;
	IOTracking * instance;
	uint64_t vaddr, vincr;
	ppnum_t ppn;
	uintptr_t ptr, addr, vphysaddr, inst;
	size_t size, origsize;
	uint32_t baseIdx, lim, ptrIdx, count;
	boolean_t is;
	AbsoluteTime deadline;

	instances = ref->instances;
	count = ref->count;
	size = origsize = ref->zoneSize;

	if (gIOTrackingLeakScanCallback) {
		gIOTrackingLeakScanCallback(kIOTrackingLeakScanStart);
	}

	// deadline == 0 on the first pass through the slice check, so 'is'
	// is only consumed after it has been set.
	for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
	    ;
	    vaddr += vincr) {
		if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) {
			if (deadline) {
#if SCHED_HYGIENE_DEBUG
				if (is) {
					// Reset the interrupt timeout to avoid panics
					ml_spin_debug_clear_self();
				}
#endif /* SCHED_HYGIENE_DEBUG */
				ml_set_interrupts_enabled(is);
				IODelay(10);
			}
			if (vaddr >= VM_MAX_KERNEL_ADDRESS) {
				break;
			}
			is = ml_set_interrupts_enabled(false);
			clock_interval_to_deadline(10, kMillisecondScale, &deadline);
		}

		ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
		// check noencrypt to avoid VM structs (map entries) with pointers
		if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) {
			ppn = 0;
		}
		if (!ppn) {
			continue;
		}

		// Treat every pointer-aligned word in the page as a candidate.
		for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) {
			ptr = ((uintptr_t *)vphysaddr)[ptrIdx];
#if defined(HAS_APPLE_PAC)
			// strip possible ptrauth signature from candidate data pointer
			ptr = (uintptr_t)ptrauth_strip((void*)ptr, ptrauth_key_process_independent_data);
#endif /* defined(HAS_APPLE_PAC) */

			// Binary search the sorted instance list for a record whose
			// [addr, addr+size) range contains 'ptr'.
			for (lim = count, baseIdx = 0; lim; lim >>= 1) {
				inst = instances[baseIdx + (lim >> 1)];
				instance = (typeof(instance))INSTANCE_GET(inst);

				if (ref->zoneSize) {
					addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
				} else if (kInstanceFlagAddress & inst) {
					addr = ~((IOTrackingAddress *)instance)->address;
					origsize = size = ((IOTrackingAddress *)instance)->size;
					if (!size) {
						size = 1;
					}
				} else {
					addr = (uintptr_t) (instance + 1);
					origsize = size = instance->site->queue->allocSize;
				}
				// A hit only counts if the referencing word lies outside
				// the instance itself (self-references are not roots).
				if ((ptr >= addr) && (ptr < (addr + size))

				    && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
				    || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) {
					if (!(kInstanceFlagReferenced & inst)) {
						inst |= kInstanceFlagReferenced;
						instances[baseIdx + (lim >> 1)] = inst;
						ref->found++;
						if (!origsize) {
							ref->foundzlen++;
						}
					}
					break;
				}
				if (ptr > addr) {
					// move right
					baseIdx += (lim >> 1) + 1;
					lim--;
				}
				// else move left
			}
		}
		ref->bytes += page_size;
	}

	if (gIOTrackingLeakScanCallback) {
		gIOTrackingLeakScanCallback(kIOTrackingLeakScanEnd);
	}
}
1086
1087/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1088
1089extern "C" void
1090zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
1091{
1092 IOTrackingLeaksRef ref;
1093 IOTrackingCallSiteInfo siteInfo;
1094 uint32_t idx;
1095
1096 qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);
1097
1098 bzero(&siteInfo, sizeof(siteInfo));
1099 bzero(&ref, sizeof(ref));
1100 ref.instances = instances;
1101 ref.count = count;
1102 ref.zoneSize = zoneSize;
1103
1104 for (idx = 0; idx < 2; idx++) {
1105 ref.bytes = 0;
1106 IOTrackingLeakScan(&ref);
1107 IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
1108 if (count <= ref.found) {
1109 break;
1110 }
1111 }
1112
1113 *found = ref.found;
1114}
1115
1116/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1117
/*
 * Post-process the instance list collected for a kIOTrackingLeaks request.
 * `data` holds an array of instance pointers (with kInstanceFlag* bits);
 * this function scans memory for references, then emits one
 * IOTrackingCallSiteInfo record per call site that still has unreferenced
 * (i.e. presumed-leaked) instances.
 *
 * LIBKERN_CONSUMED: this function takes ownership of `data` and releases it.
 * Returns a newly allocated OSData of IOTrackingCallSiteInfo records.
 */
static OSData *
IOTrackingLeaks(LIBKERN_CONSUMED OSData * data)
{
	IOTrackingLeaksRef ref;
	IOTrackingCallSiteInfo siteInfo;
	IOTrackingCallSite * site;
	OSData * leakData;
	uintptr_t * instances;
	IOTracking * instance;
	uintptr_t inst;
	uint32_t count, idx, numSites, dups, siteCount;

	instances = (typeof(instances))data->getBytesNoCopy();
	count = (data->getLength() / sizeof(*instances));
	// Sort so IOTrackingLeakScan can binary-search candidate pointers.
	qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare);

	bzero(&siteInfo, sizeof(siteInfo));
	bzero(&ref, sizeof(ref));
	ref.instances = instances;
	ref.count = count;
	// Up to two passes: a second scan can mark instances that are only
	// referenced from instances found referenced by the first pass.
	for (idx = 0; idx < 2; idx++) {
		ref.bytes = 0;
		IOTrackingLeakScan(&ref);
		IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen);
		if (count <= ref.found) {
			break;
		}
	}

	// NOTE(review): leakData is not NULL-checked before appendBytes below;
	// presumably OSData::withCapacity cannot fail here — confirm.
	leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));

	// Compact instances[] in place: keep only the call-site pointers of
	// instances that were NOT marked referenced (the leak candidates).
	for (numSites = 0, idx = 0; idx < count; idx++) {
		inst = instances[idx];
		if (kInstanceFlagReferenced & inst) {
			continue;
		}
		instance = (typeof(instance))INSTANCE_GET(inst);
		site = instance->site;
		instances[numSites] = (uintptr_t) site;
		numSites++;
	}

	// Deduplicate call sites (zeroing duplicates in place), counting leaked
	// instances per site, and emit one record per unique site.
	for (idx = 0; idx < numSites; idx++) {
		inst = instances[idx];
		if (!inst) {
			continue;
		}
		site = (typeof(site))inst;
		for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
			if (instances[dups] == (uintptr_t) site) {
				siteCount++;
				instances[dups] = 0;
			}
		}
		siteInfo.count = siteCount;
		// NOTE(review): scales the site's running size totals by
		// (site->count / siteCount); confirm this averaging is intended
		// rather than reporting the raw totals.
		siteInfo.size[0] = (site->size[0] * site->count) / siteCount;
		siteInfo.size[1] = (site->size[1] * site->count) / siteCount;
		CopyOutBacktraces(site, &siteInfo);
		leakData->appendBytes(&siteInfo, sizeof(siteInfo));
	}
	// Consume the reference passed in (see LIBKERN_CONSUMED above).
	data->release();

	return leakData;
}
1182
1183/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1184
1185static bool
1186SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
1187{
1188 const char * scan;
1189 const char * next;
1190 bool exclude, found;
1191 size_t qLen, sLen;
1192
1193 if (!namesLen || !names) {
1194 return false;
1195 }
1196 // <len><name>...<len><name><0>
1197 exclude = (0 != (kIOTrackingExcludeNames & options));
1198 qLen = strlen(name);
1199 scan = names;
1200 found = false;
1201 do{
1202 sLen = scan[0];
1203 scan++;
1204 next = scan + sLen;
1205 if (next >= (names + namesLen)) {
1206 break;
1207 }
1208 found = ((sLen == qLen) && !strncmp(scan, name, sLen));
1209 scan = next;
1210 }while (!found && (scan < (names + namesLen)));
1211
1212 return !(exclude ^ found);
1213}
1214
1215#endif /* IOTRACKING */
1216
1217/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1218
/*
 * Core dispatcher for the IOKitDiagnostics external methods. Walks every
 * registered IOTrackingQueue (under gIOTrackingLock) and performs the
 * requested operation:
 *   kIOTrackingResetTracking/Start/StopCapture/SetMinCaptureSize — control ops
 *   kIOTrackingLeaks        — collect live instances, scan for references,
 *                             report unreferenced (leaked) call sites
 *   kIOTrackingGetTracking  — dump per-call-site allocation statistics
 *   kIOTrackingGetMappings  — dump memory-map tracking records
 *
 * @param value    for kIOTrackingGetMappings: target pid, or -1ULL for all.
 * @param intag/inzsize  optional vm tag / zone-size filters for GetTracking.
 * @param names/namesLen packed name filter (see SkipName) or NULL/0.
 * @param size     minimum size filter (also the min-capture-size value).
 * @param result   out: OSData of result records (caller owns the reference).
 */
static kern_return_t
IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
    uint32_t intag, uint32_t inzsize,
    const char * names, size_t namesLen,
    size_t size, OSObject ** result)
{
	kern_return_t ret;
	OSData * data;

	if (result) {
		*result = NULL;
	}
	data = NULL;
	ret = kIOReturnNotReady;

#if IOTRACKING

	kern_return_t kr;
	IOTrackingQueue * queue;
	IOTracking * instance;
	IOTrackingCallSite * site;
	IOTrackingCallSiteInfo siteInfo;
	IOTrackingUser * user;
	task_t mapTask;
	mach_vm_address_t mapAddress;
	mach_vm_size_t mapSize;
	uint32_t num, idx, qIdx;
	uintptr_t instFlags;
	proc_t proc;
	bool addresses;

	ret = kIOReturnNotFound;
	proc = NULL;
	// For mapping queries, -1ULL means "all processes"; otherwise take a
	// reference on the target proc (released before returning).
	if (kIOTrackingGetMappings == selector) {
		if (value != -1ULL) {
			proc = proc_find((pid_t) value);
			if (!proc) {
				return kIOReturnNotFound;
			}
		}
	}

	bzero(&siteInfo, sizeof(siteInfo));
	lck_mtx_lock(gIOTrackingLock);
	queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
	{
		// Apply the caller's include/exclude name filter.
		if (SkipName(options, queue->name, namesLen, names)) {
			continue;
		}

		// Allocation queues only carry data when kIOTracking is enabled.
		if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) {
			continue;
		}

		switch (selector) {
		case kIOTrackingResetTracking:
		{
			IOTrackingReset(queue);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingStartCapture:
		case kIOTrackingStopCapture:
		{
			queue->captureOn = (kIOTrackingStartCapture == selector);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingSetMinCaptureSize:
		{
			queue->minCaptureSize = size;
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingLeaks:
		{
			if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
				break;
			}

			if (!data) {
				data = OSData::withCapacity(1024 * sizeof(uintptr_t));
			}

			// Append every live instance pointer, tagging those that are
			// address-tracked (stored in site->addresses) so the scanner
			// knows to decode them as IOTrackingAddress records.
			IOTRecursiveLockLock(&queue->lock);
			for (idx = 0; idx < queue->numSiteQs; idx++) {
				queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
				{
					addresses = false;
					queue_iterate(&site->instances, instance, IOTracking *, link)
					{
						if (site->addresses) {
							for (uint32_t hashIdx = 0; !addresses && (hashIdx < queue->numSiteQs); hashIdx++) {
								if (instance == site->addresses[hashIdx]) {
									addresses = true;
								}
							}
						}
						instFlags = (typeof(instFlags))instance;
						if (addresses) {
							instFlags |= kInstanceFlagAddress;
						}
						data->appendValue(instFlags);
					}
				}
			}
			// queue is locked
			// (deliberately left locked so instances stay stable during the
			// leak scan; unlocked in the pass after this loop)
			ret = kIOReturnSuccess;
			break;
		}


		case kIOTrackingGetTracking:
		{
			if (kIOTrackingQueueTypeMap & queue->type) {
				break;
			}

			if (!data) {
				data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
			}

			IOTRecursiveLockLock(&queue->lock);
			num = queue->siteCount;
			idx = 0;
			for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
				queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
				{
					assert(idx < num);
					idx++;

					size_t tsize[2];
					uint32_t count = site->count;
					tsize[0] = site->size[0];
					tsize[1] = site->size[1];

					// When filtering by vm tag and/or zone size, recompute
					// count/size from the live instances that match.
					if (intag || inzsize) {
						uintptr_t addr;
						vm_size_t size, zoneSize;
						vm_tag_t tag;

						if (kIOTrackingQueueTypeAlloc & queue->type) {
							addresses = false;
							count = 0;
							tsize[0] = tsize[1] = 0;
							queue_iterate(&site->instances, instance, IOTracking *, link)
							{
								if (site->addresses) {
									for (uint32_t hashIdx = 0; !addresses && (hashIdx < queue->numSiteQs); hashIdx++) {
										if (instance == site->addresses[hashIdx]) {
											addresses = true;
										}
									}
								}

								// Address-tracked entries store the
								// allocation address one's-complemented.
								if (addresses) {
									addr = ~((IOTrackingAddress *)instance)->address;
								} else {
									addr = (uintptr_t) (instance + 1);
								}

								kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize);
								if (KERN_SUCCESS != kr) {
									continue;
								}

								if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) {
									continue;
								}
								if (inzsize && (inzsize != zoneSize)) {
									continue;
								}

								count++;
								tsize[0] += size;
							}
						} else {
							// Non-alloc queues: filter on the site's tag only.
							if (!intag || inzsize || (intag != site->tag)) {
								continue;
							}
						}
					}

					if (!count) {
						continue;
					}
					// Note: `size` here is the function parameter (min-size
					// filter), shadowed inside the intag/inzsize branch above.
					if (size && ((tsize[0] + tsize[1]) < size)) {
						continue;
					}
					siteInfo.count = count;
					siteInfo.size[0] = tsize[0];
					siteInfo.size[1] = tsize[1];
					CopyOutBacktraces(site, &siteInfo);
					data->appendBytes(&siteInfo, sizeof(siteInfo));
				}
			}
			assert(idx == num);
			IOTRecursiveLockUnlock(&queue->lock);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingGetMappings:
		{
			if (!(kIOTrackingQueueTypeMap & queue->type)) {
				break;
			}
			if (!data) {
				data = OSData::withCapacity((unsigned int) page_size);
			}

			IOTRecursiveLockLock(&queue->lock);
			num = queue->siteCount;
			idx = 0;
			for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
				queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
				{
					assert(idx < num);
					idx++;

					kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
					if (kIOReturnSuccess != kr) {
						continue;
					}
					// Apply the pid and minimum-size filters.
					if (proc && (mapTask != proc_task(proc))) {
						continue;
					}
					if (size && (mapSize < size)) {
						continue;
					}

					siteInfo.count = 1;
					siteInfo.size[0] = mapSize;
					siteInfo.address = mapAddress;
					siteInfo.addressPID = task_pid(mapTask);
					siteInfo.btPID = user->btPID;

					// bt[0]: kernel backtrace (unslid); bt[1]: user backtrace,
					// stored as 32- or 64-bit frames depending on the task.
					for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
						siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
					}
					uint32_t * bt32 = (typeof(bt32)) & user->btUser[0];
					uint64_t * bt64 = (typeof(bt64))((void *) &user->btUser[0]);
					for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
						if (j >= user->userCount) {
							siteInfo.bt[1][j] = 0;
						} else if (user->user32) {
							siteInfo.bt[1][j] = bt32[j];
						} else {
							siteInfo.bt[1][j] = bt64[j];
						}
					}
					data->appendBytes(&siteInfo, sizeof(siteInfo));
				}
			}
			assert(idx == num);
			IOTRecursiveLockUnlock(&queue->lock);
			ret = kIOReturnSuccess;
			break;
		}

		default:
			ret = kIOReturnUnsupported;
			break;
		}
	}

	// For a leaks request: run the reference scan (IOTrackingLeaks consumes
	// `data` and returns a new OSData), then release the per-queue locks
	// that the collection pass above intentionally left held.
	if ((kIOTrackingLeaks == selector) && data) {
		data = IOTrackingLeaks(data);
		queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
		{
			if (SkipName(options, queue->name, namesLen, names)) {
				continue;
			}
			if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
				continue;
			}
			IOTRecursiveLockUnlock(&queue->lock);
		}
	}

	lck_mtx_unlock(gIOTrackingLock);

	// Additionally run the zalloc zone leak checker for each name in the
	// filter list, appending its findings in the same record format.
	if ((kIOTrackingLeaks == selector) && namesLen && names) {
		const char * scan;
		const char * next;
		uint8_t sLen;

		if (!data) {
			data = OSData::withCapacity(4096 * sizeof(uintptr_t));
		}

		// <len><name>...<len><name><0>
		scan = names;
		do{
			sLen = ((uint8_t) scan[0]);
			scan++;
			next = scan + sLen;
			if (next >= (names + namesLen)) {
				break;
			}
			kr = zone_leaks(scan, sLen, ^(uint32_t count, uint32_t eSize, btref_t ref) {
				IOTrackingCallSiteInfo siteInfo = {
					.count = count,
					.size[0] = eSize * count,
				};

				btref_decode_unslide(ref, siteInfo.bt[0]);

				data->appendBytes(&siteInfo, sizeof(siteInfo));
			});
			// KERN_INVALID_NAME (unknown zone) is not an error for the
			// overall request; other failures map to kIOReturnVMError.
			if (KERN_SUCCESS == kr) {
				ret = kIOReturnSuccess;
			} else if (KERN_INVALID_NAME != kr) {
				ret = kIOReturnVMError;
			}
			scan = next;
		}while (scan < (names + namesLen));
	}

	// Sort the result records for the data-producing selectors.
	if (data) {
		switch (selector) {
		case kIOTrackingLeaks:
		case kIOTrackingGetTracking:
		case kIOTrackingGetMappings:
		{
			IOTrackingCallSiteInfo * siteInfos;
			siteInfos = (typeof(siteInfos))data->getBytesNoCopy();
			num = (data->getLength() / sizeof(*siteInfos));
			qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
			break;
		}
		default: assert(false); break;
		}
	}

	// NOTE(review): `result` is NULL-checked at function entry but
	// dereferenced unconditionally here — confirm every caller passes a
	// valid pointer (the current in-file caller does).
	*result = data;
	if (proc) {
		proc_rele(proc);
	}

#endif /* IOTRACKING */

	return ret;
}
1566
1567/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1568
1569#include <IOKit/IOKitDiagnosticsUserClient.h>
1570
1571/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1572
// IOKitDiagnosticsClient derives from IOUserClient2022; `super` is the
// conventional IOKit alias used by the metaclass machinery below.
#undef super
#define super IOUserClient2022

OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient2022)
1577
1578/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1579
1580IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
1581{
1582#if IOTRACKING
1583 IOKitDiagnosticsClient * inst;
1584
1585 inst = new IOKitDiagnosticsClient;
1586 if (inst && !inst->init()) {
1587 inst->release();
1588 inst = NULL;
1589 }
1590
1591 inst->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
1592 inst->setProperty(kIOUserClientDefaultLockingSetPropertiesKey, kOSBooleanTrue);
1593 inst->setProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey, kOSBooleanTrue);
1594
1595 inst->setProperty(kIOUserClientEntitlementsKey, kOSBooleanFalse);
1596
1597 return inst;
1598#else
1599 return NULL;
1600#endif
1601}
1602
1603IOReturn
1604IOKitDiagnosticsClient::clientClose(void)
1605{
1606 terminate();
1607 return kIOReturnSuccess;
1608}
1609
1610IOReturn
1611IOKitDiagnosticsClient::setProperties(OSObject * properties)
1612{
1613 IOReturn kr = kIOReturnUnsupported;
1614 return kr;
1615}
1616
1617
1618IOReturn
1619IOTrackingMethodDispatched(OSObject * target, void * reference,
1620 IOExternalMethodArguments * args)
1621{
1622 IOReturn ret = kIOReturnBadArgument;
1623 const IOKitDiagnosticsParameters * params;
1624 const char * names;
1625 size_t namesLen;
1626 OSObject * result;
1627
1628 if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) {
1629 return kIOReturnBadArgument;
1630 }
1631 params = (typeof(params))args->structureInput;
1632 if (!params) {
1633 return kIOReturnBadArgument;
1634 }
1635
1636 names = NULL;
1637 namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters);
1638 if (namesLen) {
1639 names = (typeof(names))(params + 1);
1640 }
1641
1642 ret = IOTrackingDebug(selector: args->selector, options: params->options, value: params->value, intag: params->tag, inzsize: params->zsize, names, namesLen, size: params->size, result: &result);
1643 if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) {
1644 *args->structureVariableOutputData = result;
1645 } else if (result) {
1646 result->release();
1647 }
1648 return ret;
1649}
1650
1651IOReturn
1652IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque * args)
1653{
1654 static const IOExternalMethodDispatch2022 dispatchArray[] = {
1655 [kIOTrackingGetTracking] = {
1656 .function = &IOTrackingMethodDispatched,
1657 .checkScalarInputCount = 0,
1658 .checkStructureInputSize = kIOUCVariableStructureSize,
1659 .checkScalarOutputCount = 0,
1660 .checkStructureOutputSize = 0,
1661 .allowAsync = false,
1662 .checkEntitlement = NULL,
1663 },
1664 [kIOTrackingGetMappings] = {
1665 .function = &IOTrackingMethodDispatched,
1666 .checkScalarInputCount = 0,
1667 .checkStructureInputSize = kIOUCVariableStructureSize,
1668 .checkScalarOutputCount = 0,
1669 .checkStructureOutputSize = 0,
1670 .allowAsync = false,
1671 .checkEntitlement = NULL,
1672 },
1673 [kIOTrackingResetTracking] = {
1674 .function = &IOTrackingMethodDispatched,
1675 .checkScalarInputCount = 0,
1676 .checkStructureInputSize = kIOUCVariableStructureSize,
1677 .checkScalarOutputCount = 0,
1678 .checkStructureOutputSize = 0,
1679 .allowAsync = false,
1680 .checkEntitlement = NULL,
1681 },
1682 [kIOTrackingStartCapture] = {
1683 .function = &IOTrackingMethodDispatched,
1684 .checkScalarInputCount = 0,
1685 .checkStructureInputSize = kIOUCVariableStructureSize,
1686 .checkScalarOutputCount = 0,
1687 .checkStructureOutputSize = 0,
1688 .allowAsync = false,
1689 .checkEntitlement = NULL,
1690 },
1691 [kIOTrackingStopCapture] = {
1692 .function = &IOTrackingMethodDispatched,
1693 .checkScalarInputCount = 0,
1694 .checkStructureInputSize = kIOUCVariableStructureSize,
1695 .checkScalarOutputCount = 0,
1696 .checkStructureOutputSize = 0,
1697 .allowAsync = false,
1698 .checkEntitlement = NULL,
1699 },
1700 [kIOTrackingSetMinCaptureSize] = {
1701 .function = &IOTrackingMethodDispatched,
1702 .checkScalarInputCount = 0,
1703 .checkStructureInputSize = kIOUCVariableStructureSize,
1704 .checkScalarOutputCount = 0,
1705 .checkStructureOutputSize = 0,
1706 .allowAsync = false,
1707 .checkEntitlement = NULL,
1708 },
1709 [kIOTrackingLeaks] = {
1710 .function = &IOTrackingMethodDispatched,
1711 .checkScalarInputCount = 0,
1712 .checkStructureInputSize = kIOUCVariableStructureSize,
1713 .checkScalarOutputCount = 0,
1714 .checkStructureOutputSize = 0,
1715 .allowAsync = false,
1716 .checkEntitlement = NULL,
1717 },
1718 };
1719
1720 return dispatchExternalMethod(selector, arguments: args, dispatchArray, dispatchArrayCount: sizeof(dispatchArray) / sizeof(dispatchArray[0]), target: this, NULL);
1721}
1722
1723/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1724