/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <kern/btlog.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <mach/vm_param.h>
#define _SYS_TYPES_H_ /* define the include guard up front so md5.h doesn't pull in sys/types.h */
#include <libkern/crypto/md5.h>
#include <libkern/crypto/crypto_internal.h>

/*
 * Since all records are located contiguously in memory,
 * we use indices to them as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
#define BTLOG_MAX_RECORDS (0xFFFFFF /* 16777215 */)
#define BTLOG_RECORDINDEX_NONE (0xFFFFFF)
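/*
 * Note: record indices are 24 bits wide (see btlog_recordindex_t below);
 * valid indices run from 0 to numrecords - 1, so the 0xFFFFFF sentinel
 * never collides with a live record.
 */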

/*
 * Each record is a stack with a reference count and a list of
 * log elements that refer to it.
 *
 * Each log element is placed in a hash bucket that is contained
 * within the btlog structure. It contains the index to the record
 * that it references.
 *
 * So to go from an address to the corresponding stack: hash the address,
 * find the hash head, and traverse the chain of log elements
 * until you find the element whose address matches yours
 * (if it exists); otherwise create a new element to hold the new address.
 */
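
/*
 * Illustrative sketch (not part of the build): the lookup walk described
 * above, assuming an already-locked btlog_t * called "btlog" in LEAKS mode
 * and the helpers declared later in this file. Element addresses are stored
 * one's-complemented (see btlog_add_entry), hence the '~'.
 *
 *	btlog_record_t *record = NULL;
 *	uint32_t hashidx = calculate_hashidx_for_element((uintptr_t)addr, btlog);
 *	btlog_element_t *he = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
 *	while (he != NULL && ~he->elem != (uintptr_t)addr)
 *		he = TAILQ_NEXT(he, element_hash_link);
 *	if (he != NULL)
 *		record = lookup_btrecord(btlog, he->recindex);
 */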

#define ELEMENT_HASH_BUCKET_COUNT (256)
#define BTLOG_HASHELEMINDEX_NONE BTLOG_RECORDINDEX_NONE

#define ZELEMS_DEFAULT (8000)
size_t zelems_count = 0;

typedef uint32_t btlog_recordindex_t; /* only 24 bits used */

/*
 * Queue head for the queue of elements connected to a particular record (stack).
 * For quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode.
 */
TAILQ_HEAD(_element_record_queue, btlog_element);

/*
 * Queue head for the queue of elements that hash to the same bucket.
 * For quick removal of the oldest element ever logged. Useful for CORRUPTION mode, where we use only a single bucket, i.e. FIFO.
 */
TAILQ_HEAD(_element_hash_queue, btlog_element);

typedef struct btlog_record {
	btlog_recordindex_t next:24,
	                    operation:8;
	uint32_t ref_count;
	uint32_t bthash;
	struct _element_record_queue element_record_queue;
	void *bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;

typedef struct btlog_element {
	btlog_recordindex_t recindex:24,
	                    operation:8;
	uintptr_t elem;
	TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */

	TAILQ_ENTRY(btlog_element) element_hash_link; /* Links to other elements in the same hash chain.
	                                               * During LEAKS mode, this is used as a singly-linked list because
	                                               * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads.
	                                               *
	                                               * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list.
	                                               */
} btlog_element_t;

struct btlog {
	vm_address_t btlog_buffer; /* all memory for this btlog_t */
	vm_size_t btlog_buffersize;

	uintptr_t btrecords; /* use btlog_recordindex_t to lookup */
	size_t btrecord_btdepth; /* BT entries per record */
	size_t btrecord_size;

	btlog_recordindex_t head; /* active record list */
	btlog_recordindex_t tail;
	btlog_recordindex_t activerecord;
	btlog_recordindex_t freelist_records;

	size_t active_record_count;
	size_t active_element_count;
	btlog_element_t *freelist_elements;
	union {
		btlog_element_t **elem_recindex_hashtbl; /* LEAKS mode: We use an array of ELEMENT_HASH_BUCKET_COUNT buckets. */
		struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: We use a single hash bucket, i.e. queue. */
	} elem_linkage_un;

	decl_simple_lock_data(,btlog_lock);
	boolean_t caller_will_remove_entries_for_element; /* If TRUE, the caller wants to track abandoned/leaked elements
	                                                   * and will remove entries explicitly. This flag determines which
	                                                   * data structure we use for the elem_linkage_un union above.
	                                                   */
};

extern boolean_t vm_kernel_ready;
extern boolean_t kmem_alloc_ready;

#define lookup_btrecord(btlog, index) \
	((btlog_record_t *)((btlog)->btrecords + (index) * (btlog)->btrecord_size))
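
/*
 * For example (illustrative numbers): with a btrecord_size of 48 bytes,
 * record index 3 resolves to btlog->btrecords + 144. A pointer computed
 * from BTLOG_RECORDINDEX_NONE is never dereferenced; callers check for
 * the sentinel first.
 */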

uint32_t calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog);
uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount);

void btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *hash_elem);
btlog_element_t* btlog_get_elem_from_freelist(btlog_t *btlog);

uint32_t
lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	size_t i = 0;
	boolean_t stack_matched = TRUE;

	assert(btcount);
	assert(bt);

	recindex = btlog->head;
	record = lookup_btrecord(btlog, recindex);
	while (recindex != BTLOG_RECORDINDEX_NONE) {
		assert(record->bthash);
		assert(!TAILQ_EMPTY(&record->element_record_queue));
		if (record->bthash == md5_hash) {

			/*
			 * Make sure that the incoming stack actually matches the
			 * stack in this record. Since we only save off part of
			 * the MD5 hash, collisions are possible.
			 * The comparison isn't costly because, when hashes collide,
			 * the first few frames usually differ.
			 */

			stack_matched = TRUE;

			if (btcount < btlog->btrecord_btdepth) {
				if (record->bt[btcount] != NULL) {
					/*
					 * If the stack depth passed in is smaller than
					 * the recorded stack and we have a valid pointer
					 * in the recorded stack at that depth, then we
					 * don't need to do any further checks.
					 */
					stack_matched = FALSE;
					goto next;
				}
			}

			for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
				if (record->bt[i] != bt[i]) {
					stack_matched = FALSE;
					goto next;
				}
			}

			if (stack_matched == TRUE) {
				break;
			}
		}
next:
		recindex = record->next;
		record = lookup_btrecord(btlog, recindex);
	}

	return recindex;
}

uint32_t
calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog)
{
	if (btlog->caller_will_remove_entries_for_element) {
		uint32_t addr = 0;

		addr = (uint32_t) ((elem & 0xFF00) >> 0x8);

		return addr;
	} else {
		return 0;
	}
}
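
/*
 * Example (illustrative): for elem == 0xffffff8012345678, the expression
 * above yields (0x5678 & 0xFF00) >> 8 == 0x56, i.e. one of the
 * ELEMENT_HASH_BUCKET_COUNT (256) buckets. In CORRUPTION mode every element
 * lands in bucket 0, matching the single-queue layout of elem_linkage_un.
 */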

static void
btlog_lock(btlog_t *btlog)
{
	simple_lock(&btlog->btlog_lock);
}

static void
btlog_unlock(btlog_t *btlog)
{
	simple_unlock(&btlog->btlog_lock);
}

btlog_t *
btlog_create(size_t numrecords,
             size_t record_btdepth,
             boolean_t caller_will_remove_entries_for_element)
{
	btlog_t *btlog;
	vm_size_t buffersize_needed = 0, elemsize_needed = 0;
	vm_address_t buffer = 0, elem_buffer = 0, elem_hash_buffer = 0;
	size_t i = 0;
	kern_return_t ret;
	size_t btrecord_size = 0;
	uintptr_t free_elem = 0, next_free_elem = 0;

	if (vm_kernel_ready && !kmem_alloc_ready)
		return NULL;

	if (numrecords > BTLOG_MAX_RECORDS)
		return NULL;

	if (numrecords == 0)
		return NULL;

	if (record_btdepth > BTLOG_MAX_DEPTH)
		return NULL;

	/* btlog_record_t is variable-sized, calculate needs now */
	btrecord_size = sizeof(btlog_record_t)
	        + sizeof(void *) * record_btdepth;

	buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
	buffersize_needed = round_page(buffersize_needed);

	if (zelems_count == 0) {
		zelems_count = ((max_mem + (1024*1024*1024) /*GB*/) >> 30) * ZELEMS_DEFAULT;

		if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) {
			/*
			 * Need a max? With this scheme, it should be possible to tune the default
			 * so that we don't need a boot-arg to request more elements.
			 */
			printf("Set number of log elements per btlog to: %zu\n", zelems_count);
		}
	}
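
	/*
	 * Illustrative example of the sizing above: with 4GB of memory,
	 * ((4GB + 1GB) >> 30) == 5, so zelems_count == 5 * ZELEMS_DEFAULT
	 * == 40000 log elements per btlog.
	 */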
	elemsize_needed = sizeof(btlog_element_t) * zelems_count;
	elemsize_needed = round_page(elemsize_needed);

	/* since rounding to a page size might hold more, recalculate */
	numrecords = MIN(BTLOG_MAX_RECORDS,
	        (buffersize_needed - sizeof(btlog_t))/btrecord_size);

	if (kmem_alloc_ready) {
		ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS)
			return NULL;

		ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			buffer = 0;
			return NULL;
		}

		if (caller_will_remove_entries_for_element == TRUE) {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
		} else {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, 2 * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
		}

		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			buffer = 0;

			kmem_free(kernel_map, elem_buffer, elemsize_needed);
			elem_buffer = 0;
			return NULL;
		}

	} else {
		buffer = (vm_address_t)pmap_steal_memory(buffersize_needed);
		elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed);
		if (caller_will_remove_entries_for_element == TRUE) {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*));
		} else {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(2 * sizeof(btlog_element_t*));
		}
		ret = KERN_SUCCESS;
	}

	btlog = (btlog_t *)buffer;
	btlog->btlog_buffer = buffer;
	btlog->btlog_buffersize = buffersize_needed;
	btlog->freelist_elements = (btlog_element_t *)elem_buffer;

	simple_lock_init(&btlog->btlog_lock, 0);

	btlog->caller_will_remove_entries_for_element = caller_will_remove_entries_for_element;

	if (caller_will_remove_entries_for_element == TRUE) {
		btlog->elem_linkage_un.elem_recindex_hashtbl = (btlog_element_t **)elem_hash_buffer;
	} else {
		btlog->elem_linkage_un.element_hash_queue = (struct _element_hash_queue*) elem_hash_buffer;
		TAILQ_INIT(btlog->elem_linkage_un.element_hash_queue);
	}

	btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
	btlog->btrecord_btdepth = record_btdepth;
	btlog->btrecord_size = btrecord_size;

	btlog->head = BTLOG_RECORDINDEX_NONE;
	btlog->tail = BTLOG_RECORDINDEX_NONE;
	btlog->active_record_count = 0;
	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	if (caller_will_remove_entries_for_element == TRUE) {
		/* only LEAKS mode allocated all ELEMENT_HASH_BUCKET_COUNT bucket heads;
		 * zeroing them in CORRUPTION mode would clobber the TAILQ head initialized above */
		for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) {
			btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0;
		}
	}

	/* populate freelist_records with all records in order */
	btlog->freelist_records = 0;
	for (i = 0; i < (numrecords - 1); i++) {
		btlog_record_t *rec = lookup_btrecord(btlog, i);
		rec->next = (btlog_recordindex_t)(i + 1);
	}
	lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

	/* populate freelist_elements with all elements in order */
	free_elem = (uintptr_t)btlog->freelist_elements;

	for (i = 0; i < (zelems_count - 1); i++) {
		next_free_elem = free_elem + sizeof(btlog_element_t);
		*(uintptr_t*)free_elem = next_free_elem;
		free_elem = next_free_elem;
	}
	/* terminate the chain in the last element (free_elem, which is valid even if zelems_count == 1) */
	*(uintptr_t*)free_elem = BTLOG_HASHELEMINDEX_NONE;

	return btlog;
}
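
/*
 * Usage sketch (illustrative only; MY_OP_ALLOC and the variables are
 * hypothetical): a client that wants LEAKS-style tracking might do:
 *
 *	btlog_t *log = btlog_create(4000, 15, TRUE);
 *	if (log != NULL) {
 *		btlog_add_entry(log, elem_addr, MY_OP_ALLOC, bt, btcount);
 *		...
 *		btlog_remove_entries_for_element(log, elem_addr);
 *	}
 *
 * Passing FALSE for the last argument selects CORRUPTION-style FIFO
 * age-out instead, in which case btlog_remove_entries_for_element must
 * not be called (it panics).
 */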

/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
	btlog_recordindex_t recindex = btlog->freelist_records;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on freelist */
		return BTLOG_RECORDINDEX_NONE;
	} else {
		/* remove the head of the freelist_records */
		btlog_record_t *record = lookup_btrecord(btlog, recindex);
		btlog->freelist_records = record->next;
		return recindex;
	}
}

static void
btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	btlog_recordindex_t precindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *precord = NULL, *record = NULL;

	record = lookup_btrecord(btlog, recindex);

	assert(TAILQ_EMPTY(&record->element_record_queue));

	record->bthash = 0;

	precindex = btlog->head;
	precord = lookup_btrecord(btlog, precindex);

	if (precindex == recindex) {
		btlog->head = precord->next;
		btlog->active_record_count--;

		record->next = btlog->freelist_records;
		btlog->freelist_records = recindex;

		if (btlog->head == BTLOG_RECORDINDEX_NONE) {
			/* active list is now empty, update tail */
			btlog->tail = BTLOG_RECORDINDEX_NONE;
			assert(btlog->active_record_count == 0);
		}
	} else {
		while (precindex != BTLOG_RECORDINDEX_NONE) {
			if (precord->next == recindex) {
				precord->next = record->next;
				btlog->active_record_count--;

				record->next = btlog->freelist_records;
				btlog->freelist_records = recindex;

				if (btlog->tail == recindex) {
					btlog->tail = precindex;
				}
				break;
			} else {
				precindex = precord->next;
				precord = lookup_btrecord(btlog, precindex);
			}
		}
	}
}

/* Assumes btlog is already locked */
static void
btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict)
{
	btlog_recordindex_t recindex = btlog->head;
	btlog_record_t *record = NULL;
	btlog_element_t *recelem = NULL;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on active list */
		panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t) btlog);
	} else {
		while (num_elements_to_evict) {
			/*
			 * LEAKS: reap the oldest element within the record with the lowest refs.
			 * CORRUPTION: reap the oldest element overall and drop its reference on the record.
			 */

			if (btlog->caller_will_remove_entries_for_element) {
				uint32_t max_refs_threshold = UINT32_MAX;
				btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0;

				prev_evictindex = evict_index = btlog->head;
				precindex = recindex = btlog->head;

				while (recindex != BTLOG_RECORDINDEX_NONE) {
					record = lookup_btrecord(btlog, recindex);

					if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) {
						/* skip this record */
					} else {
						prev_evictindex = precindex;
						evict_index = recindex;
						max_refs_threshold = record->ref_count;
					}

					if (record->next != BTLOG_RECORDINDEX_NONE) {
						precindex = recindex;
					}

					recindex = record->next;
				}

				recindex = evict_index;
				assert(recindex != BTLOG_RECORDINDEX_NONE);
				record = lookup_btrecord(btlog, recindex);

				recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
			} else {
				recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
				recindex = recelem->recindex;
				record = lookup_btrecord(btlog, recindex);
			}

			/*
			 * Here we have the element to drop (recelem), its record and the record index.
			 */

			while (recelem && num_elements_to_evict) {
				TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

				if (btlog->caller_will_remove_entries_for_element) {
					btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;
					uint32_t hashidx = 0;

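					/* element addresses are stored one's-complemented (see btlog_add_entry), so invert to recover the address */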
					hashidx = calculate_hashidx_for_element(~recelem->elem, btlog);

					prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
					while (hashelem != NULL) {
						if (hashelem == recelem)
							break;
						else {
							prev_hashelem = hashelem;
							hashelem = TAILQ_NEXT(hashelem, element_hash_link);
						}
					}

					if (hashelem == NULL) {
						panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t) record);
					}

					if (prev_hashelem != hashelem) {
						TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
					} else {
						btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
					}
				} else {
					TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link);
				}

				btlog_add_elem_to_freelist(btlog, recelem);
				btlog->active_element_count--;

				num_elements_to_evict--;

				assert(record->ref_count);

				record->ref_count--;

				if (record->ref_count == 0) {
					btlog_add_record_to_freelist(btlog, recindex);

					/*
					 * LEAKS: All done with this record. Need the next least popular record.
					 * CORRUPTION: We don't care about records. We'll just pick the next oldest element.
					 */

					if (btlog->caller_will_remove_entries_for_element) {
						break;
					}
				}

				if (btlog->caller_will_remove_entries_for_element) {
					recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
				} else {
					recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
					recindex = recelem->recindex;
					record = lookup_btrecord(btlog, recindex);
				}
			}
		}
	}
}

/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	assert(recindex != BTLOG_RECORDINDEX_NONE);

	if (btlog->head == BTLOG_RECORDINDEX_NONE) {
		/* empty active list, update both head and tail */
		btlog->head = btlog->tail = recindex;
	} else {
		btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
		record->next = recindex;
		btlog->tail = recindex;
	}
	btlog->active_record_count++;
}

btlog_element_t*
btlog_get_elem_from_freelist(btlog_t *btlog)
{
	btlog_element_t *free_elem = NULL;

retry:
	free_elem = btlog->freelist_elements;

	if ((uintptr_t)free_elem == BTLOG_HASHELEMINDEX_NONE) {
		/* nothing on freelist */
		btlog_evict_elements_from_record(btlog, 1);
		goto retry;
	} else {
		/* remove the head of the freelist */
		uintptr_t next_elem = *(uintptr_t*)free_elem;
		btlog->freelist_elements = (btlog_element_t *)next_elem;
		return free_elem;
	}
}

void
btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem)
{
	btlog_element_t *free_elem = btlog->freelist_elements;

	TAILQ_NEXT(elem, element_hash_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;
	TAILQ_NEXT(elem, element_record_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;

	*(uintptr_t*)elem = (uintptr_t)free_elem;
	btlog->freelist_elements = elem;
}
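
/*
 * Note on the freelist encoding: while an element is free, the first word
 * of the btlog_element_t (overlaying the recindex/operation bitfield) is
 * reused as the "next free" pointer, so free elements need no extra link
 * storage. The chain ends at BTLOG_HASHELEMINDEX_NONE (see btlog_create
 * and btlog_get_elem_from_freelist).
 */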

void
btlog_add_entry(btlog_t *btlog,
                void *element,
                uint8_t operation,
                void *bt[],
                size_t btcount)
{
	btlog_recordindex_t recindex = 0;
	btlog_record_t *record = NULL;
	size_t i;
	u_int32_t md5_buffer[4];
	MD5_CTX btlog_ctx;
	uint32_t hashidx = 0;

	btlog_element_t *hashelem = NULL;

	if (g_crypto_funcs == NULL)
		return;

	btlog_lock(btlog);

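	/*
	 * Hash only the frames we can store, so identical (possibly truncated)
	 * stacks share one record; the first 32 bits of the MD5 digest
	 * (md5_buffer[0]) serve as the record's bthash.
	 */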
	MD5Init(&btlog_ctx);
	for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
		MD5Update(&btlog_ctx, (u_char *) &bt[i], sizeof(bt[i]));
	}
	MD5Final((u_char *) &md5_buffer, &btlog_ctx);

	recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount);

	if (recindex != BTLOG_RECORDINDEX_NONE) {
		record = lookup_btrecord(btlog, recindex);
		record->ref_count++;
		assert(record->operation == operation);
	} else {
retry:
		/* If there's a free record, use it */
		recindex = btlog_get_record_from_freelist(btlog);
		if (recindex == BTLOG_RECORDINDEX_NONE) {
			/* Use the first active record (FIFO age-out) */
			btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t))/sizeof(btlog_element_t)));
			goto retry;
		}

		record = lookup_btrecord(btlog, recindex);

		/* we always add to the tail, so there is no next pointer */
		record->next = BTLOG_RECORDINDEX_NONE;
		record->operation = operation;
		record->bthash = md5_buffer[0];
		record->ref_count = 1;
		TAILQ_INIT(&record->element_record_queue);

		for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
			record->bt[i] = bt[i];
		}

		for (; i < btlog->btrecord_btdepth; i++) {
			record->bt[i] = NULL;
		}

		btlog_append_record_to_activelist(btlog, recindex);
	}

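	/*
	 * Pin this record: if the element freelist is empty, the allocation
	 * below triggers eviction, which must not reap this record while it
	 * has no elements queued yet.
	 */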
	btlog->activerecord = recindex;

	hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
	hashelem = btlog_get_elem_from_freelist(btlog);

	assert(record->bthash);

	hashelem->elem = ~((uintptr_t)element);
	hashelem->operation = record->operation;
	hashelem->recindex = recindex;

	TAILQ_INSERT_HEAD(&record->element_record_queue, hashelem, element_record_link);

	if (btlog->caller_will_remove_entries_for_element) {
		TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
		btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem;
	} else {
		TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link);
	}

	btlog->active_element_count++;

	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	btlog_unlock(btlog);
}

void
btlog_remove_entries_for_element(btlog_t *btlog,
                                 void *element)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	uint32_t hashidx = 0;

	btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;

	if (btlog->caller_will_remove_entries_for_element == FALSE) {
		panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog);
	}

	if (g_crypto_funcs == NULL)
		return;

	btlog_lock(btlog);

	hashidx = calculate_hashidx_for_element((uintptr_t) element, btlog);
	prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];

	while (hashelem != NULL) {
		if (~hashelem->elem == (uintptr_t)element)
			break;
		else {
			prev_hashelem = hashelem;
			hashelem = TAILQ_NEXT(hashelem, element_hash_link);
		}
	}

	if (hashelem) {
		btlog_element_t *recelem = NULL;

		if (prev_hashelem != hashelem) {
			TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
		} else {
			btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
		}

		recindex = hashelem->recindex;
		record = lookup_btrecord(btlog, recindex);

		recelem = hashelem;
		TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

		btlog_add_elem_to_freelist(btlog, hashelem);
		btlog->active_element_count--;

		assert(record->ref_count);

		record->ref_count--;

		if (record->ref_count == 0) {
			btlog_add_record_to_freelist(btlog, recindex);
		}
	}

	btlog_unlock(btlog);
}

#if DEBUG || DEVELOPMENT

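/*
 * Resolves each caller-supplied zone element to its backtrace record in two
 * passes: first, each instance is mapped (via the element hash table) to its
 * record and tagged kInstanceFlagReferenced; then duplicate sites are
 * coalesced and reported through 'proc' with a per-site count.
 */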
void
btlog_copy_backtraces_for_elements(btlog_t * btlog,
                                   uintptr_t * instances,
                                   uint32_t * countp,
                                   uint32_t zoneSize,
                                   leak_site_proc proc,
                                   void * refCon)
{
    btlog_recordindex_t recindex;
    btlog_record_t * record;
    btlog_element_t * hashelem;
    uint32_t hashidx, idx, dups, numSites, siteCount;
    uintptr_t element, site;
    uint32_t count;

    btlog_lock(btlog);

    count = *countp;
    for (numSites = 0, idx = 0; idx < count; idx++)
    {
        element = instances[idx];

        if (kInstanceFlagReferenced & element) continue;
        element = INSTANCE_PUT(element) & ~kInstanceFlags;

        site = 0;
        hashidx = calculate_hashidx_for_element(element, btlog);
        hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
        while (hashelem != NULL)
        {
            if (~hashelem->elem == element) break;
            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
        }
        if (hashelem)
        {
            recindex = hashelem->recindex;
            site = (uintptr_t) lookup_btrecord(btlog, recindex);
        }
        if (site) element = (site | kInstanceFlagReferenced);
        instances[numSites] = INSTANCE_PUT(element);
        numSites++;
    }

    for (idx = 0; idx < numSites; idx++)
    {
        site = instances[idx];
        if (!site) continue;
        if (!(kInstanceFlagReferenced & site)) continue;
        for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++)
        {
            if (instances[dups] == site)
            {
                siteCount++;
                instances[dups] = 0;
            }
        }
        record = (typeof(record)) (INSTANCE_PUT(site) & ~kInstanceFlags);
        (*proc)(refCon, siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth);
    }

    *countp = numSites;

    btlog_unlock(btlog);
}

/*
 * Returns the number of records in the btlog struct.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
size_t
get_btlog_records_count(btlog_t *btlog)
{
	if (btlog->btlog_buffersize < sizeof(btlog_t)) {
		return 0;
	}
	return ((btlog->btlog_buffersize - sizeof(btlog_t))/btlog->btrecord_size);
}

/*
 * Copies out relevant info from btlog_record_t's to zone_btrecord_t's. 'numrecs' points to the number of records
 * the 'records' buffer can hold. Upon return 'numrecs' points to the number of records actually copied out.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
void
get_btlog_records(btlog_t *btlog, zone_btrecord_t *records, unsigned int *numrecs)
{
	unsigned int count, recs_copied, frame;
	zone_btrecord_t *current_rec;
	btlog_record_t *zstack_record;
	btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE;

	btlog_lock(btlog);

	count = 0;
	if (btlog->btlog_buffersize > sizeof(btlog_t)) {
		count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t))/btlog->btrecord_size);
	}
	/* Copy out only as many records as the pre-allocated buffer size permits. */
	if (count > *numrecs) {
		count = *numrecs;
	}
	zstack_index = btlog->head;

	current_rec = &records[0];
	recs_copied = 0;
	while (recs_copied < count && (zstack_index != BTLOG_RECORDINDEX_NONE)) {
		zstack_record = lookup_btrecord(btlog, zstack_index);
		current_rec->operation_type = (uint32_t)(zstack_record->operation);
		current_rec->ref_count = zstack_record->ref_count;

		frame = 0;
		while (frame < MIN(btlog->btrecord_btdepth, MAX_ZTRACE_DEPTH)) {
			current_rec->bt[frame] = (uint64_t)VM_KERNEL_UNSLIDE(zstack_record->bt[frame]);
			frame++;
		}

		zstack_index = zstack_record->next;
		recs_copied++;
		current_rec++;
	}
	*numrecs = recs_copied;

	btlog_unlock(btlog);
}

#endif /* DEBUG || DEVELOPMENT */