/*
 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory quickly.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/sdt.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>
#include <sys/kdebug.h>

#include <san/kasan.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

#define KALLOC_MAP_SIZE_MIN	(16 * 1024 * 1024)
#define KALLOC_MAP_SIZE_MAX	(128 * 1024 * 1024)
vm_map_t kalloc_map;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

/* how many times we couldn't allocate out of kalloc_map and fell back to kernel_map */
unsigned long kalloc_fallback_count;

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;
vm_size_t    kalloc_largest_allocated = 0;
uint64_t     kalloc_large_sum;

int	kalloc_fake_zone_index = -1; /* index of our fake zone in statistics arrays */

vm_offset_t	kalloc_map_min;
vm_offset_t	kalloc_map_max;
#ifdef	MUTEX_ZONE
/*
 * Diagnostic code to track mutexes separately rather than via the
 * power-of-two kalloc zones.
 */
zone_t lck_mtx_zone;
#endif

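/*
 * Ledger bookkeeping for the map-backed ("large") kalloc traffic below:
 * debit the current thread's ledger (the task_ledgers.tkm_shared entry)
 * when map memory is handed out, and credit it back when the memory is
 * freed.
 */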
static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)
{
	thread_t thr = current_thread();
	ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, bytes);
}

static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)
{
	thread_t thr = current_thread();
	ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, bytes);
}

/*
 * All allocations of size less than kalloc_max are rounded up to the next
 * nearest zone size.  This allocator is built on top of the zone allocator.
 * A zone is created for each potential size that we are willing to get in
 * small blocks.
 *
 * We assume that kalloc_max is not greater than 64K.
 *
 * Note that kalloc_max is somewhat confusingly named.  It represents the
 * first power of two for which no zone exists.  kalloc_max_prerounded is
 * the smallest allocation size, before rounding, for which no zone exists.
 *
 * Allocations of kalloc_kernmap_size bytes or more are made from the
 * kernel map rather than kalloc_map.
 */
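/*
 * As a worked example (assuming 4 KB pages; see kalloc_init() below):
 *
 *	kalloc_max            = PAGE_SIZE << 2        = 16384
 *	kalloc_max_prerounded = kalloc_max / 2 + 1    = 8193
 *	kalloc_kernmap_size   = (kalloc_max * 16) + 1 = 262145
 *
 * So requests up to 8192 bytes are zone-backed, requests from 8193 bytes
 * up to 256 KB are backed by kalloc_map, and anything at or above 262145
 * bytes goes straight to kernel_map.
 */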

#define KALLOC_MINALIGN	(1 << KALLOC_LOG2_MINALIGN)
#define KiB(x)	(1024 * (x))

static const struct kalloc_zone_config {
	int kzc_size;
	const char *kzc_name;
} k_zone_config[] = {
#define KZC_ENTRY(SIZE) { .kzc_size = (SIZE), .kzc_name = "kalloc." #SIZE }

#if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4
	/* 64-bit targets, generally */
	KZC_ENTRY(16),
	KZC_ENTRY(32),
	KZC_ENTRY(48),
	KZC_ENTRY(64),
	KZC_ENTRY(80),
	KZC_ENTRY(96),
	KZC_ENTRY(128),
	KZC_ENTRY(160),
	KZC_ENTRY(192),
	KZC_ENTRY(224),
	KZC_ENTRY(256),
	KZC_ENTRY(288),
	KZC_ENTRY(368),
	KZC_ENTRY(400),
	KZC_ENTRY(512),
	KZC_ENTRY(576),
	KZC_ENTRY(768),
	KZC_ENTRY(1024),
	KZC_ENTRY(1152),
	KZC_ENTRY(1280),
	KZC_ENTRY(1664),
	KZC_ENTRY(2048),
#elif KALLOC_MINSIZE == 8 && KALLOC_LOG2_MINALIGN == 3
	/* 32-bit targets, generally */
	KZC_ENTRY(8),
	KZC_ENTRY(16),
	KZC_ENTRY(24),
	KZC_ENTRY(32),
	KZC_ENTRY(40),
	KZC_ENTRY(48),
	KZC_ENTRY(64),
	KZC_ENTRY(72),
	KZC_ENTRY(88),
	KZC_ENTRY(112),
	KZC_ENTRY(128),
	KZC_ENTRY(192),
	KZC_ENTRY(256),
	KZC_ENTRY(288),
	KZC_ENTRY(384),
	KZC_ENTRY(440),
	KZC_ENTRY(512),
	KZC_ENTRY(576),
	KZC_ENTRY(768),
	KZC_ENTRY(1024),
	KZC_ENTRY(1152),
	KZC_ENTRY(1536),
	KZC_ENTRY(2048),
	KZC_ENTRY(2128),
	KZC_ENTRY(3072),
#else
#error	missing or invalid zone size parameters for kalloc
#endif

	/* all configurations get these zones */
	KZC_ENTRY(4096),
	KZC_ENTRY(6144),
	KZC_ENTRY(8192),
	KZC_ENTRY(16384),
	KZC_ENTRY(32768),
#undef KZC_ENTRY
};

#define MAX_K_ZONE	(int)(sizeof(k_zone_config) / sizeof(k_zone_config[0]))
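/*
 * For example, the 64-bit table above has 22 entries plus the 5 common
 * ones, so MAX_K_ZONE evaluates to 27 on those targets.
 */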

/*
 * Many kalloc() allocations are for small structures containing a few
 * pointers and longs - the k_zone_dlut[] direct lookup table, indexed by
 * size normalized to the minimum alignment, finds the right zone index
 * for them in one dereference.
 */

#define INDEX_ZDLUT(size)	\
	(((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN)
#define N_K_ZDLUT	(2048 / KALLOC_MINALIGN)
				/* covers sizes [0 .. 2048 - KALLOC_MINALIGN] */
#define MAX_SIZE_ZDLUT	((N_K_ZDLUT - 1) * KALLOC_MINALIGN)
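/*
 * Worked example (assuming KALLOC_MINALIGN == 16, the 64-bit case):
 *
 *	N_K_ZDLUT       = 2048 / 16      = 128 entries
 *	MAX_SIZE_ZDLUT  = 127 * 16       = 2032 bytes
 *	INDEX_ZDLUT(24) = (24 + 15) / 16 = 2
 *
 * so a 24-byte request is resolved with a single k_zone_dlut[2] load.
 */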

static int8_t k_zone_dlut[N_K_ZDLUT];	/* table of indices into k_zone[] */

/*
 * If there's no hit in the DLUT, then start searching from k_zindex_start.
 */
static int k_zindex_start;

static zone_t k_zone[MAX_K_ZONE];

/* #define KALLOC_DEBUG 1 */

/* forward declarations */

lck_grp_t kalloc_lck_grp;
lck_mtx_t kalloc_lock;

#define kalloc_spin_lock()	lck_mtx_lock_spin(&kalloc_lock)
#define kalloc_unlock()		lck_mtx_unlock(&kalloc_lock)


/* OSMalloc local data declarations */
static
queue_head_t	OSMalloc_tag_list;

lck_grp_t	*OSMalloc_tag_lck_grp;
lck_mtx_t	OSMalloc_tag_lock;

#define OSMalloc_tag_spin_lock()	lck_mtx_lock_spin(&OSMalloc_tag_lock)
#define OSMalloc_tag_unlock()		lck_mtx_unlock(&OSMalloc_tag_lock)


/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 * Initialize the memory allocator.  This should be called only
 * once on a system wide basis (i.e. first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size, kalloc_map_size;
	vm_map_kernel_flags_t vmk_flags;

	/*
	 * Scale the kalloc_map_size to physical memory size: stay below
	 * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel).
	 */
	kalloc_map_size = (vm_size_t)(sane_size >> 5);
#if !__LP64__
	if (kalloc_map_size > KALLOC_MAP_SIZE_MAX)
		kalloc_map_size = KALLOC_MAP_SIZE_MAX;
#endif /* !__LP64__ */
	if (kalloc_map_size < KALLOC_MAP_SIZE_MIN)
		kalloc_map_size = KALLOC_MAP_SIZE_MIN;

	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_permanent = TRUE;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE,
			       (VM_FLAGS_ANYWHERE),
			       vmk_flags,
			       VM_KERN_MEMORY_KALLOC,
			       &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	kalloc_map_min = min;
	kalloc_map_max = min + kalloc_map_size - 1;

	/*
	 * Create zones up to at least 4 pages because small page-multiples
	 * are common allocations.  Also ensure that zones up to size 16 KB
	 * exist.  This is desirable because messages are allocated with
	 * kalloc(), and messages up through size 8192 are common.
	 */
	kalloc_max = PAGE_SIZE << 2;
	if (kalloc_max < KiB(16)) {
		kalloc_max = KiB(16);
	}
	assert(kalloc_max <= KiB(64)); /* assumption made in size arrays */

	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* allocations larger than 16 times kalloc_max go directly to kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;
	kalloc_largest_allocated = kalloc_kernmap_size;

	/*
	 * Allocate a zone for each size we are going to handle.
	 */
	for (int i = 0; i < MAX_K_ZONE && (size = k_zone_config[i].kzc_size) < kalloc_max; i++) {
		k_zone[i] = zinit(size, size, size, k_zone_config[i].kzc_name);

		/*
		 * Don't charge the caller for the allocation, as we aren't sure how
		 * the memory will be handled.
		 */
		zone_change(k_zone[i], Z_CALLERACCT, FALSE);
#if VM_MAX_TAG_ZONES
		if (zone_tagging_on) zone_change(k_zone[i], Z_TAGS_ENABLED, TRUE);
#endif
		zone_change(k_zone[i], Z_KASAN_QUARANTINE, FALSE);
	}

	/*
	 * Build the Direct LookUp Table for small allocations
	 */
	size = 0;
	for (int i = 0; i <= N_K_ZDLUT; i++, size += KALLOC_MINALIGN) {
		int zindex = 0;

		while ((vm_size_t)k_zone_config[zindex].kzc_size < size)
			zindex++;

		if (i == N_K_ZDLUT) {
			k_zindex_start = zindex;
			break;
		}
		k_zone_dlut[i] = (int8_t)zindex;
	}
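
	/*
	 * At this point each DLUT slot i holds the index of the smallest
	 * zone that fits an (i * KALLOC_MINALIGN)-byte request, and
	 * k_zindex_start names the first zone past the table's reach;
	 * with the 64-bit size table above, for example, that is the
	 * index of kalloc.2048.
	 */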

#ifdef KALLOC_DEBUG
	printf("kalloc_init: k_zindex_start %d\n", k_zindex_start);

	/*
	 * Do a quick synthesis to see how well/badly we can
	 * find-a-zone for a given size.
	 * Useful when debugging/tweaking the array of zone sizes.
	 * Cache misses probably more critical than compare-branches!
	 */
	for (int i = 0; i < MAX_K_ZONE; i++) {
		vm_size_t testsize = (vm_size_t)k_zone_config[i].kzc_size - 1;
		int compare = 0;
		int zindex;

		if (testsize < MAX_SIZE_ZDLUT) {
			compare += 1;	/* 'if' (T) */

			long dindex = INDEX_ZDLUT(testsize);
			zindex = (int)k_zone_dlut[dindex];

		} else if (testsize < kalloc_max_prerounded) {

			compare += 2;	/* 'if' (F), 'if' (T) */

			zindex = k_zindex_start;
			while ((vm_size_t)k_zone_config[zindex].kzc_size < testsize) {
				zindex++;
				compare++;	/* 'while' (T) */
			}
			compare++;	/* 'while' (F) */
		} else
			break;	/* not zone-backed */

		zone_t z = k_zone[zindex];
		printf("kalloc_init: req size %4lu: %11s took %d compare%s\n",
		    (unsigned long)testsize, z->zone_name, compare,
		    compare == 1 ? "" : "s");
	}
#endif

	lck_grp_init(&kalloc_lck_grp, "kalloc.large", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&kalloc_lock, &kalloc_lck_grp, LCK_ATTR_NULL);
	OSMalloc_init();
#ifdef	MUTEX_ZONE
	lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024*256, 4096, "lck_mtx");
#endif
}

/*
 * Given an allocation size, return the kalloc zone it belongs to.
 * Direct LookUp Table variant.
 */
static __inline zone_t
get_zone_dlut(vm_size_t size)
{
	long dindex = INDEX_ZDLUT(size);
	int zindex = (int)k_zone_dlut[dindex];
	return (k_zone[zindex]);
}
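
/*
 * For instance (64-bit config), get_zone_dlut(24) computes dindex 2,
 * reads k_zone_dlut[2], and returns the kalloc.32 zone -- one table
 * load instead of a linear scan.
 */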

/* As above, but linear search k_zone_config[] for the next zone that fits. */

static __inline zone_t
get_zone_search(vm_size_t size, int zindex)
{
	assert(size < kalloc_max_prerounded);

	while ((vm_size_t)k_zone_config[zindex].kzc_size < size)
		zindex++;

	assert(zindex < MAX_K_ZONE &&
	    (vm_size_t)k_zone_config[zindex].kzc_size < kalloc_max);

	return (k_zone[zindex]);
}
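
/*
 * E.g. a 3000-byte request (too big for the DLUT, but still zone-backed
 * on targets where kalloc_max_prerounded exceeds it) starts scanning at
 * k_zindex_start and, with the 64-bit table, lands in kalloc.4096.
 */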

static vm_size_t
vm_map_lookup_kalloc_entry_locked(
	vm_map_t	map,
	void		*addr)
{
	boolean_t ret;
	vm_map_entry_t vm_entry = NULL;

	ret = vm_map_lookup_entry(map, (vm_map_offset_t)addr, &vm_entry);
	if (!ret) {
		panic("Attempting to lookup/free an address not allocated via kalloc! (vm_map_lookup_entry() failed map: %p, addr: %p)\n",
		    map, addr);
	}
	if (vm_entry->vme_start != (vm_map_offset_t)addr) {
		panic("Attempting to lookup/free the middle of a kalloc'ed element! (map: %p, addr: %p, entry: %p)\n",
		    map, addr, vm_entry);
	}
	if (!vm_entry->vme_atomic) {
		panic("Attempting to lookup/free an address not managed by kalloc! (map: %p, addr: %p, entry: %p)\n",
		    map, addr, vm_entry);
	}
	return (vm_entry->vme_end - vm_entry->vme_start);
}

#if KASAN_KALLOC
/*
 * KASAN kalloc stashes the original user-requested size away in the poisoned
 * area. Return that directly.
 */
vm_size_t
kalloc_size(void *addr)
{
	(void)vm_map_lookup_kalloc_entry_locked; /* silence warning */
	return kasan_user_size((vm_offset_t)addr);
}
#else
vm_size_t
kalloc_size(
	void		*addr)
{
	vm_map_t	map;
	vm_size_t	size;

	size = zone_element_size(addr, NULL);
	if (size) {
		return size;
	}
	if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) {
		map = kalloc_map;
	} else {
		map = kernel_map;
	}
	vm_map_lock_read(map);
	size = vm_map_lookup_kalloc_entry_locked(map, addr);
	vm_map_unlock_read(map);
	return size;
}
#endif

vm_size_t
kalloc_bucket_size(
	vm_size_t	size)
{
	zone_t		z;
	vm_map_t	map;

	if (size < MAX_SIZE_ZDLUT) {
		z = get_zone_dlut(size);
		return z->elem_size;
	}

	if (size < kalloc_max_prerounded) {
		z = get_zone_search(size, k_zindex_start);
		return z->elem_size;
	}

	if (size >= kalloc_kernmap_size)
		map = kernel_map;
	else
		map = kalloc_map;

	return vm_map_round_page(size, VM_MAP_PAGE_MASK(map));
}
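
/*
 * kalloc_bucket_size() lets callers size a request to what kalloc() will
 * actually hand back.  For example, kalloc_bucket_size(100) returns 128
 * on the 64-bit config, so a caller can ask for 128 bytes up front and
 * waste nothing.
 */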

#if KASAN_KALLOC
vm_size_t
kfree_addr(void *addr)
{
	vm_size_t origsz = kalloc_size(addr);
	kfree(addr, origsz);
	return origsz;
}
#else
vm_size_t
kfree_addr(
	void		*addr)
{
	vm_map_t	map;
	vm_size_t	size = 0;
	kern_return_t	ret;
	zone_t		z;

	size = zone_element_size(addr, &z);
	if (size) {
		DTRACE_VM3(kfree, vm_size_t, -1, vm_size_t, z->elem_size, void*, addr);
		zfree(z, addr);
		return size;
	}

	if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) {
		map = kalloc_map;
	} else {
		map = kernel_map;
	}
	if ((vm_offset_t)addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		panic("kfree on an address not in the kernel & kext address range! addr: %p\n", addr);
	}

	vm_map_lock(map);
	size = vm_map_lookup_kalloc_entry_locked(map, addr);
	ret = vm_map_remove_locked(map,
	    vm_map_trunc_page((vm_map_offset_t)addr,
	        VM_MAP_PAGE_MASK(map)),
	    vm_map_round_page((vm_map_offset_t)addr + size,
	        VM_MAP_PAGE_MASK(map)),
	    VM_MAP_REMOVE_KUNWIRE);
	if (ret != KERN_SUCCESS) {
		panic("vm_map_remove_locked() failed for kalloc vm_entry! addr: %p, map: %p ret: %d\n",
		    addr, map, ret);
	}
	vm_map_unlock(map);
	DTRACE_VM3(kfree, vm_size_t, -1, vm_size_t, size, void*, addr);

	kalloc_spin_lock();
	kalloc_large_total -= size;
	kalloc_large_inuse--;
	kalloc_unlock();

	KALLOC_ZINFO_SFREE(size);
	return size;
}
#endif
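
/*
 * kfree_addr() is the "free without a size" path: zone-backed elements
 * are sized via zone_element_size(), and large allocations are found by
 * looking up the atomic VM map entry created when they were allocated.
 */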

void *
kalloc_canblock(
	vm_size_t		*psize,
	boolean_t		canblock,
	vm_allocation_site_t	*site)
{
	zone_t z;
	vm_size_t size;
	void *addr;
	vm_tag_t tag;

	tag = VM_KERN_MEMORY_KALLOC;
	size = *psize;

#if KASAN_KALLOC
	/* expand the allocation to accommodate redzones */
	vm_size_t req_size = size;
	size = kasan_alloc_resize(req_size);
#endif

	if (size < MAX_SIZE_ZDLUT)
		z = get_zone_dlut(size);
	else if (size < kalloc_max_prerounded)
		z = get_zone_search(size, k_zindex_start);
	else {
		/*
		 * If size is too large for a zone, then use kmem_alloc.
		 * (We use kmem_alloc instead of kmem_alloc_kobject so that
		 * krealloc can use kmem_realloc.)
		 */
		vm_map_t alloc_map;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return (NULL);
		}

#if KASAN_KALLOC
		/* large allocation - use guard pages instead of small redzones */
		size = round_page(req_size + 2 * PAGE_SIZE);
		assert(size >= MAX_SIZE_ZDLUT && size >= kalloc_max_prerounded);
#endif

		if (size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		if (site) tag = vm_tag_alloc(site);

		if (kmem_alloc_flags(alloc_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS) {
			if (alloc_map != kernel_map) {
				if (kalloc_fallback_count++ == 0) {
					printf("%s: falling back to kernel_map\n", __func__);
				}
				if (kmem_alloc_flags(kernel_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS)
					addr = NULL;
			}
			else
				addr = NULL;
		}

		if (addr != NULL) {
			kalloc_spin_lock();
			/*
			 * Thread-safe version of the workaround for 4740071
			 * (a double FREE())
			 */
			if (size > kalloc_largest_allocated)
				kalloc_largest_allocated = size;

			kalloc_large_inuse++;
			kalloc_large_total += size;
			kalloc_large_sum += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

			kalloc_unlock();

			KALLOC_ZINFO_SALLOC(size);
		}
#if KASAN_KALLOC
		/* fixup the return address to skip the redzone */
		addr = (void *)kasan_alloc((vm_offset_t)addr, size, req_size, PAGE_SIZE);
#else
		*psize = round_page(size);
#endif
		DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, *psize, void*, addr);
		return (addr);
	}
#ifdef KALLOC_DEBUG
	if (size > z->elem_size)
		panic("%s: z %p (%s) but requested size %lu", __func__,
		    z, z->zone_name, (unsigned long)size);
#endif

	assert(size <= z->elem_size);

#if VM_MAX_TAG_ZONES
	if (z->tags && site)
	{
		tag = vm_tag_alloc(site);
		if (!canblock && !vm_allocation_zone_totals[tag]) tag = VM_KERN_MEMORY_KALLOC;
	}
#endif

	addr = zalloc_canblock_tag(z, canblock, size, tag);

#if KASAN_KALLOC
	/* fixup the return address to skip the redzone */
	addr = (void *)kasan_alloc((vm_offset_t)addr, z->elem_size, req_size, KASAN_GUARD_SIZE);

	/* For KASan, the redzone lives in any additional space, so don't
	 * expand the allocation. */
#else
	*psize = z->elem_size;
#endif

	DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, *psize, void*, addr);
	return addr;
}
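
/*
 * Usage sketch (hypothetical caller; note that *psize is updated to the
 * size actually granted, which the caller should remember for kfree()):
 *
 *	vm_size_t size = 100;
 *	void *buf = kalloc_canblock(&size, TRUE, NULL);
 *	// on the 64-bit config, size is now 128 (the zone element size)
 *	if (buf != NULL) {
 *		...
 *		kfree(buf, size);
 *	}
 */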

void *
kalloc_external(
	vm_size_t size);
void *
kalloc_external(
	vm_size_t size)
{
	return ( kalloc_tag_bt(size, VM_KERN_MEMORY_KALLOC) );
}

void
kfree(
	void		*data,
	vm_size_t	size)
{
	zone_t z;

#if KASAN_KALLOC
	/*
	 * Resize back to the real allocation size and hand off to the KASan
	 * quarantine. `data` may then point to a different allocation.
	 */
	vm_size_t user_size = size;
	kasan_check_free((vm_address_t)data, size, KASAN_HEAP_KALLOC);
	data = (void *)kasan_dealloc((vm_address_t)data, &size);
	kasan_free(&data, &size, KASAN_HEAP_KALLOC, NULL, user_size, true);
	if (!data) {
		return;
	}
#endif

	if (size < MAX_SIZE_ZDLUT)
		z = get_zone_dlut(size);
	else if (size < kalloc_max_prerounded)
		z = get_zone_search(size, k_zindex_start);
	else {
		/* if size was too large for a zone, then use kmem_free */

		vm_map_t alloc_map = kernel_map;

		if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max))
			alloc_map = kalloc_map;
		if (size > kalloc_largest_allocated) {
			panic("kfree: size %lu > kalloc_largest_allocated %lu", (unsigned long)size, (unsigned long)kalloc_largest_allocated);
		}
		kmem_free(alloc_map, (vm_offset_t)data, size);
		kalloc_spin_lock();

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		kalloc_unlock();

#if !KASAN_KALLOC
		DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, size, void*, data);
#endif

		KALLOC_ZINFO_SFREE(size);
		return;
	}

	/* free to the appropriate zone */
#ifdef KALLOC_DEBUG
	if (size > z->elem_size)
		panic("%s: z %p (%s) but requested size %lu", __func__,
		    z, z->zone_name, (unsigned long)size);
#endif
	assert(size <= z->elem_size);
#if !KASAN_KALLOC
	DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, z->elem_size, void*, data);
#endif
	zfree(z, data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t	size)
{
	if (size < MAX_SIZE_ZDLUT)
		return (get_zone_dlut(size));
	if (size <= kalloc_max)
		return (get_zone_search(size, k_zindex_start));
	return (ZONE_NULL);
}
#endif

void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);

	OSMalloc_tag_lck_grp = lck_grp_alloc_init("OSMalloc_tag", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&OSMalloc_tag_lock, OSMalloc_tag_lck_grp, LCK_ATTR_NULL);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char	*str,
	uint32_t	flags)
{
	OSMallocTag	OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strlcpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	OSMalloc_tag_spin_lock();
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	OSMalloc_tag_unlock();
	OSMTag->OSMT_state = OSMT_VALID;
	return (OSMTag);
}

void
OSMalloc_Tagref(
	OSMallocTag	tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	(void)hw_atomic_add(&tag->OSMT_refcnt, 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag	tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID | OSMT_RELEASED, OSMT_VALID | OSMT_RELEASED, &tag->OSMT_state)) {
			OSMalloc_tag_spin_lock();
			(void)remque((queue_entry_t)tag);
			OSMalloc_tag_unlock();
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name);
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag	tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID | OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X \n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		OSMalloc_tag_spin_lock();
		(void)remque((queue_entry_t)tag);
		OSMalloc_tag_unlock();
		kfree((void*)tag, sizeof(*tag));
	}
}
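
/*
 * Typical tag lifecycle for a kext client (sketch; the name string is
 * the caller's own):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
 *	void *p = OSMalloc(1024, tag);	// takes a tag reference
 *	...
 *	OSFree(p, 1024, tag);		// drops that reference
 *	OSMalloc_Tagfree(tag);		// tag freed once refcnt drains
 */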

void *
OSMalloc(
	uint32_t	size,
	OSMallocTag	tag)
{
	void		*addr = NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		if ((kr = kmem_alloc_pageable_external(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			addr = NULL;
	} else
		addr = kalloc_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);

	if (!addr)
		OSMalloc_Tagrele(tag);

	return (addr);
}

void *
OSMalloc_nowait(
	uint32_t	size,
	OSMallocTag	tag)
{
	void	*addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return (NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return (addr);
}

void *
OSMalloc_noblock(
	uint32_t	size,
	OSMallocTag	tag)
{
	void	*addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return (NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return (addr);
}

void
OSFree(
	void		*addr,
	uint32_t	size,
	OSMallocTag	tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}

uint32_t
OSMalloc_size(
	void	*addr)
{
	return (uint32_t)kalloc_size(addr);
}