/*
 * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#ifdef KERNEL_PRIVATE

#ifndef _KERN_KALLOC_H_
#define _KERN_KALLOC_H_

#include <mach/machine/vm_types.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <kern/zalloc.h>
#include <libkern/section_keywords.h>
#include <os/alloc_util.h>
#if XNU_KERNEL_PRIVATE
#include <kern/counter.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN

/*!
 * @const KALLOC_SAFE_ALLOC_SIZE
 *
 * @brief
 * The maximum allocation size that is safe to allocate with Z_NOFAIL in kalloc.
 */
#define KALLOC_SAFE_ALLOC_SIZE  (16u * 1024u)

#if XNU_KERNEL_PRIVATE
/*!
 * @typedef kalloc_heap_t
 *
 * @abstract
 * A kalloc heap view represents a sub-accounting context
 * for a given kalloc heap.
 */
typedef struct kalloc_heap {
        zone_stats_t        kh_stats;
        const char         *__unsafe_indexable kh_name;
        zone_kheap_id_t     kh_heap_id;
        vm_tag_t            kh_tag;
        uint16_t            kh_type_hash;
        zone_id_t           kh_zstart;
        struct kalloc_heap *kh_views;
} *kalloc_heap_t;
/*!
 * @macro KALLOC_HEAP_DECLARE
 *
 * @abstract
 * (optionally) declare a kalloc heap view in a header.
 *
 * @discussion
 * Unlike kernel zones, new full-blown heaps cannot be instantiated.
 * However, new accounting views of the base heaps can be made.
 */
#define KALLOC_HEAP_DECLARE(var) \
        extern struct kalloc_heap var[1]

/**
 * @const KHEAP_DATA_BUFFERS
 *
 * @brief
 * The builtin heap for bags of pure bytes.
 *
 * @discussion
 * This set of kalloc zones should contain pure bags of bytes with no pointers
 * or length/offset fields.
 *
 * The zones forming the heap aren't sequestered from each other; however, the
 * entire heap lives in a different submap from any other kernel allocation.
 *
 * The main motivation behind this separation is that many of these objects
 * have been used by attackers to spray the heap to make it more predictable
 * while exploiting use-after-frees or overflows.
 *
 * Common attributes that make these objects useful for spraying include
 * control of:
 * - Data in allocation
 * - Time of alloc and free (lifetime)
 * - Size of allocation
 */
KALLOC_HEAP_DECLARE(KHEAP_DATA_BUFFERS);

/**
 * @const KHEAP_DEFAULT
 *
 * @brief
 * The builtin default core kernel kalloc heap.
 *
 * @discussion
 * This set of kalloc zones should contain other objects that don't have their
 * own security mitigations. The individual zones are themselves sequestered.
 */
KALLOC_HEAP_DECLARE(KHEAP_DEFAULT);

/**
 * @const KHEAP_KT_VAR
 *
 * @brief
 * Temporary heap for variable sized kalloc type allocations.
 *
 * @discussion
 * This heap will be removed when logic for kalloc_type_var_views is added.
 */
KALLOC_HEAP_DECLARE(KHEAP_KT_VAR);

/*!
 * @macro KALLOC_HEAP_DEFINE
 *
 * @abstract
 * Defines a given kalloc heap view and what it points to.
 *
 * @discussion
 * Kalloc heaps are views over one of the pre-defined builtin heaps
 * (such as @c KHEAP_DATA_BUFFERS or @c KHEAP_DEFAULT). Instantiating
 * a new one allows for accounting of allocations through this view.
 *
 * Kalloc heap views are initialized during the @c STARTUP_SUB_ZALLOC phase,
 * as the last rank. If views on zones are created, these must have been
 * created before this stage.
 *
 * @param var           the name for the zone view.
 * @param name          a string describing the zone view.
 * @param heap_id       a @c KHEAP_ID_* constant.
 */
#define KALLOC_HEAP_DEFINE(var, name, heap_id) \
        SECURITY_READ_ONLY_LATE(struct kalloc_heap) var[1] = { { \
                .kh_name = (name), \
                .kh_heap_id = (heap_id), \
        } }; \
        STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, kheap_startup_init, var)

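/*
 * Example (sketch): defining a private accounting view over the default
 * builtin heap and allocating through it. KHEAP_MY_DRIVER and its label
 * are hypothetical names; KHEAP_ID_DEFAULT is the default heap id.
 *
 * <code>
 * KALLOC_HEAP_DEFINE(KHEAP_MY_DRIVER, "my driver", KHEAP_ID_DEFAULT);
 *
 * void *buf = kheap_alloc(KHEAP_MY_DRIVER, 128, Z_WAITOK | Z_ZERO);
 * ...
 * kheap_free(KHEAP_MY_DRIVER, buf, 128);
 * </code>
 */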

/*
 * Allocations of type SO_NAME are known to not have pointers on
 * most platforms -- on macOS this is not guaranteed.
 */
#if XNU_TARGET_OS_OSX
#define KHEAP_SONAME KHEAP_DEFAULT
#else /* XNU_TARGET_OS_OSX */
#define KHEAP_SONAME KHEAP_DATA_BUFFERS
#endif /* XNU_TARGET_OS_OSX */

#endif /* XNU_KERNEL_PRIVATE */

/*!
 * @enum kalloc_type_flags_t
 *
 * @brief
 * Flags that can be passed to @c KALLOC_TYPE_DEFINE
 *
 * @discussion
 * These flags can be used to request a specific accounting
 * behavior.
 *
 * @const KT_DEFAULT
 * Passing this flag will provide the default accounting behavior,
 * i.e. shared accounting, unless KT_OPTIONS_ACCT is set in the
 * kt boot-arg.
 *
 * @const KT_PRIV_ACCT
 * Passing this flag will provide individual stats for the
 * @c kalloc_type_view that is defined.
 *
 * @const KT_SHARED_ACCT
 * Passing this flag will accumulate stats as a part of the
 * zone that your @c kalloc_type_view points to.
 *
 * @const KT_DATA_ONLY
 * Represents that the type is "data-only". Adopters should not
 * set this flag manually, it is meant for the compiler to set
 * automatically when KALLOC_TYPE_CHECK(DATA) passes.
 *
 * @const KT_VM
 * Represents that the type is large enough to use the VM. Adopters
 * should not set this flag manually, it is meant for the compiler
 * to set automatically when KALLOC_TYPE_VM_SIZE_CHECK passes.
 *
 * @const KT_PTR_ARRAY
 * Represents that the type is an array of pointers. Adopters should not
 * set this flag manually, it is meant for the compiler to set
 * automatically when KALLOC_TYPE_CHECK(PTR) passes.
 *
 * @const KT_CHANGED*
 * Represents a change in the version of the kalloc_type_view. This
 * is required in order to avoid requiring kexts to be rebuilt to
 * use the new definitions right away. These flags should not be used
 * manually at a callsite; they are meant for internal use only. Future
 * changes to the kalloc_type_view definition should toggle this flag.
 *
 #if XNU_KERNEL_PRIVATE
 * @const KT_NOSHARED
 * This flag will force the callsite to bypass the shared zone and
 * directly allocate from the assigned zone. This can only be used
 * with KT_PRIV_ACCT right now. If you still require this behavior
 * but don't want private stats, use Z_SET_NOTSHARED at the allocation
 * callsite instead.
 *
 * @const KT_SLID
 * Indicates that strings in the view were slid during early boot.
 *
 * @const KT_PROCESSED
 * This flag is set once the view is parsed during early boot. Views
 * that are not in the BootKC on macOS aren't parsed and therefore will
 * not have this flag set. The runtime can use this as an indication
 * to appropriately redirect the call.
 *
 * @const KT_HASH
 * Hash of the signature used by kmem_*_guard to determine the range
 * and direction for the allocation.
 #endif
 */
__options_decl(kalloc_type_flags_t, uint32_t, {
        KT_DEFAULT      = 0x0001,
        KT_PRIV_ACCT    = 0x0002,
        KT_SHARED_ACCT  = 0x0004,
        KT_DATA_ONLY    = 0x0008,
        KT_VM           = 0x0010,
        KT_CHANGED      = 0x0020,
        KT_CHANGED2     = 0x0040,
        KT_PTR_ARRAY    = 0x0080,
#if XNU_KERNEL_PRIVATE
        KT_NOSHARED     = 0x2000,
        KT_SLID         = 0x4000,
        KT_PROCESSED    = 0x8000,
        KT_HASH         = 0xffff0000,
#endif
});

/*!
 * @typedef kalloc_type_view_t
 *
 * @abstract
 * A kalloc type view is a structure used to redirect callers
 * of @c kalloc_type to a particular zone based on the signature of
 * their type.
 *
 * @discussion
 * These structures are automatically created under the hood for every
 * @c kalloc_type and @c kfree_type callsite. They are ingested during startup
 * and are assigned zones based on the security policy for their signature.
 *
 * These structs are protected by the kernel lockdown and can't be initialized
 * dynamically. They must be created using @c KALLOC_TYPE_DEFINE() or
 * @c kalloc_type or @c kfree_type.
 */
#if XNU_KERNEL_PRIVATE
struct kalloc_type_view {
        struct zone_view        kt_zv;
        const char             *kt_signature __unsafe_indexable;
        kalloc_type_flags_t     kt_flags;
        uint32_t                kt_size;
        zone_t                  kt_zshared;
        zone_t                  kt_zsig;
};
#else /* XNU_KERNEL_PRIVATE */
struct kalloc_type_view {
        struct zone_view        kt_zv;
        const char             *kt_signature __unsafe_indexable;
        kalloc_type_flags_t     kt_flags;
        uint32_t                kt_size;
        void                   *unused1;
        void                   *unused2;
};
#endif /* XNU_KERNEL_PRIVATE */

/*
 * The set of zones used by all kalloc heaps are defined by the constants
 * below.
 *
 * KHEAP_START_SIZE: Size of the first sequential zone.
 * KHEAP_MAX_SIZE  : Size of the last sequential zone.
 * KHEAP_STEP_WIDTH: Number of zones created at every step (power of 2).
 * KHEAP_STEP_START: Size of the first step.
 * We also create some extra initial zones that don't follow the sequence
 * for sizes 8 (on armv7 only), 16 and 32.
 *
 * idx    step_increment    zone_elem_size
 *  0     -                     16
 *  1     -                     32
 *  2     16                    48
 *  3     16                    64
 *  4     32                    96
 *  5     32                   128
 *  6     64                   192
 *  7     64                   256
 *  8     128                  384
 *  9     128                  512
 * 10     256                  768
 * 11     256                 1024
 * 12     512                 1536
 * 13     512                 2048
 * 14     1024                3072
 * 15     1024                4096
 * 16     2048                6144
 * 17     2048                8192
 * 18     4096               12288
 * 19     4096               16384
 * 20     8192               24576
 * 21     8192               32768
 */
#define kalloc_log2down(mask)   (31 - __builtin_clz(mask))
#define KHEAP_START_SIZE        32
#if __x86_64__
#define KHEAP_MAX_SIZE          (16 * 1024)
#define KHEAP_EXTRA_ZONES       2
#else
#define KHEAP_MAX_SIZE          (32 * 1024)
#define KHEAP_EXTRA_ZONES       2
#endif
#define KHEAP_STEP_WIDTH        2
#define KHEAP_STEP_START        16
#define KHEAP_START_IDX         kalloc_log2down(KHEAP_START_SIZE)
#define KHEAP_NUM_STEPS         (kalloc_log2down(KHEAP_MAX_SIZE) - \
        kalloc_log2down(KHEAP_START_SIZE))
#define KHEAP_NUM_ZONES         (KHEAP_NUM_STEPS * KHEAP_STEP_WIDTH + \
        KHEAP_EXTRA_ZONES)
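
/*
 * Worked example (non-x86 constants): KHEAP_MAX_SIZE is 32768, so
 * KHEAP_NUM_STEPS = log2(32768) - log2(32) = 15 - 5 = 10 and
 * KHEAP_NUM_ZONES = 10 * KHEAP_STEP_WIDTH + KHEAP_EXTRA_ZONES = 22,
 * matching indices 0..21 in the table above.
 */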

/*!
 * @enum kalloc_type_version_t
 *
 * @brief
 * Enum that holds versioning information for @c kalloc_type_var_view
 *
 * @const KT_V1
 * Version 1
 */
__options_decl(kalloc_type_version_t, uint16_t, {
        KT_V1           = 0x0001,
});

/*!
 * @typedef kalloc_type_var_view_t
 *
 * @abstract
 * This structure is analogous to @c kalloc_type_view but handles
 * @c kalloc_type callsites that are variable in size.
 *
 * @discussion
 * These structures are automatically created under the hood for every
 * variable sized @c kalloc_type and @c kfree_type callsite. They are ingested
 * during startup and are assigned zones based on the security policy for
 * their signature.
 *
 * These structs are protected by the kernel lockdown and can't be initialized
 * dynamically. They must be created using @c KALLOC_TYPE_VAR_DEFINE() or
 * @c kalloc_type or @c kfree_type.
 */
struct kalloc_type_var_view {
        kalloc_type_version_t   kt_version;
        uint16_t                kt_size_hdr;
        /*
         * Temporary: needs to be 32 bits because we have many structs that
         * use IONew/Delete that are larger than 32K.
         */
        uint32_t                kt_size_type;
        zone_stats_t            kt_stats;
        const char *__unsafe_indexable kt_name;
        zone_view_t             kt_next;
        zone_id_t               kt_heap_start;
        uint8_t                 kt_zones[KHEAP_NUM_ZONES];
        const char *__unsafe_indexable kt_sig_hdr;
        const char *__unsafe_indexable kt_sig_type;
        kalloc_type_flags_t     kt_flags;
};

typedef struct kalloc_type_var_view *kalloc_type_var_view_t;

/*!
 * @macro KALLOC_TYPE_DECLARE
 *
 * @abstract
 * (optionally) declares a kalloc type view (in a header).
 *
 * @param var           the name for the kalloc type view.
 */
#define KALLOC_TYPE_DECLARE(var) \
        extern struct kalloc_type_view var[1]

/*!
 * @macro KALLOC_TYPE_DEFINE
 *
 * @abstract
 * Defines a given kalloc type view with preferred accounting.
 *
 * @discussion
 * This macro allows you to define a kalloc type with private
 * accounting. The defined kalloc_type_view can be used with
 * kalloc_type_impl/kfree_type_impl to allocate/free memory.
 * zalloc/zfree can also be used from inside xnu. However, doing
 * so doesn't handle freeing a NULL pointer or the use of tags.
 *
 * @param var           the name for the kalloc type view.
 * @param type          the type of your allocation.
 * @param flags         a @c KT_* flag.
 */
#define KALLOC_TYPE_DEFINE(var, type, flags) \
        _KALLOC_TYPE_DEFINE(var, type, flags); \
        __ZONE_DECLARE_TYPE(var, type)
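
/*
 * Example (sketch): defining a typed view with private accounting and
 * allocating through it. struct my_obj and my_obj_view are hypothetical
 * names.
 *
 * <code>
 * struct my_obj {
 *         uint64_t  id;
 *         void     *ptr;
 * };
 * KALLOC_TYPE_DEFINE(my_obj_view, struct my_obj, KT_PRIV_ACCT);
 *
 * struct my_obj *o = kalloc_type_impl(my_obj_view, Z_WAITOK | Z_ZERO);
 * ...
 * kfree_type_impl(my_obj_view, o);
 * </code>
 */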

/*!
 * @macro KALLOC_TYPE_VAR_DECLARE
 *
 * @abstract
 * (optionally) declares a kalloc type var view (in a header).
 *
 * @param var           the name for the kalloc type var view.
 */
#define KALLOC_TYPE_VAR_DECLARE(var) \
        extern struct kalloc_type_var_view var[1]

/*!
 * @macro KALLOC_TYPE_VAR_DEFINE
 *
 * @abstract
 * Defines a given kalloc type view with preferred accounting for
 * variable sized typed allocations.
 *
 * @discussion
 * As the views aren't yet being ingested, individual stats aren't
 * available. The defined kalloc_type_var_view should be used with
 * kalloc_type_var_impl/kfree_type_var_impl to allocate/free memory.
 *
 * This macro comes in 2 variants:
 *
 * 1. @c KALLOC_TYPE_VAR_DEFINE(var, e_ty, flags)
 * 2. @c KALLOC_TYPE_VAR_DEFINE(var, h_ty, e_ty, flags)
 *
 * @param var           the name for the kalloc type var view.
 * @param h_ty          the type of header in the allocation.
 * @param e_ty          the type of repeating part in the allocation.
 * @param flags         a @c KT_* flag.
 */
#define KALLOC_TYPE_VAR_DEFINE(...) KALLOC_DISPATCH(KALLOC_TYPE_VAR_DEFINE, ##__VA_ARGS__)
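
/*
 * Example (sketch): a variable-size typed view for a header followed by
 * an array of elements. struct my_hdr, struct my_elem and my_var_view are
 * hypothetical; the size below ignores the header realignment that
 * kt_realign_sizeof() performs.
 *
 * <code>
 * KALLOC_TYPE_VAR_DEFINE(my_var_view, struct my_hdr, struct my_elem,
 *     KT_SHARED_ACCT);
 *
 * vm_size_t size = sizeof(struct my_hdr) + n * sizeof(struct my_elem);
 * struct my_hdr *h = kalloc_type_var_impl(my_var_view, size,
 *     Z_WAITOK | Z_ZERO, NULL);
 * ...
 * kfree_type_var_impl(my_var_view, h, size);
 * </code>
 */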

#ifdef XNU_KERNEL_PRIVATE

/*
 * These versions allow specifying the kalloc heap to allocate memory
 * from.
 */
#define kheap_alloc_tag(kalloc_heap, size, flags, itag) \
        __kheap_alloc(kalloc_heap, size, __zone_flags_mix_tag(flags, itag), NULL)
#define kheap_alloc(kalloc_heap, size, flags) \
        kheap_alloc_tag(kalloc_heap, size, flags, VM_ALLOC_SITE_TAG())

/*
 * These versions should be used for allocating pure data bytes that
 * do not contain any pointers.
 */
#define kalloc_data_tag(size, flags, itag) \
        kheap_alloc_tag(KHEAP_DATA_BUFFERS, size, flags, itag)
#define kalloc_data(size, flags) \
        kheap_alloc(KHEAP_DATA_BUFFERS, size, flags)

#define krealloc_data_tag(elem, old_size, new_size, flags, itag) \
        __kheap_realloc(KHEAP_DATA_BUFFERS, elem, old_size, new_size, \
        __zone_flags_mix_tag(flags, itag), NULL)
#define krealloc_data(elem, old_size, new_size, flags) \
        krealloc_data_tag(elem, old_size, new_size, flags, \
        VM_ALLOC_SITE_TAG())

#define kfree_data(elem, size) \
        kheap_free(KHEAP_DATA_BUFFERS, elem, size)

#define kfree_data_addr(elem) \
        kheap_free_addr(KHEAP_DATA_BUFFERS, elem)
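
/*
 * Example (sketch): allocating and freeing a pure-data buffer; variable
 * names are illustrative. kfree_data() also resets the pointer to NULL.
 *
 * <code>
 * uint8_t *bytes = kalloc_data(len, Z_WAITOK | Z_ZERO);
 * if (bytes == NULL) {
 *         return ENOMEM;
 * }
 * ...
 * kfree_data(bytes, len);
 * </code>
 */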

extern void kheap_free_bounded(
        kalloc_heap_t         heap,
        void                 *addr __unsafe_indexable,
        vm_size_t             min_sz,
        vm_size_t             max_sz);

extern void kalloc_data_require(
        void                 *data __unsafe_indexable,
        vm_size_t             size);

extern void kalloc_non_data_require(
        void                 *data __unsafe_indexable,
        vm_size_t             size);

#else /* XNU_KERNEL_PRIVATE */

extern void *__sized_by(size) kalloc(
        vm_size_t             size) __attribute__((malloc, alloc_size(1)));

extern void *__unsafe_indexable kalloc_data(
        vm_size_t             size,
        zalloc_flags_t        flags);

__attribute__((malloc, alloc_size(1)))
static inline void *
__sized_by(size)
__kalloc_data(vm_size_t size, zalloc_flags_t flags)
{
        void *__unsafe_indexable addr = (kalloc_data)(size, flags);
        if (flags & Z_NOFAIL) {
                __builtin_assume(addr != NULL);
        }
        return addr ? __unsafe_forge_bidi_indexable(uint8_t *, addr, size) : NULL;
}

#define kalloc_data(size, fl) __kalloc_data(size, fl)

extern void *__unsafe_indexable krealloc_data(
        void                 *ptr __unsafe_indexable,
        vm_size_t             old_size,
        vm_size_t             new_size,
        zalloc_flags_t        flags);

__attribute__((malloc, alloc_size(3)))
static inline void *
__sized_by(new_size)
__krealloc_data(
        void                 *ptr __sized_by(old_size),
        vm_size_t             old_size,
        vm_size_t             new_size,
        zalloc_flags_t        flags)
{
        void *__unsafe_indexable addr = (krealloc_data)(ptr, old_size, new_size, flags);
        if (flags & Z_NOFAIL) {
                __builtin_assume(addr != NULL);
        }
        return addr ? __unsafe_forge_bidi_indexable(uint8_t *, addr, new_size) : NULL;
}

#define krealloc_data(ptr, old_size, new_size, fl) \
        __krealloc_data(ptr, old_size, new_size, fl)

extern void kfree(
        void                 *data __unsafe_indexable,
        vm_size_t             size);

extern void kfree_data(
        void                 *ptr __unsafe_indexable,
        vm_size_t             size);

extern void kfree_data_addr(
        void                 *ptr __unsafe_indexable);

#endif /* !XNU_KERNEL_PRIVATE */

/*!
 * @macro kalloc_type
 *
 * @abstract
 * Allocates an element of a particular type.
 *
 * @discussion
 * This family of allocators segregates kalloc allocations based on their type.
 *
 * This macro comes in 3 variants:
 *
 * 1. @c kalloc_type(type, flags)
 *    Use this macro for fixed sized allocation of a particular type.
 *
 * 2. @c kalloc_type(e_type, count, flags)
 *    Use this macro for variable sized allocations that form an array;
 *    do note that @c kalloc_type(e_type, 1, flags) is not equivalent to
 *    @c kalloc_type(e_type, flags).
 *
 * 3. @c kalloc_type(hdr_type, e_type, count, flags)
 *    Use this macro for variable sized allocations formed with
 *    a header of type @c hdr_type followed by a variable sized array
 *    with elements of type @c e_type, equivalent to this:
 *
 *    <code>
 *    struct {
 *        hdr_type hdr;
 *        e_type   arr[];
 *    }
 *    </code>
 *
 * @param flags         @c zalloc_flags_t that get passed to zalloc_internal
 */
#define kalloc_type(...)  KALLOC_DISPATCH(kalloc_type, ##__VA_ARGS__)

/*!
 * @macro kfree_type
 *
 * @abstract
 * Frees an element allocated with @c kalloc_type.
 *
 * @discussion
 * This pairs with the @c kalloc_type() that was made to allocate this element.
 * Arguments passed to @c kfree_type() must match the ones passed at allocation
 * time precisely.
 *
 * This macro comes in the same 3 variants kalloc_type() does:
 *
 * 1. @c kfree_type(type, elem)
 * 2. @c kfree_type(e_type, count, elem)
 * 3. @c kfree_type(hdr_type, e_type, count, elem)
 *
 * @param elem          the address of the element to free.
 */
#define kfree_type(...)  KALLOC_DISPATCH(kfree_type, ##__VA_ARGS__)
#define kfree_type_counted_by(type, count, elem) \
        kfree_type_counted_by_3(type, count, elem)
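
/*
 * Example (sketch): pairing the kalloc_type()/kfree_type() variants.
 * struct hdr and struct elem are hypothetical types.
 *
 * <code>
 * struct elem *e = kalloc_type(struct elem, Z_WAITOK);
 * kfree_type(struct elem, e);
 *
 * struct elem *arr = kalloc_type(struct elem, 16, Z_WAITOK | Z_ZERO);
 * kfree_type(struct elem, 16, arr);
 *
 * struct hdr *h = kalloc_type(struct hdr, struct elem, 16, Z_WAITOK);
 * kfree_type(struct hdr, struct elem, 16, h);
 * </code>
 */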

#ifdef XNU_KERNEL_PRIVATE
#define kalloc_type_tag(...)   KALLOC_DISPATCH(kalloc_type_tag, ##__VA_ARGS__)
#define krealloc_type_tag(...) KALLOC_DISPATCH(krealloc_type_tag, ##__VA_ARGS__)
#define krealloc_type(...)     KALLOC_DISPATCH(krealloc_type, ##__VA_ARGS__)

/*
 * kalloc_type_require can't be made available to kexts as the
 * kalloc_type_view's zone could be NULL in the following cases:
 * - Size greater than KALLOC_SAFE_ALLOC_SIZE
 * - On macOS, if call is not in BootKC
 * - All allocations in kext for armv7
 */
#define kalloc_type_require(type, value) ({ \
        static _KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT); \
        zone_require(kt_view_var->kt_zv.zv_zone, value); \
})
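
/*
 * Example (sketch): checking that a pointer about to be trusted actually
 * belongs to the zone backing its type. struct my_obj is a hypothetical
 * type; zone_require() is expected to panic on a mismatch.
 *
 * <code>
 * kalloc_type_require(struct my_obj, o);
 * </code>
 */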

#endif

/*!
 * @enum kt_granule_t
 *
 * @brief
 * Granule encodings used by the compiler for the type signature.
 *
 * @discussion
 * Given a type, the XNU signature type system (__builtin_xnu_type_signature)
 * produces a signature by analyzing its memory layout, in chunks of 8 bytes,
 * which we call granules. The encoding produced for each granule is the
 * bitwise OR of the encodings of all the types of the members included
 * in that granule.
 *
 * @const KT_GRANULE_PADDING
 * Represents padding inside a record type.
 *
 * @const KT_GRANULE_POINTER
 * Represents a pointer type.
 *
 * @const KT_GRANULE_DATA
 * Represents a scalar type that is not a pointer.
 *
 * @const KT_GRANULE_DUAL
 * Currently unused.
 *
 * @const KT_GRANULE_PAC
 * Represents a pointer which is subject to PAC.
 */
__options_decl(kt_granule_t, uint32_t, {
        KT_GRANULE_PADDING = 0,
        KT_GRANULE_POINTER = 1,
        KT_GRANULE_DATA    = 2,
        KT_GRANULE_DUAL    = 4,
        KT_GRANULE_PAC     = 8
});

#define KT_GRANULE_MAX \
        (KT_GRANULE_PADDING | KT_GRANULE_POINTER | KT_GRANULE_DATA | \
        KT_GRANULE_DUAL | KT_GRANULE_PAC)

/*
 * Convert a granule encoding to the index of the bit that
 * represents such a granule in the type summary.
 *
 * The XNU type summary (__builtin_xnu_type_summary) produces a 32-bit
 * summary of the type signature of a given type. If the bit (1 << G)
 * is set in the summary, that means that the type contains one or more
 * granules with encoding G.
 */
#define KT_SUMMARY_GRANULE_TO_IDX(g)  (1UL << (g))

#define KT_SUMMARY_MASK_TYPE_BITS  (0xffff)

#define KT_SUMMARY_MASK_DATA \
        (KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) | \
        KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA))

#define KT_SUMMARY_MASK_PTR \
        (KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) | \
        KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) | \
        KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))

#define KT_SUMMARY_MASK_ALL_GRANULES \
        (KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) | \
        KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) | \
        KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA) | \
        KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DUAL) | \
        KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))

/*!
 * @macro KT_SUMMARY_GRANULES
 *
 * @abstract
 * Return the granule type summary for a given type
 *
 * @discussion
 * This macro computes the type summary of a type, and it then extracts the
 * bits which carry information about the granules in the memory layout.
 *
 * Note: you should never have to use __builtin_xnu_type_summary
 * directly, as we reserve the right to use the remaining bits with
 * different semantics.
 *
 * @param type          The type to analyze
 */
#define KT_SUMMARY_GRANULES(type) \
        (__builtin_xnu_type_summary(type) & KT_SUMMARY_MASK_TYPE_BITS)

/*!
 * @macro KALLOC_TYPE_SIG_CHECK
 *
 * @abstract
 * Return whether a given type is only made up of granules specified in mask
 *
 * @param mask          Granules to check for
 * @param type          The type to analyze
 */
#define KALLOC_TYPE_SIG_CHECK(mask, type) \
        ((KT_SUMMARY_GRANULES(type) & ~(mask)) == 0)

/*!
 * @macro KALLOC_TYPE_IS_DATA_ONLY
 *
 * @abstract
 * Return whether a given type is considered a data-only type.
 *
 * @param type          The type to analyze
 */
#define KALLOC_TYPE_IS_DATA_ONLY(type) \
        KALLOC_TYPE_SIG_CHECK(KT_SUMMARY_MASK_DATA, type)

/*!
 * @macro KALLOC_TYPE_HAS_OVERLAPS
 *
 * @abstract
 * Return whether a given type has overlapping granules.
 *
 * @discussion
 * This macro returns whether the memory layout for a given type contains
 * overlapping granules. An overlapping granule is a granule which includes
 * members with types that have different encodings under the XNU signature
 * type system.
 *
 * @param type          The type to analyze
 */
#define KALLOC_TYPE_HAS_OVERLAPS(type) \
        ((KT_SUMMARY_GRANULES(type) & ~KT_SUMMARY_MASK_ALL_GRANULES) != 0)

/*!
 * @macro KALLOC_TYPE_IS_COMPATIBLE_PTR
 *
 * @abstract
 * Return whether a pointer is compatible with a given type, in the XNU
 * signature type system.
 *
 * @discussion
 * This macro returns whether the type pointed to by @c ptr is either the same
 * type as @c type, or has the same signature. The implementation relies
 * on the @c __builtin_xnu_types_compatible builtin, and the value returned
 * can be evaluated at compile time in both C and C++.
 *
 * Note: void pointers are treated as wildcards, and are thus compatible
 * with any given type.
 *
 * @param ptr           the pointer whose type needs to be checked.
 * @param type          the type which the pointer will be checked against.
 */
#define KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type) \
        (__builtin_xnu_types_compatible(os_get_pointee_type(ptr), type) || \
        __builtin_xnu_types_compatible(os_get_pointee_type(ptr), void))

#define KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(ptr, type) \
        _Static_assert(KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type), \
            "Pointer type is not compatible with specified type")


/*!
 * @const KALLOC_ARRAY_SIZE_MAX
 *
 * @brief
 * The maximum size that can be allocated with the @c KALLOC_ARRAY interface.
 *
 * @discussion
 * This size is:
 * - ~256M on 4k or PAC systems with 16k pages
 * - ~1G on other 16k systems.
 */
#if __arm64e__ || KASAN_TBI
#define KALLOC_ARRAY_SIZE_MAX   ((uint32_t)PAGE_MASK << PAGE_SHIFT)
#define KALLOC_ARRAY_GRANULE    32ul
#else
#define KALLOC_ARRAY_SIZE_MAX   ((uint32_t)UINT16_MAX << PAGE_SHIFT)
#define KALLOC_ARRAY_GRANULE    16ul
#endif

/*!
 * @macro KALLOC_ARRAY_TYPE_DECL
 *
 * @brief
 * Declares a type used as a packed kalloc array type.
 *
 * @discussion
 * This macro comes in two variants
 *
 * - KALLOC_ARRAY_TYPE_DECL(name, e_ty)
 * - KALLOC_ARRAY_TYPE_DECL(name, h_ty, e_ty)
 *
 * The first one defines an array of elements of type @c e_ty,
 * and the second a header of type @c h_ty followed by
 * an array of elements of type @c e_ty.
 *
 * Those macros will then define the type @c ${name}_t as a typedef
 * to a non-existent structure type, in order to avoid accidental
 * dereference of those pointers.
 *
 * kalloc array pointers are pointers that, in addition to encoding
 * the array base pointer, also encode the allocation size (only sizes
 * up to @c KALLOC_ARRAY_SIZE_MAX bytes).
 *
 * Such pointers can be signed with data PAC properly, which will provide
 * integrity of both the base pointer and its size.
 *
 * kalloc arrays are useful as an alternative to embedding the length
 * of the allocation inside the allocation itself, which tends to be
 * driven by:
 *
 * - a desire to not grow the outer structure holding the pointer
 *   to this array with an extra "length" field for optional arrays,
 *   in order to save memory (see the @c ip_requests field in ports),
 *
 * - a need to be able to atomically consult the size of an allocation
 *   with respect to loading its pointer (where address dependencies
 *   traditionally give this property) for lockless algorithms
 *   (see the IPC space table).
 *
 * Using a kalloc array is preferable for two reasons:
 *
 * - embedding lengths inside the allocation is self-referential
 *   and an appetizing target for post-exploitation strategies,
 *
 * - having a dependent load to get to the length loses out-of-order
 *   opportunities for the CPU and is prone to back-to-back cache misses.
 *
 * Holding information such as a level of usage of this array
 * within itself is fine provided those quantities are validated
 * against the "count" (number of elements) or "size" (allocation
 * size in bytes) of the array before use.
 *
 * This macro will define a series of functions:
 *
 * - ${name}_count_to_size() and ${name}_size_to_count()
 *   to convert between memory sizes and array element counts
 *   (taking the header size into account when it exists);
 *
 *   Note that those functions assume the count/size correspond
 *   to a valid allocation size within [0, KALLOC_ARRAY_SIZE_MAX].
 *
 * - ${name}_next_size() to build good allocation growth policies;
 *
 * - ${name}_base() returning a (bound-checked indexable) pointer
 *   to the header of the array (or its first element when there is
 *   no header);
 *
 * - ${name}_begin() returning a (bound-checked indexable)
 *   pointer to the first element of the array;
 *
 * - ${name}_contains() to check if an element index is within
 *   the valid range of this allocation;
 *
 * - ${name}_next_elem() to get the next element of an array.
 *
 * - ${name}_get() and ${name}_get_nocheck() to return a pointer
 *   to a given cell of the array with (resp. without) a bound
 *   check against the array size. The bound-checked variant
 *   returns NULL for invalid indexes.
 *
 * - ${name}_alloc_by_count() and ${name}_alloc_by_size()
 *   to allocate a new array able to hold at least that many elements
 *   (resp. bytes).
 *
 * - ${name}_realloc_by_count() and ${name}_realloc_by_size()
 *   to re-allocate a new array able to hold at least that many elements
 *   (resp. bytes).
 *
 * - ${name}_free() and ${name}_free_noclear() to free such an array
 *   (resp. without nil-ing the pointer). The non-clearing variant
 *   is to be used only when nil-ing out the pointer is otherwise
 *   not allowed by C (const value, unable to take address of, ...),
 *   otherwise the normal ${name}_free() must be used.
 */
#define KALLOC_ARRAY_TYPE_DECL(...) \
        KALLOC_DISPATCH(KALLOC_ARRAY_TYPE_DECL, ##__VA_ARGS__)
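
/*
 * Example (sketch): declaring and using a packed kalloc array type.
 * "foo_array" and struct foo are hypothetical names; the DEFINE half
 * belongs in a single .c file.
 *
 * <code>
 * KALLOC_ARRAY_TYPE_DECL(foo_array, struct foo);
 * KALLOC_ARRAY_TYPE_DEFINE(foo_array, struct foo, KT_DEFAULT);
 *
 * foo_array_t arr = foo_array_alloc_by_count(8, Z_WAITOK | Z_ZERO);
 * struct foo *f = foo_array_get(arr, 3);   // NULL if out of bounds
 * ...
 * foo_array_free(&arr);
 * </code>
 */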

#if XNU_KERNEL_PRIVATE

#define KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, h_sz, e_type_t, e_sz) \
        KALLOC_TYPE_VAR_DECLARE(name ## _kt_view); \
        typedef struct name * __unsafe_indexable name ## _t; \
        \
        __pure2 \
        static inline uint32_t \
        name ## _count_to_size(uint32_t count) \
        { \
                return (uint32_t)((h_sz) + (e_sz) * count); \
        } \
        \
        __pure2 \
        static inline uint32_t \
        name ## _size_to_count(vm_size_t size) \
        { \
                return (uint32_t)((size - (h_sz)) / (e_sz)); \
        } \
        \
        __pure2 \
        static inline uint32_t \
        name ## _size(name ## _t array) \
        { \
                return __kalloc_array_size((vm_address_t)array); \
        } \
        \
        __pure2 \
        static inline uint32_t \
        name ## _next_size( \
                uint32_t        min_count, \
                vm_size_t       cur_size, \
                uint32_t        vm_period) \
        { \
                vm_size_t size; \
        \
                if (cur_size) { \
                        size = cur_size + (e_sz) - 1; \
                } else { \
                        size = kt_size(h_sz, e_sz, min_count) - 1; \
                } \
                size = kalloc_next_good_size(size, vm_period); \
                if (size <= KALLOC_ARRAY_SIZE_MAX) { \
                        return (uint32_t)size; \
                } \
                return 2 * KALLOC_ARRAY_SIZE_MAX; /* will fail */ \
        } \
        \
        __pure2 \
        static inline uint32_t \
        name ## _count(name ## _t array) \
        { \
                return name ## _size_to_count(name ## _size(array)); \
        } \
        \
        __pure2 \
        static inline h_type_t *__header_bidi_indexable \
        name ## _base(name ## _t array) \
        { \
                vm_address_t base = __kalloc_array_base((vm_address_t)array); \
                uint32_t     size = __kalloc_array_size((vm_address_t)array); \
        \
                (void)size; \
                return __unsafe_forge_bidi_indexable(h_type_t *, base, size); \
        } \
        \
        __pure2 \
        static inline e_type_t *__header_bidi_indexable \
        name ## _begin(name ## _t array) \
        { \
                vm_address_t base = __kalloc_array_begin((vm_address_t)array, h_sz); \
                uint32_t     size = __kalloc_array_size((vm_address_t)array); \
        \
                (void)size; \
                return __unsafe_forge_bidi_indexable(e_type_t *, base, size); \
        } \
        \
        __pure2 \
        static inline e_type_t * \
        name ## _next_elem(name ## _t array, e_type_t *e) \
        { \
                vm_address_t end = __kalloc_array_end((vm_address_t)array); \
                vm_address_t ptr = (vm_address_t)e + sizeof(e_type_t); \
        \
                if (ptr + sizeof(e_type_t) <= end) { \
                        return __unsafe_forge_single(e_type_t *, ptr); \
                } \
                return NULL; \
        } \
        \
        __pure2 \
        static inline bool \
        name ## _contains(name ## _t array, vm_size_t i) \
        { \
                vm_size_t offs = (e_sz) + (h_sz); \
                vm_size_t s; \
        \
                if (__improbable(os_mul_and_add_overflow(i, e_sz, offs, &s))) { \
                        return false; \
                } \
                if (__improbable(s > name ## _size(array))) { \
                        return false; \
                } \
                return true; \
        } \
        \
        __pure2 \
        static inline e_type_t * __single \
        name ## _get_nocheck(name ## _t array, vm_size_t i) \
        { \
                return name ## _begin(array) + i; \
        } \
        \
        __pure2 \
        static inline e_type_t * __single \
        name ## _get(name ## _t array, vm_size_t i) \
        { \
                if (__probable(name ## _contains(array, i))) { \
                        return name ## _get_nocheck(array, i); \
                } \
                return NULL; \
        } \
        \
        static inline name ## _t \
        name ## _alloc_by_size(vm_size_t size, zalloc_flags_t fl) \
        { \
                fl |= Z_KALLOC_ARRAY; \
                fl  = __zone_flags_mix_tag(fl, VM_ALLOC_SITE_TAG()); \
                return (name ## _t)kalloc_type_var_impl(name ## _kt_view, \
                           size, fl, NULL); \
        } \
        \
        static inline name ## _t \
        name ## _alloc_by_count(uint32_t count, zalloc_flags_t fl) \
        { \
                return name ## _alloc_by_size(kt_size(h_sz, e_sz, count), fl); \
        } \
        \
        static inline name ## _t \
        name ## _realloc_by_size( \
                name ## _t      array, \
                vm_size_t       new_size, \
                zalloc_flags_t  fl) \
        { \
                vm_address_t base = __kalloc_array_base((vm_address_t)array); \
                vm_size_t    size = __kalloc_array_size((vm_address_t)array); \
        \
                fl |= Z_KALLOC_ARRAY; \
                fl  = __zone_flags_mix_tag(fl, VM_ALLOC_SITE_TAG()); \
                return (name ## _t)(krealloc_ext)( \
                           kt_mangle_var_view(name ## _kt_view), \
                           (void *)base, size, new_size, fl, NULL).addr; \
        } \
        \
        static inline name ## _t \
        name ## _realloc_by_count( \
                name ## _t      array, \
                uint32_t        new_count, \
                zalloc_flags_t  fl) \
        { \
                vm_size_t new_size = kt_size(h_sz, e_sz, new_count); \
        \
                return name ## _realloc_by_size(array, new_size, fl); \
        } \
        \
        static inline void \
        name ## _free_noclear(name ## _t array) \
        { \
                kfree_type_var_impl(name ## _kt_view, \
                    name ## _base(array), name ## _size(array)); \
        } \
        \
        static inline void \
        name ## _free(name ## _t *arrayp) \
        { \
                name ## _t array = *arrayp; \
        \
                *arrayp = NULL; \
                kfree_type_var_impl(name ## _kt_view, \
                    name ## _base(array), name ## _size(array)); \
        }


/*!
 * @macro KALLOC_ARRAY_TYPE_DEFINE()
 *
 * @description
 * Defines the data structures required to pair with a KALLOC_ARRAY_TYPE_DECL()
 * kalloc array declaration.
 *
 * @discussion
 * This macro comes in two variants
 *
 * - KALLOC_ARRAY_TYPE_DEFINE(name, e_ty, flags)
 * - KALLOC_ARRAY_TYPE_DEFINE(name, h_ty, e_ty, flags)
 *
 * Those must pair with the KALLOC_ARRAY_TYPE_DECL() form being used.
 * The flags must be valid @c kalloc_type_flags_t flags.
 */
#define KALLOC_ARRAY_TYPE_DEFINE(...) \
        KALLOC_DISPATCH(KALLOC_ARRAY_TYPE_DEFINE, ##__VA_ARGS__)

/*!
 * @function kalloc_next_good_size()
 *
 * @brief
 * Allows implementing "allocation growth policies" that work well
 * with the allocator.
 *
 * @discussion
 * Note that if the caller tracks a number of elements for an array,
 * where the elements are of size S, and the current count is C,
 * then it is possible for kalloc_next_good_size(C * S, ..) to hit
 * a fixed point; clients must call it with a size of at least
 * ((C + 1) * S).
 *
 * @param size          the current "size" of the allocation (in bytes).
 * @param period        the "period" (power of 2) for the allocation growth
 *                      policy once hitting the VM sized allocations.
 */
extern vm_size_t kalloc_next_good_size(
        vm_size_t             size,
        uint32_t              period);
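
/*
 * Example (sketch): growing an array of struct foo from its current
 * count C, asking for at least one more element so the call cannot hit
 * the fixed point described above. PAGE_SIZE as the period is an
 * illustrative choice.
 *
 * <code>
 * vm_size_t new_size  = kalloc_next_good_size((C + 1) * sizeof(struct foo),
 *     PAGE_SIZE);
 * uint32_t  new_count = (uint32_t)(new_size / sizeof(struct foo));
 * </code>
 */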

#pragma mark kalloc_array implementation details

#define KALLOC_ARRAY_TYPE_DECL_2(name, e_type_t) \
        KALLOC_ARRAY_TYPE_DECL_(name, e_type_t, 0, e_type_t, sizeof(e_type_t))

#define KALLOC_ARRAY_TYPE_DECL_3(name, h_type_t, e_type_t) \
        KALLOC_ARRAY_TYPE_DECL_(name, \
            h_type_t, kt_realign_sizeof(h_type_t, e_type_t), \
            e_type_t, sizeof(e_type_t))

#define KALLOC_ARRAY_TYPE_DEFINE_3(name, e_type_t, flags) \
        KALLOC_TYPE_VAR_DEFINE_3(name ## _kt_view, e_type_t, flags)

#define KALLOC_ARRAY_TYPE_DEFINE_4(name, h_type_t, e_type_t, flags) \
        KALLOC_TYPE_VAR_DEFINE_4(name ## _kt_view, h_type_t, e_type_t, flags)

extern struct kalloc_result __kalloc_array_decode(
        vm_address_t          array) __pure2;

__pure2
static inline uint32_t
__kalloc_array_size(vm_address_t array)
{
        vm_address_t size = __kalloc_array_decode(array).size;

        __builtin_assume(size <= KALLOC_ARRAY_SIZE_MAX);
        return (uint32_t)size;
}

__pure2
static inline vm_address_t
__kalloc_array_base(vm_address_t array)
{
        return (vm_address_t)__kalloc_array_decode(array).addr;
}

__pure2
static inline vm_address_t
__kalloc_array_begin(vm_address_t array, vm_size_t hdr_size)
{
        return (vm_address_t)__kalloc_array_decode(array).addr + hdr_size;
}

__pure2
static inline vm_address_t
__kalloc_array_end(vm_address_t array)
{
        struct kalloc_result kr = __kalloc_array_decode(array);

        return (vm_address_t)kr.addr + kr.size;
}

#else /* !XNU_KERNEL_PRIVATE */

#define KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, h_sz, e_type_t, e_sz) \
        typedef struct name * __unsafe_indexable name ## _t

#endif /* !XNU_KERNEL_PRIVATE */
#pragma mark implementation details


static inline void *__unsafe_indexable
kt_mangle_var_view(kalloc_type_var_view_t kt_view)
{
        return (void *__unsafe_indexable)((uintptr_t)kt_view | 1ul);
}

static inline kalloc_type_var_view_t __unsafe_indexable
kt_demangle_var_view(void *ptr)
{
        return (kalloc_type_var_view_t __unsafe_indexable)((uintptr_t)ptr & ~1ul);
}

#define kt_is_var_view(ptr)  ((uintptr_t)(ptr) & 1)

#define kt_realign_sizeof(h_ty, e_ty) \
        ((sizeof(h_ty) + _Alignof(e_ty) - 1) & -_Alignof(e_ty))
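
/*
 * Worked example: kt_realign_sizeof() rounds the header size up to the
 * element alignment, so a 5-byte header followed by uint64_t elements
 * yields (5 + 8 - 1) & -8 == 8.
 */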

static inline vm_size_t
kt_size(vm_size_t s1, vm_size_t s2, vm_size_t c2)
{
        /* kalloc_large() will reject this size before even asking the VM */
        const vm_size_t limit = 1ull << (8 * sizeof(vm_size_t) - 1);

        if (os_mul_and_add_overflow(s2, c2, s1, &s1) || (s1 & limit)) {
                return limit;
        }
        return s1;
}

#define kalloc_type_2(type, flags) ({ \
        static _KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT); \
        __unsafe_forge_single(type *, kalloc_type_impl(kt_view_var, flags)); \
})

#define kfree_type_2(type, elem) ({ \
        KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type); \
        static _KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT); \
        kfree_type_impl(kt_view_var, os_ptr_load_and_erase(elem)); \
})

#define kfree_type_3(type, count, elem) ({ \
        KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type); \
        static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT); \
        __auto_type __kfree_count = (count); \
        kfree_type_var_impl(kt_view_var, os_ptr_load_and_erase(elem), \
            kt_size(0, sizeof(type), __kfree_count)); \
})

#define kfree_type_counted_by_3(type, count_var, elem_var) ({ \
        void *__bidi_indexable __elem_copy = (elem_var); \
        __auto_type __kfree_count = (count_var); \
        (elem_var) = 0; \
        (count_var) = 0; \
        KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(__elem_copy, type); \
        static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT); \
        kfree_type_var_impl(kt_view_var, __elem_copy, \
            kt_size(0, sizeof(type), __kfree_count)); \
})

#define kfree_type_4(hdr_ty, e_ty, count, elem) ({ \
        KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty); \
        static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty, \
            KT_SHARED_ACCT); \
        __auto_type __kfree_count = (count); \
        kfree_type_var_impl(kt_view_var, \
            os_ptr_load_and_erase(elem), \
            kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty), \
            __kfree_count)); \
})

#ifdef XNU_KERNEL_PRIVATE
#define kalloc_type_tag_3(type, flags, tag) ({ \
        static _KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT); \
        __unsafe_forge_single(type *, zalloc_flags(kt_view_var, \
            Z_VM_TAG(flags, tag))); \
})

#define kalloc_type_tag_4(type, count, flags, tag) ({ \
        static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT); \
        (type *)kalloc_type_var_impl(kt_view_var, \
            kt_size(0, sizeof(type), count), \
            __zone_flags_mix_tag(flags, tag), NULL); \
})
#define kalloc_type_3(type, count, flags) \
        kalloc_type_tag_4(type, count, flags, VM_ALLOC_SITE_TAG())

#define kalloc_type_tag_5(hdr_ty, e_ty, count, flags, tag) ({ \
        static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty, \
            KT_SHARED_ACCT); \
        (hdr_ty *)kalloc_type_var_impl(kt_view_var, \
            kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty), count), \
            __zone_flags_mix_tag(flags, tag), NULL); \
})
#define kalloc_type_4(hdr_ty, e_ty, count, flags) \
        kalloc_type_tag_5(hdr_ty, e_ty, count, flags, VM_ALLOC_SITE_TAG())

#define krealloc_type_tag_6(type, old_count, new_count, elem, flags, tag) ({ \
        static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT); \
        KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type); \
        (type *)__krealloc_type(kt_view_var, elem, \
            kt_size(0, sizeof(type), old_count), \
            kt_size(0, sizeof(type), new_count), \
            __zone_flags_mix_tag(flags, tag), NULL); \
})
#define krealloc_type_5(type, old_count, new_count, elem, flags) \
        krealloc_type_tag_6(type, old_count, new_count, elem, flags, \
            VM_ALLOC_SITE_TAG())

#define krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem, \
            flags, tag) ({ \
        static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty, \
            KT_SHARED_ACCT); \
        KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty); \
        (hdr_ty *)__krealloc_type(kt_view_var, elem, \
            kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty), old_count), \
            kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty), new_count), \
            __zone_flags_mix_tag(flags, tag), NULL); \
})
#define krealloc_type_6(hdr_ty, e_ty, old_count, new_count, elem, flags) \
        krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem, flags, \
            VM_ALLOC_SITE_TAG())

#else /* XNU_KERNEL_PRIVATE */

#define kalloc_type_3(type, count, flags) ({ \
        static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT); \
        (type *)kalloc_type_var_impl(kt_view_var, \
            kt_size(0, sizeof(type), count), flags, NULL); \
})

#define kalloc_type_4(hdr_ty, e_ty, count, flags) ({ \
        static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty, \
            KT_SHARED_ACCT); \
        (hdr_ty *)kalloc_type_var_impl(kt_view_var, \
            kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty), count), \
            flags, NULL); \
})

#endif /* !XNU_KERNEL_PRIVATE */

/*
 * All k*free macros set "elem" to NULL on free.
 *
 * Note: any value passed to k*free() might live inside the element being
 * freed, so temporaries must be taken and the pointer reset prior to
 * the free.
 */
#ifdef XNU_KERNEL_PRIVATE

#define kheap_free(heap, elem, size) ({ \
        kalloc_heap_t __kfree_heap = (heap); \
        __auto_type __kfree_size = (size); \
        __builtin_assume(!kt_is_var_view(__kfree_heap)); \
        kfree_ext((void *)__kfree_heap, \
            (void *)os_ptr_load_and_erase(elem), __kfree_size); \
})

#define kheap_free_addr(heap, elem) ({ \
        kalloc_heap_t __kfree_heap = (heap); \
        kfree_addr_ext(__kfree_heap, (void *)os_ptr_load_and_erase(elem)); \
})

#define kheap_free_bounded(heap, elem, min_sz, max_sz) ({ \
        static_assert(max_sz <= KALLOC_SAFE_ALLOC_SIZE); \
        kalloc_heap_t __kfree_heap = (heap); \
        __auto_type __kfree_min_sz = (min_sz); \
        __auto_type __kfree_max_sz = (max_sz); \
        (kheap_free_bounded)(__kfree_heap, \
            (void *)os_ptr_load_and_erase(elem), \
            __kfree_min_sz, __kfree_max_sz); \
})

#else /* XNU_KERNEL_PRIVATE */

#define kfree_data(elem, size) ({ \
        __auto_type __kfree_size = (size); \
        (kfree_data)((void *)os_ptr_load_and_erase(elem), __kfree_size); \
})

#define kfree_data_addr(elem) \
        (kfree_data_addr)((void *)os_ptr_load_and_erase(elem))

#endif /* !XNU_KERNEL_PRIVATE */

#define __kfree_data_elem_count_size(elem_var, count_var, size) ({ \
        void *__bidi_indexable __elem_copy = (elem_var); \
        (elem_var) = 0; \
        (count_var) = 0; \
        kfree_data(__elem_copy, size); \
})

/*
 * kfree_data_sized_by is the kfree_data equivalent that is compatible with
 * -fbounds-safety's __sized_by pointers. Consistently with the -fbounds-safety
 * semantics, `size` must be the byte size of the allocation that is freed (for
 * instance, 20 for an array of 5 uint32_t).
 */
#define kfree_data_sized_by(elem, size) ({ \
        __auto_type __size = (size); \
        __kfree_data_elem_count_size(elem, size, __size); \
})

/*
 * kfree_data_counted_by is the kfree_data equivalent that is compatible with
 * -fbounds-safety's __counted_by pointers. Consistently with the
 * -fbounds-safety semantics, `count` must be the object count of the allocation
 * that is freed (for instance, 5 for an array of 5 uint32_t).
 */
#define kfree_data_counted_by(elem, count) ({ \
        __auto_type __size = (count) * sizeof(*(elem)); \
        __kfree_data_elem_count_size(elem, count, __size); \
})
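
/*
 * Example (sketch): freeing a -fbounds-safety __counted_by buffer held
 * in a hypothetical structure; the macro resets both fields.
 *
 * <code>
 * struct msg {
 *         uint32_t *__counted_by(n_words) words;
 *         uint32_t  n_words;
 * };
 *
 * kfree_data_counted_by(m->words, m->n_words);
 * </code>
 */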

#if __has_feature(address_sanitizer)
# define __kalloc_no_kasan __attribute__((no_sanitize("address")))
#else
# define __kalloc_no_kasan
#endif

#define KALLOC_CONCAT(x, y) __CONCAT(x,y)

#define KALLOC_COUNT_ARGS1(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, N, ...) N
#define KALLOC_COUNT_ARGS(...) \
        KALLOC_COUNT_ARGS1(, ##__VA_ARGS__, _9, _8, _7, _6, _5, _4, _3, _2, _1, _0)
#define KALLOC_DISPATCH1(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH(base, ...) \
        KALLOC_DISPATCH1(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)
#define KALLOC_DISPATCH1_R(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH_R(base, ...) \
        KALLOC_DISPATCH1_R(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)

#define kt_view_var \
        KALLOC_CONCAT(kalloc_type_view_, __LINE__)

#define KALLOC_TYPE_SEGMENT "__DATA_CONST"

/*
 * When kalloc_type_impl is called from xnu, it calls zalloc_flags
 * directly and doesn't redirect zone-less sites to kheap_alloc.
 * Passing a size larger than KHEAP_MAX_SIZE for these allocations will
 * lead to a panic as the zone is null. Therefore assert that size
 * is less than KALLOC_SAFE_ALLOC_SIZE.
 */
#if XNU_KERNEL_PRIVATE || defined(KALLOC_TYPE_STRICT_SIZE_CHECK)
#define KALLOC_TYPE_SIZE_CHECK(size) \
        _Static_assert(size <= KALLOC_SAFE_ALLOC_SIZE, \
            "type is too large");
#else
#define KALLOC_TYPE_SIZE_CHECK(size)
#endif

#define KALLOC_TYPE_CHECK_2(check, type) \
        (KALLOC_TYPE_SIG_CHECK(check, type))

#define KALLOC_TYPE_CHECK_3(check, type1, type2) \
        (KALLOC_TYPE_SIG_CHECK(check, type1) && \
        KALLOC_TYPE_SIG_CHECK(check, type2))

#define KALLOC_TYPE_CHECK(...) \
        KALLOC_DISPATCH_R(KALLOC_TYPE_CHECK, ##__VA_ARGS__)

#define KALLOC_TYPE_VM_SIZE_CHECK_1(type) \
        (sizeof(type) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK_2(type1, type2) \
        (sizeof(type1) + sizeof(type2) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK(...) \
        KALLOC_DISPATCH_R(KALLOC_TYPE_VM_SIZE_CHECK, ##__VA_ARGS__)

#define KALLOC_TYPE_TRAILING_DATA_CHECK(hdr_ty, elem_ty) \
        _Static_assert((KALLOC_TYPE_IS_DATA_ONLY(hdr_ty) || \
            !KALLOC_TYPE_IS_DATA_ONLY(elem_ty)), \
            "cannot allocate data-only array of " #elem_ty \
            " contiguously to " #hdr_ty)

#ifdef __cplusplus
#define KALLOC_TYPE_CAST_FLAGS(flags) static_cast<kalloc_type_flags_t>(flags)
#else
#define KALLOC_TYPE_CAST_FLAGS(flags) (kalloc_type_flags_t)(flags)
#endif

/*
 * Don't emit signature if type is "data-only" or is large enough that it
 * uses the VM.
 *
 * Note: sig_type is the type you want to emit signature for. The variable
 * args can be used to provide other types in the allocation, to make the
 * decision of whether to emit the signature.
 */
#define KALLOC_TYPE_EMIT_SIG(sig_type, ...) \
        (KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, sig_type, ##__VA_ARGS__) || \
        KALLOC_TYPE_VM_SIZE_CHECK(sig_type, ##__VA_ARGS__))? \
        "" : __builtin_xnu_type_signature(sig_type)

/*
 * Kalloc type flags are adjusted to indicate if the type is "data-only" or
 * will use the VM or is a pointer array.
 */
#define KALLOC_TYPE_ADJUST_FLAGS(flags, ...) \
        KALLOC_TYPE_CAST_FLAGS((flags | KT_CHANGED | KT_CHANGED2 | \
        (KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, __VA_ARGS__)? KT_DATA_ONLY: 0) |\
        (KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_PTR, __VA_ARGS__)? KT_PTR_ARRAY: 0) | \
        (KALLOC_TYPE_VM_SIZE_CHECK(__VA_ARGS__)? KT_VM : 0)))

#define _KALLOC_TYPE_DEFINE(var, type, flags) \
        __kalloc_no_kasan \
        __PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_type, " \
            "regular, live_support") \
        struct kalloc_type_view var[1] = { { \
                .kt_zv.zv_name = "site." #type, \
                .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type), \
                .kt_size = sizeof(type), \
                .kt_signature = KALLOC_TYPE_EMIT_SIG(type), \
        } }; \
        KALLOC_TYPE_SIZE_CHECK(sizeof(type));

#define KALLOC_TYPE_VAR_DEFINE_3(var, type, flags) \
        __kalloc_no_kasan \
        __PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var, " \
            "regular, live_support") \
        struct kalloc_type_var_view var[1] = { { \
                .kt_version = KT_V1, \
                .kt_name = "site." #type, \
                .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type), \
                .kt_size_type = sizeof(type), \
                .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type), \
        } }; \
        KALLOC_TYPE_SIZE_CHECK(sizeof(type));

#define KALLOC_TYPE_VAR_DEFINE_4(var, hdr, type, flags) \
        __kalloc_no_kasan \
        __PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var, " \
            "regular, live_support") \
        struct kalloc_type_var_view var[1] = { { \
                .kt_version = KT_V1, \
                .kt_name = "site." #hdr "." #type, \
                .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, hdr, type), \
                .kt_size_hdr = sizeof(hdr), \
                .kt_size_type = sizeof(type), \
                .kt_sig_hdr = KALLOC_TYPE_EMIT_SIG(hdr, type), \
                .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type, hdr), \
        } }; \
        KALLOC_TYPE_SIZE_CHECK(sizeof(hdr)); \
        KALLOC_TYPE_SIZE_CHECK(sizeof(type)); \
        KALLOC_TYPE_TRAILING_DATA_CHECK(hdr, type);

#ifndef XNU_KERNEL_PRIVATE
/*
 * This macro is currently used by AppleImage4
 */
#define KALLOC_TYPE_DEFINE_SITE(var, type, flags) \
        static _KALLOC_TYPE_DEFINE(var, type, flags)

#endif /* !XNU_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

extern struct kalloc_result kalloc_ext(
        void                 *kheap_or_kt_view __unsafe_indexable,
        vm_size_t             size,
        zalloc_flags_t        flags,
        void                 *site);

static inline struct kalloc_result
__kalloc_ext(
        void                 *kheap_or_kt_view __unsafe_indexable,
        vm_size_t             size,
        zalloc_flags_t        flags,
        void                 *site)
{
        struct kalloc_result kr;

        kr = (kalloc_ext)(kheap_or_kt_view, size, flags, site);
        if (flags & Z_NOFAIL) {
                __builtin_assume(kr.addr != NULL);
        }
        return kr;
}

#define kalloc_ext(hov, size, fl, site) __kalloc_ext(hov, size, fl, site)

extern void kfree_ext(
        void                 *kheap_or_kt_view __unsafe_indexable,
        void                 *addr __unsafe_indexable,
        vm_size_t             size);

// rdar://87559422
static inline void *__unsafe_indexable
kalloc_type_var_impl(
        kalloc_type_var_view_t  kt_view,
        vm_size_t               size,
        zalloc_flags_t          flags,
        void                   *site)
{
        struct kalloc_result kr;

        kr = kalloc_ext(kt_mangle_var_view(kt_view), size, flags, site);
        return kr.addr;
}

static inline void
kfree_type_var_impl(
        kalloc_type_var_view_t  kt_view,
        void                   *ptr __unsafe_indexable,
        vm_size_t               size)
{
        kfree_ext(kt_mangle_var_view(kt_view), ptr, size);
}

#else /* XNU_KERNEL_PRIVATE */

extern void *__unsafe_indexable kalloc_type_var_impl(
        kalloc_type_var_view_t  kt_view,
        vm_size_t               size,
        zalloc_flags_t          flags,
        void                   *site);

extern void kfree_type_var_impl(
        kalloc_type_var_view_t  kt_view,
        void                   *ptr __unsafe_indexable,
        vm_size_t               size);

#endif /* !XNU_KERNEL_PRIVATE */

__attribute__((malloc, alloc_size(2)))
static inline void *
__sized_by(size)
__kalloc_type_var_impl(
        kalloc_type_var_view_t  kt_view,
        vm_size_t               size,
        zalloc_flags_t          flags,
        void                   *site)
{
        void *__unsafe_indexable addr;

        addr = (kalloc_type_var_impl)(kt_view, size, flags, site);
        if (flags & Z_NOFAIL) {
                __builtin_assume(addr != NULL);
        }
        return __unsafe_forge_bidi_indexable(void *, addr, size);
}

#define kalloc_type_var_impl(ktv, size, fl, site) \
        __kalloc_type_var_impl(ktv, size, fl, site)

extern void *kalloc_type_impl_external(
        kalloc_type_view_t    kt_view,
        zalloc_flags_t        flags);

extern void kfree_type_impl_external(
        kalloc_type_view_t    kt_view,
        void                 *ptr __unsafe_indexable);

extern void *OSObject_typed_operator_new(
        kalloc_type_view_t    ktv,
        vm_size_t             size);

extern void OSObject_typed_operator_delete(
        kalloc_type_view_t    ktv,
        void                 *mem __unsafe_indexable,
        vm_size_t             size);

#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

#define KALLOC_TYPE_SIZE_MASK   0xffffff
#define KALLOC_TYPE_IDX_SHIFT   24
#define KALLOC_TYPE_IDX_MASK    0xff

static inline uint32_t
kalloc_type_get_size(uint32_t kt_size)
{
        return kt_size & KALLOC_TYPE_SIZE_MASK;
}

extern bool IOMallocType_from_vm(
        kalloc_type_view_t    ktv);

/* Used by kern_os_* and operator new */
KALLOC_HEAP_DECLARE(KERN_OS_MALLOC);

extern void kheap_startup_init(kalloc_heap_t heap);
extern void kheap_var_startup_init(kalloc_heap_t heap);

__attribute__((malloc, alloc_size(2)))
static inline void *
__sized_by(size)
__kheap_alloc(
        kalloc_heap_t         kheap,
        vm_size_t             size,
        zalloc_flags_t        flags,
        void                 *site)
{
        struct kalloc_result kr;
        __builtin_assume(!kt_is_var_view(kheap));
        kr = kalloc_ext(kheap, size, flags, site);
        return __unsafe_forge_bidi_indexable(void *, kr.addr, size);
}

extern struct kalloc_result krealloc_ext(
        void                 *kheap_or_kt_view __unsafe_indexable,
        void                 *addr __unsafe_indexable,
        vm_size_t             old_size,
        vm_size_t             new_size,
        zalloc_flags_t        flags,
        void                 *site);

static inline struct kalloc_result
__krealloc_ext(
        void                 *kheap_or_kt_view __unsafe_indexable,
        void                 *addr __sized_by(old_size),
        vm_size_t             old_size,
        vm_size_t             new_size,
        zalloc_flags_t        flags,
        void                 *site)
{
        struct kalloc_result kr = (krealloc_ext)(kheap_or_kt_view, addr, old_size,
            new_size, flags, site);
        if (flags & Z_NOFAIL) {
                __builtin_assume(kr.addr != NULL);
        }
        return kr;
}

#define krealloc_ext(hov, addr, old_size, new_size, fl, site) \
        __krealloc_ext(hov, addr, old_size, new_size, fl, site)

__attribute__((malloc, alloc_size(4)))
static inline void *
__sized_by(new_size)
__kheap_realloc(
        kalloc_heap_t         kheap,
        void                 *addr __sized_by(old_size),
        vm_size_t             old_size,
        vm_size_t             new_size,
        zalloc_flags_t        flags,
        void                 *site)
{
        struct kalloc_result kr;
        __builtin_assume(!kt_is_var_view(kheap));
        kr = krealloc_ext(kheap, addr, old_size, new_size, flags, site);
        return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
}

__attribute__((malloc, alloc_size(4)))
static inline void *
__sized_by(new_size)
__krealloc_type(
        kalloc_type_var_view_t  kt_view,
        void                   *addr __sized_by(old_size),
        vm_size_t               old_size,
        vm_size_t               new_size,
        zalloc_flags_t          flags,
        void                   *site)
{
        struct kalloc_result kr;
        kr = krealloc_ext(kt_mangle_var_view(kt_view), addr,
            old_size, new_size, flags, site);
        return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
}

extern void kfree_addr_ext(
        kalloc_heap_t         kheap,
        void                 *addr __unsafe_indexable);

extern zone_t kalloc_zone_for_size(
        zone_id_t             zid,
        vm_size_t             size);

extern vm_size_t kalloc_large_max;
SCALABLE_COUNTER_DECLARE(kalloc_large_count);
SCALABLE_COUNTER_DECLARE(kalloc_large_total);

extern void kern_os_typed_free(
        kalloc_type_view_t    ktv,
        void                 *addr __unsafe_indexable,
        vm_size_t             esize);

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */

extern void kern_os_zfree(
        zone_t                zone,
        void                 *addr __unsafe_indexable,
        vm_size_t             size);

__ASSUME_PTR_ABI_SINGLE_END __END_DECLS

#endif /* _KERN_KALLOC_H_ */

#endif /* KERNEL_PRIVATE */