1 | /* |
2 | * Copyright (c) 2000-2020 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ |
29 | /* |
30 | * Copyright (c) 1987, 1991, 1993 |
31 | * The Regents of the University of California. All rights reserved. |
32 | * |
33 | * Redistribution and use in source and binary forms, with or without |
34 | * modification, are permitted provided that the following conditions |
35 | * are met: |
36 | * 1. Redistributions of source code must retain the above copyright |
37 | * notice, this list of conditions and the following disclaimer. |
38 | * 2. Redistributions in binary form must reproduce the above copyright |
39 | * notice, this list of conditions and the following disclaimer in the |
40 | * documentation and/or other materials provided with the distribution. |
41 | * 3. All advertising materials mentioning features or use of this software |
42 | * must display the following acknowledgement: |
43 | * This product includes software developed by the University of |
44 | * California, Berkeley and its contributors. |
45 | * 4. Neither the name of the University nor the names of its contributors |
46 | * may be used to endorse or promote products derived from this software |
47 | * without specific prior written permission. |
48 | * |
49 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
50 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
51 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
52 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
53 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
54 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
55 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
56 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
57 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
58 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
59 | * SUCH DAMAGE. |
60 | * |
61 | * @(#)kern_malloc.c 8.4 (Berkeley) 5/20/95 |
62 | */ |
63 | /* |
64 | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce |
65 | * support for mandatory and extensible security protections. This notice |
66 | * is included in support of clause 2.2 (b) of the Apple Public License, |
67 | * Version 2.0. |
68 | */ |
69 | |
70 | #include <kern/zalloc.h> |
71 | #include <kern/kalloc.h> |
72 | #include <sys/ubc.h> /* mach_to_bsd_errno */ |
73 | |
74 | #include <sys/malloc.h> |
75 | #include <sys/sysctl.h> |
76 | |
77 | #include <libkern/libkern.h> |
78 | |
/* Zone view used for vfs.namei path buffers (MAXPATHLEN bytes each). */
ZONE_VIEW_DEFINE(ZV_NAMEI, "vfs.namei", KHEAP_ID_DATA_BUFFERS, MAXPATHLEN);
/* Dedicated kalloc heap backing the legacy kern_os_malloc/OSMalloc KPI. */
KALLOC_HEAP_DEFINE(KERN_OS_MALLOC, "kern_os_malloc", KHEAP_ID_KT_VAR);
81 | |
82 | /* |
83 | * macOS Only deprecated interfaces, here only for legacy reasons. |
84 | * There is no internal variant of any of these symbols on purpose. |
85 | */ |
86 | #if XNU_PLATFORM_MacOSX |
87 | |
88 | #define OSMallocDeprecatedMsg(msg) |
89 | #include <libkern/OSMalloc.h> |
90 | |
/*
 * Legacy _MALLOC() entry point (macOS kexts only).
 *
 * Forwards the request to kalloc_ext() on KHEAP_DEFAULT, or on
 * KHEAP_SONAME when the legacy type is M_SONAME.  The legacy M_* flags
 * map directly onto the Z_* zalloc flags (verified by the
 * static_asserts below).
 *
 * Returns NULL for zero-sized requests, or on allocation failure when
 * the caller passed M_NOWAIT or M_NULL; any other failure panics (see
 * the comment at the bottom for why).
 */
void *
_MALLOC_external(size_t size, int type, int flags);
void *
_MALLOC_external(size_t size, int type, int flags)
{
	kalloc_heap_t heap = KHEAP_DEFAULT;
	void *addr = NULL;

	if (type == M_SONAME) {
#if !XNU_TARGET_OS_OSX
		/* Socket-name allocations are expected to be tiny on embedded. */
		assert3u(size, <=, UINT8_MAX);
#endif /* !XNU_TARGET_OS_OSX */
		heap = KHEAP_SONAME;
	}

	if (size == 0) {
		return NULL;
	}

	/* The direct M_* -> Z_* flag pass-through relies on these identities. */
	static_assert(sizeof(vm_size_t) == sizeof(size_t));
	static_assert(M_WAITOK == Z_WAITOK);
	static_assert(M_NOWAIT == Z_NOWAIT);
	static_assert(M_ZERO == Z_ZERO);

	/* Strip unsupported bits; attribute memory to the caller's backtrace. */
	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
	addr = kalloc_ext(heap, size, flags, NULL).addr;
	if (__probable(addr)) {
		return addr;
	}

	if (flags & (M_NOWAIT | M_NULL)) {
		return NULL;
	}

	/*
	 * We get here when the caller told us to block waiting for memory, but
	 * kalloc said there's no memory left to get. Generally, this means there's a
	 * leak or the caller asked for an impossibly large amount of memory. If the caller
	 * is expecting a NULL return code then it should explicitly set the flag M_NULL.
	 * If the caller isn't expecting a NULL return code, we just panic. This is less
	 * than ideal, but returning NULL when the caller isn't expecting it doesn't help
	 * since the majority of callers don't check the return value and will just
	 * dereference the pointer and trap anyway. We may as well get a more
	 * descriptive message out while we can.
	 */
	panic("_MALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t)size);
}
138 | |
/*
 * Legacy _FREE() entry point (macOS kexts only).
 *
 * Frees an allocation made by _MALLOC() without knowing its size
 * (kheap_free_addr() resolves it).  The legacy "type" argument is
 * ignored.  NOTE(review): _MALLOC_external serves M_SONAME from
 * KHEAP_SONAME, yet this frees from KHEAP_DEFAULT unconditionally --
 * presumably the heaps share the underlying allocator; confirm against
 * the kalloc heap KPI.
 */
void
_FREE_external(void *addr, int type);
void
_FREE_external(void *addr, int type __unused)
{
	kheap_free_addr(KHEAP_DEFAULT, addr);
}
146 | |
/*
 * Legacy _FREE_ZONE() entry point (macOS kexts only).
 *
 * Sized free variant: the caller supplies the original allocation size.
 * The legacy "type" argument is ignored.
 */
void
_FREE_ZONE_external(void *elem, size_t size, int type);
void
_FREE_ZONE_external(void *elem, size_t size, int type __unused)
{
	kheap_free(KHEAP_DEFAULT, elem, size);
}
154 | |
155 | char * |
156 | STRDUP_external(const char *string, int type); |
157 | char * |
158 | STRDUP_external(const char *string, int type __unused) |
159 | { |
160 | size_t len; |
161 | char *copy; |
162 | |
163 | len = strlen(s: string) + 1; |
164 | copy = kheap_alloc(KHEAP_DEFAULT, len, Z_WAITOK); |
165 | if (copy) { |
166 | memcpy(dst: copy, src: string, n: len); |
167 | } |
168 | return copy; |
169 | } |
170 | |
/* Global list of live OSMalloc tags, protected by OSMalloc_tag_lock. */
static queue_head_t OSMalloc_tag_list = QUEUE_HEAD_INITIALIZER(OSMalloc_tag_list);
static LCK_GRP_DECLARE(OSMalloc_tag_lck_grp, "OSMalloc_tag");
static LCK_SPIN_DECLARE(OSMalloc_tag_lock, &OSMalloc_tag_lck_grp);

/* Convenience wrappers around the tag-list spinlock. */
#define OSMalloc_tag_spin_lock()        lck_spin_lock(&OSMalloc_tag_lock)
#define OSMalloc_tag_unlock()           lck_spin_unlock(&OSMalloc_tag_lock)
177 | |
178 | extern typeof(OSMalloc_Tagalloc) OSMalloc_Tagalloc_external; |
179 | OSMallocTag |
180 | OSMalloc_Tagalloc_external(const char *str, uint32_t flags) |
181 | { |
182 | OSMallocTag OSMTag; |
183 | |
184 | OSMTag = kalloc_type(struct _OSMallocTag_, Z_WAITOK | Z_ZERO); |
185 | |
186 | if (flags & OSMT_PAGEABLE) { |
187 | OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE; |
188 | } |
189 | |
190 | OSMTag->OSMT_refcnt = 1; |
191 | |
192 | strlcpy(dst: OSMTag->OSMT_name, src: str, OSMT_MAX_NAME); |
193 | |
194 | OSMalloc_tag_spin_lock(); |
195 | enqueue_tail(que: &OSMalloc_tag_list, elt: (queue_entry_t)OSMTag); |
196 | OSMalloc_tag_unlock(); |
197 | OSMTag->OSMT_state = OSMT_VALID; |
198 | return OSMTag; |
199 | } |
200 | |
201 | static void |
202 | OSMalloc_Tagref(OSMallocTag tag) |
203 | { |
204 | if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) { |
205 | panic("OSMalloc_Tagref():'%s' has bad state 0x%08X" , |
206 | tag->OSMT_name, tag->OSMT_state); |
207 | } |
208 | |
209 | os_atomic_inc(&tag->OSMT_refcnt, relaxed); |
210 | } |
211 | |
212 | static void |
213 | OSMalloc_Tagrele(OSMallocTag tag) |
214 | { |
215 | if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) { |
216 | panic("OSMalloc_Tagref():'%s' has bad state 0x%08X" , |
217 | tag->OSMT_name, tag->OSMT_state); |
218 | } |
219 | |
220 | if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) != 0) { |
221 | return; |
222 | } |
223 | |
224 | if (os_atomic_cmpxchg(&tag->OSMT_state, |
225 | OSMT_VALID | OSMT_RELEASED, OSMT_VALID | OSMT_RELEASED, acq_rel)) { |
226 | OSMalloc_tag_spin_lock(); |
227 | (void)remque(elt: (queue_entry_t)tag); |
228 | OSMalloc_tag_unlock(); |
229 | kfree_type(struct _OSMallocTag_, tag); |
230 | } else { |
231 | panic("OSMalloc_Tagrele():'%s' has refcnt 0" , tag->OSMT_name); |
232 | } |
233 | } |
234 | |
235 | extern typeof(OSMalloc_Tagfree) OSMalloc_Tagfree_external; |
236 | void |
237 | OSMalloc_Tagfree_external(OSMallocTag tag) |
238 | { |
239 | if (!os_atomic_cmpxchg(&tag->OSMT_state, |
240 | OSMT_VALID, OSMT_VALID | OSMT_RELEASED, acq_rel)) { |
241 | panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X" , |
242 | tag->OSMT_name, tag->OSMT_state); |
243 | } |
244 | |
245 | if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) == 0) { |
246 | OSMalloc_tag_spin_lock(); |
247 | (void)remque(elt: (queue_entry_t)tag); |
248 | OSMalloc_tag_unlock(); |
249 | kfree_type(struct _OSMallocTag_, tag); |
250 | } |
251 | } |
252 | |
253 | extern typeof(OSMalloc) OSMalloc_external; |
254 | void * |
255 | OSMalloc_external(uint32_t size, OSMallocTag tag) |
256 | { |
257 | void *addr = NULL; |
258 | kern_return_t kr; |
259 | |
260 | OSMalloc_Tagref(tag); |
261 | if ((tag->OSMT_attr & OSMT_PAGEABLE) && (size & ~PAGE_MASK)) { |
262 | if ((kr = kmem_alloc(map: kernel_map, addrp: (vm_offset_t *)&addr, size, |
263 | flags: KMA_PAGEABLE | KMA_DATA, tag: vm_tag_bt())) != KERN_SUCCESS) { |
264 | addr = NULL; |
265 | } |
266 | } else { |
267 | addr = kheap_alloc(KERN_OS_MALLOC, size, |
268 | Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_KALLOC)); |
269 | } |
270 | |
271 | if (!addr) { |
272 | OSMalloc_Tagrele(tag); |
273 | } |
274 | |
275 | return addr; |
276 | } |
277 | |
278 | extern typeof(OSMalloc_noblock) OSMalloc_noblock_external; |
279 | void * |
280 | OSMalloc_noblock_external(uint32_t size, OSMallocTag tag) |
281 | { |
282 | void *addr = NULL; |
283 | |
284 | if (tag->OSMT_attr & OSMT_PAGEABLE) { |
285 | return NULL; |
286 | } |
287 | |
288 | OSMalloc_Tagref(tag); |
289 | addr = kheap_alloc(KERN_OS_MALLOC, (vm_size_t)size, |
290 | Z_VM_TAG_BT(Z_NOWAIT, VM_KERN_MEMORY_KALLOC)); |
291 | if (addr == NULL) { |
292 | OSMalloc_Tagrele(tag); |
293 | } |
294 | |
295 | return addr; |
296 | } |
297 | |
298 | extern typeof(OSFree) OSFree_external; |
299 | void |
300 | OSFree_external(void *addr, uint32_t size, OSMallocTag tag) |
301 | { |
302 | if ((tag->OSMT_attr & OSMT_PAGEABLE) |
303 | && (size & ~PAGE_MASK)) { |
304 | kmem_free(map: kernel_map, addr: (vm_offset_t)addr, size); |
305 | } else { |
306 | kheap_free(KERN_OS_MALLOC, addr, size); |
307 | } |
308 | |
309 | OSMalloc_Tagrele(tag); |
310 | } |
311 | |
312 | #endif /* XNU_PLATFORM_MacOSX */ |
313 | #if DEBUG || DEVELOPMENT |
314 | |
315 | static int |
316 | sysctl_zone_map_jetsam_limit SYSCTL_HANDLER_ARGS |
317 | { |
318 | #pragma unused(oidp, arg1, arg2) |
319 | int oldval = 0, val = 0, error = 0; |
320 | |
321 | oldval = zone_map_jetsam_limit; |
322 | error = sysctl_io_number(req, oldval, sizeof(int), &val, NULL); |
323 | if (error || !req->newptr) { |
324 | return error; |
325 | } |
326 | |
327 | return mach_to_bsd_errno(zone_map_jetsam_set_limit(val)); |
328 | } |
/* kern.zone_map_jetsam_limit: read/write integer routed through the handler above. */
SYSCTL_PROC(_kern, OID_AUTO, zone_map_jetsam_limit,
    CTLTYPE_INT | CTLFLAG_RW, 0, 0, sysctl_zone_map_jetsam_limit, "I",
    "Zone map jetsam limit");
332 | |
333 | |
334 | extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity); |
335 | |
336 | static int |
337 | sysctl_zone_map_size_and_capacity SYSCTL_HANDLER_ARGS |
338 | { |
339 | #pragma unused(oidp, arg1, arg2) |
340 | uint64_t zstats[2]; |
341 | get_zone_map_size(&zstats[0], &zstats[1]); |
342 | |
343 | return SYSCTL_OUT(req, &zstats, sizeof(zstats)); |
344 | } |
345 | |
/* kern.zone_map_size_and_capacity: two uint64s via the handler above. */
SYSCTL_PROC(_kern, OID_AUTO, zone_map_size_and_capacity,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, 0,
    &sysctl_zone_map_size_and_capacity, "Q",
    "Current size and capacity of the zone map");

/* kern.zone_wired_pages: read-only view of zone_pages_wired. */
SYSCTL_LONG(_kern, OID_AUTO, zone_wired_pages,
    CTLFLAG_RD | CTLFLAG_LOCKED, &zone_pages_wired,
    "number of wired pages in zones");

/* kern.zone_guard_pages: read-only view of zone_guard_pages. */
SYSCTL_LONG(_kern, OID_AUTO, zone_guard_pages,
    CTLFLAG_RD | CTLFLAG_LOCKED, &zone_guard_pages,
    "number of guard pages in zones");
358 | |
359 | #endif /* DEBUG || DEVELOPMENT */ |
360 | #if CONFIG_ZLEAKS |
361 | |
/* kern.zleak.* namespace: zone-leak detection controls. */
SYSCTL_DECL(_kern_zleak);
SYSCTL_NODE(_kern, OID_AUTO, zleak, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "zleak");

/* kern.zleak.active: read-only flag mirroring zleak_active. */
SYSCTL_INT(_kern_zleak, OID_AUTO, active, CTLFLAG_RD,
    &zleak_active, 0, "zleak activity");

/*
 * kern.zleak.max_zonemap_size
 *
 * Read the value of the maximum zonemap size in bytes; useful
 * as the maximum size that zleak.global_threshold and
 * zleak.zone_threshold should be set to.
 */
SYSCTL_LONG(_kern_zleak, OID_AUTO, max_zonemap_size,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, &zleak_max_zonemap_size,
    "zleak max zonemap size");
378 | |
379 | |
380 | static int |
381 | sysctl_zleak_threshold SYSCTL_HANDLER_ARGS |
382 | { |
383 | #pragma unused(oidp, arg2) |
384 | int error; |
385 | uint64_t value = *(vm_size_t *)arg1; |
386 | |
387 | error = sysctl_io_number(req, value, sizeof(value), &value, NULL); |
388 | |
389 | if (error || !req->newptr) { |
390 | return error; |
391 | } |
392 | |
393 | return mach_to_bsd_errno(zleak_update_threshold(arg1, value)); |
394 | } |
395 | |
/*
 * kern.zleak.zone_threshold
 *
 * Set the per-zone threshold size (in bytes) above which any
 * zone will automatically start zleak tracking.
 *
 * The default value is set in zleak_init().
 *
 * Setting this variable will have no effect until zleak tracking is
 * activated (see kern.zleak.active above).
 *
 * Backed by zleak_per_zone_tracking_threshold, accessed through
 * sysctl_zleak_threshold.
 */
SYSCTL_PROC(_kern_zleak, OID_AUTO, zone_threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &zleak_per_zone_tracking_threshold, 0, sysctl_zleak_threshold, "Q",
    "zleak per-zone threshold");
411 | |
412 | #endif /* CONFIG_ZLEAKS */ |
413 | |
414 | extern uint64_t get_zones_collectable_bytes(void); |
415 | |
416 | static int |
417 | sysctl_zones_collectable_bytes SYSCTL_HANDLER_ARGS |
418 | { |
419 | #pragma unused(oidp, arg1, arg2) |
420 | uint64_t zones_free_mem = get_zones_collectable_bytes(); |
421 | |
422 | return SYSCTL_OUT(req, &zones_free_mem, sizeof(zones_free_mem)); |
423 | } |
424 | |
/* kern.zones_collectable_bytes: read-only quad via the handler above. */
SYSCTL_PROC(_kern, OID_AUTO, zones_collectable_bytes,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, &sysctl_zones_collectable_bytes, "Q",
    "Collectable memory in zones");
429 | |