/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <machine/limits.h>
#include <machine/machine_routines.h>
#include <vm/vm_pageout.h>

/*
 * Region templates.
 *
 * Regions that are not eligible for user task mapping must never be
 * marked with the SKMEM_REGION_CR_MMAPOK flag. Such regions are
 * automatically excluded from the mappable region array at arena
 * creation time.
 *
 * Regions that allow their objects to be shared among other objects
 * must be marked with SKMEM_REGION_CR_SHAREOK. This permits calls
 * to skmem_bufctl_{use,unuse}() on the bufctls for the objects.
 *
 * Read-only regions must be marked with SKMEM_REGION_CR_UREADONLY.
 * This affects the protection property of the segments in those
 * regions. The flag has no effect when the region is not mappable
 * to a user task.
 *
 * The SKMEM_REGION_CR_NOMAGAZINES flag marks the region as unsupportive
 * of the magazines layer when used by a skmem_cache. When this flag is
 * not set, the number of objects in the region is adjusted to include
 * the worst-case number of objects cached at the CPU layer. By default,
 * all regions have this flag set; a client may override it on its own
 * copy of the template.
 *
 * Regions that don't support multi-segments can be marked with the
 * SKMEM_REGION_CR_MONOLITHIC flag. This forces exactly one segment
 * to cover all objects in the region, which also effectively caps the
 * skmem_cache slab layer at a single slab.
 *
 * The correctness of the region templates is enforced at arena
 * creation time.
 */
static const struct skmem_region_params skmem_regions[SKMEM_REGIONS] = {
	/*
	 * Leading guard page(s): {mappable, no-read-write, no-cache}
	 */
	[SKMEM_REGION_GUARD_HEAD] = {
		.srp_name = "headguard",
		.srp_id = SKMEM_REGION_GUARD_HEAD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_GUARD | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_NOREDIRECT,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Schema: {mappable, read-only, no-cache}
	 */
	[SKMEM_REGION_SCHEMA] = {
		.srp_name = "schema",
		.srp_id = SKMEM_REGION_SCHEMA,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_NOREDIRECT | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Rings: {mappable, no-cache}
	 */
	[SKMEM_REGION_RING] = {
		.srp_name = "ring",
		.srp_id = SKMEM_REGION_RING,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Buffers: {mappable, shareable}
	 */
	[SKMEM_REGION_BUF_DEF] = {
		.srp_name = "buf_def",
		.srp_id = SKMEM_REGION_BUF_DEF,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_IODIR_OUT | SKMEM_REGION_CR_SHAREOK |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_BUF_LARGE] = {
		.srp_name = "buf_large",
		.srp_id = SKMEM_REGION_BUF_LARGE,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_IODIR_OUT | SKMEM_REGION_CR_SHAREOK |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_RXBUF_DEF] = {
		.srp_name = "rxbuf_def",
		.srp_id = SKMEM_REGION_RXBUF_DEF,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_RXBUF_LARGE] = {
		.srp_name = "rxbuf_large",
		.srp_id = SKMEM_REGION_RXBUF_LARGE,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_TXBUF_DEF] = {
		.srp_name = "txbuf_def",
		.srp_id = SKMEM_REGION_TXBUF_DEF,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_OUT |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_TXBUF_LARGE] = {
		.srp_name = "txbuf_large",
		.srp_id = SKMEM_REGION_TXBUF_LARGE,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_OUT |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Userspace metadata: {mappable}
	 */
	[SKMEM_REGION_UMD] = {
		.srp_name = "umd",
		.srp_id = SKMEM_REGION_UMD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},

	/*
	 * Userspace buflet metadata: {mappable}
	 */
	[SKMEM_REGION_UBFT] = {
		.srp_name = "ubft",
		.srp_id = SKMEM_REGION_UBFT,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
		.srp_max_frags = 1,
	},

	/*
	 * Tx/alloc userspace slot descriptors: {mappable, read-only, no-cache}
	 */
	[SKMEM_REGION_TXAUSD] = {
		.srp_name = "txausd",
		.srp_id = SKMEM_REGION_TXAUSD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Rx/free userspace slot descriptors: {mappable, read-only, no-cache}
	 */
	[SKMEM_REGION_RXFUSD] = {
		.srp_name = "rxfusd",
		.srp_id = SKMEM_REGION_RXFUSD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Shared statistics: {mappable, monolithic, no-cache}
	 */
	[SKMEM_REGION_USTATS] = {
		.srp_name = "ustats",
		.srp_id = SKMEM_REGION_USTATS,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_MONOLITHIC | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Flow advisories: {mappable, read-only, monolithic, no-cache}
	 */
	[SKMEM_REGION_FLOWADV] = {
		.srp_name = "flowadv",
		.srp_id = SKMEM_REGION_FLOWADV,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Nexus advisories: {mappable, read-only, monolithic, no-cache}
	 */
	[SKMEM_REGION_NEXUSADV] = {
		.srp_name = "nexusadv",
		.srp_id = SKMEM_REGION_NEXUSADV,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PERSISTENT |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * sysctls: {mappable, monolithic, no-cache}
	 */
	[SKMEM_REGION_SYSCTLS] = {
		.srp_name = "sysctls",
		.srp_id = SKMEM_REGION_SYSCTLS,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_NOREDIRECT |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Trailing guard page(s): {mappable, no-read-write, no-cache}
	 */
	[SKMEM_REGION_GUARD_TAIL] = {
		.srp_name = "tailguard",
		.srp_id = SKMEM_REGION_GUARD_TAIL,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_GUARD | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_NOREDIRECT,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Kernel metadata.
	 */
	[SKMEM_REGION_KMD] = {
		.srp_name = "kmd",
		.srp_id = SKMEM_REGION_KMD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_MEMTAG,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},
	[SKMEM_REGION_RXKMD] = {
		.srp_name = "rxkmd",
		.srp_id = SKMEM_REGION_RXKMD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_MEMTAG,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},
	[SKMEM_REGION_TXKMD] = {
		.srp_name = "txkmd",
		.srp_id = SKMEM_REGION_TXKMD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_MEMTAG,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},

	/*
	 * Kernel buflet metadata.
	 */
	[SKMEM_REGION_KBFT] = {
		.srp_name = "kbft",
		.srp_id = SKMEM_REGION_KBFT,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_MEMTAG,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_RXKBFT] = {
		.srp_name = "rxkbft",
		.srp_id = SKMEM_REGION_RXKBFT,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_MEMTAG,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_TXKBFT] = {
		.srp_name = "txkbft",
		.srp_id = SKMEM_REGION_TXKBFT,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_MEMTAG,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Tx/alloc kernel slot descriptors: {no-cache}
	 */
	[SKMEM_REGION_TXAKSD] = {
		.srp_name = "txaksd",
		.srp_id = SKMEM_REGION_TXAKSD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Rx/free kernel slot descriptors: {no-cache}
	 */
	[SKMEM_REGION_RXFKSD] = {
		.srp_name = "rxfksd",
		.srp_id = SKMEM_REGION_RXFKSD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Statistics kernel snapshot: {no-cache}
	 */
	[SKMEM_REGION_KSTATS] = {
		.srp_name = "kstats",
		.srp_id = SKMEM_REGION_KSTATS,
		.srp_cflags = SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Intrinsic objects.
	 */
	[SKMEM_REGION_INTRINSIC] = {
		.srp_name = "intrinsic",
		.srp_id = SKMEM_REGION_INTRINSIC,
		.srp_cflags = SKMEM_REGION_CR_PSEUDO,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
};

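/*
 * Region IDs that collectively back a packet pool: the data buffer
 * regions plus the kernel/user metadata and buflet regions.
 */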
const skmem_region_id_t skmem_pp_region_ids[SKMEM_PP_REGIONS] = {
	SKMEM_REGION_BUF_DEF,
	SKMEM_REGION_BUF_LARGE,
	SKMEM_REGION_RXBUF_DEF,
	SKMEM_REGION_RXBUF_LARGE,
	SKMEM_REGION_TXBUF_DEF,
	SKMEM_REGION_TXBUF_LARGE,
	SKMEM_REGION_KMD,
	SKMEM_REGION_RXKMD,
	SKMEM_REGION_TXKMD,
	SKMEM_REGION_UMD,
	SKMEM_REGION_KBFT,
	SKMEM_REGION_RXKBFT,
	SKMEM_REGION_TXKBFT,
	SKMEM_REGION_UBFT
};

/* CPU cache line (determined at runtime) */
static unsigned int cpu_cache_line_size;

LCK_ATTR_DECLARE(skmem_lock_attr, 0, 0);
LCK_GRP_DECLARE(skmem_lock_grp, "skmem");

#if (DEVELOPMENT || DEBUG)
SYSCTL_NODE(_kern_skywalk, OID_AUTO, mem, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "Skywalk kmem");
#endif /* (DEVELOPMENT || DEBUG) */

#define SK_SYS_OBJSIZE_DEFAULT	(16 * 1024)

/* system-wide sysctls region */
static struct skmem_region *sk_sys_region;
static void *sk_sys_obj;
static uint32_t sk_sys_objsize;

static void skmem_sys_region_init(void);
static void skmem_sys_region_fini(void);

static char *skmem_dump_buf;
#define SKMEM_DUMP_BUF_SIZE	2048	/* size of dump buffer */

static int __skmem_inited = 0;

void
skmem_init(void)
{
	ASSERT(!__skmem_inited);

	/* get CPU cache line size */
	(void) skmem_cpu_cache_line_size();

	skmem_cache_pre_init();
	skmem_region_init();
	skmem_cache_init();
	pp_init();

	__skmem_inited = 1;

	/* set up system-wide region for sysctls */
	skmem_sys_region_init();
}

void
skmem_fini(void)
{
	if (__skmem_inited) {
		skmem_sys_region_fini();

		pp_fini();
		skmem_cache_fini();
		skmem_region_fini();

		__skmem_inited = 0;
	}
}

/*
 * Return the default region parameters (template). Callers must never
 * modify the returned region, and should treat it as invariant.
 */
const struct skmem_region_params *
skmem_get_default(skmem_region_id_t id)
{
	ASSERT(id < SKMEM_REGIONS);
	return &skmem_regions[id];
}
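/*
 * Illustrative usage (mirrors skmem_sys_region_init() below): copy the
 * template into a local, configure the copy, then hand it to
 * skmem_region_create().  The template itself is never written to.
 *
 *	struct skmem_region_params srp;
 *
 *	srp = *skmem_get_default(SKMEM_REGION_SYSCTLS);
 *	srp.srp_r_obj_cnt = 1;
 *	srp.srp_r_obj_size = SK_SYS_OBJSIZE_DEFAULT;
 *	skmem_region_params_config(&srp);
 */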

/*
 * Return the CPU cache line size.
 */
uint32_t
skmem_cpu_cache_line_size(void)
{
	if (__improbable(cpu_cache_line_size == 0)) {
		ml_cpu_info_t cpu_info;
		ml_cpu_get_info(&cpu_info);
		cpu_cache_line_size = (uint32_t)cpu_info.cache_line_size;
		ASSERT((SKMEM_PAGE_SIZE % cpu_cache_line_size) == 0);
	}
	return cpu_cache_line_size;
}

/*
 * Dispatch a function to execute in a thread call.
 */
void
skmem_dispatch(thread_call_t tcall, void (*func)(void), uint64_t delay)
{
	uint64_t now = mach_absolute_time();
	uint64_t ival, deadline = now;

	ASSERT(tcall != NULL);

	if (delay == 0) {
		delay = (10 * NSEC_PER_USEC);	/* "immediately", 10 usec */
	}
	nanoseconds_to_absolutetime(delay, &ival);
	clock_deadline_for_periodic_event(ival, now, &deadline);
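	/*
	 * Note: func is handed to the thread call as its parameter; the
	 * thread call's handler is expected to invoke it at the deadline.
	 */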
	(void) thread_call_enter1_delayed(tcall, func, deadline);
}

static void
skmem_sys_region_init(void)
{
	struct skmem_region_params srp;

	VERIFY(__skmem_inited);
	VERIFY(sk_sys_region == NULL);

	srp = *skmem_get_default(SKMEM_REGION_SYSCTLS);
	ASSERT((srp.srp_cflags & (SKMEM_REGION_CR_MMAPOK |
	    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
	    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_NOREDIRECT)) ==
	    (SKMEM_REGION_CR_MMAPOK | SKMEM_REGION_CR_UREADONLY |
	    SKMEM_REGION_CR_MONOLITHIC | SKMEM_REGION_CR_NOMAGAZINES |
	    SKMEM_REGION_CR_NOREDIRECT));

	srp.srp_r_obj_cnt = 1;
	srp.srp_r_obj_size = sk_sys_objsize = SK_SYS_OBJSIZE_DEFAULT;
	skmem_region_params_config(&srp);

	_CASSERT(SK_SYS_OBJSIZE_DEFAULT >= sizeof(skmem_sysctl));
	sk_sys_region = skmem_region_create("global", &srp, NULL, NULL, NULL);
	if (sk_sys_region == NULL) {
		panic("failed to allocate global sysctls region");
		/* NOTREACHED */
		__builtin_unreachable();
	}

	sk_sys_obj = skmem_region_alloc(sk_sys_region, NULL, NULL,
	    NULL, SKMEM_SLEEP);
	if (sk_sys_obj == NULL) {
		panic("failed to allocate global sysctls object (%u bytes)",
		    sk_sys_objsize);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	skmem_sysctl_init();
}

static void
skmem_sys_region_fini(void)
{
	if (sk_sys_region != NULL) {
		skmem_region_free(sk_sys_region, sk_sys_obj, NULL);
		sk_sys_obj = NULL;
		skmem_region_release(sk_sys_region);
		sk_sys_region = NULL;
	}
	VERIFY(sk_sys_obj == NULL);
}

struct skmem_region *
skmem_get_sysctls_region(void)
{
	return sk_sys_region;
}

void *
skmem_get_sysctls_obj(size_t *size)
{
	if (size != NULL) {
		*size = sk_sys_objsize;
	}

	return sk_sys_obj;
}

/* for VM stats */
extern unsigned int vm_page_free_count, vm_page_speculative_count,
    vm_page_active_count, vm_page_inactive_count,
    vm_page_wire_count, vm_page_throttled_count, vm_lopage_free_count,
    vm_page_purgeable_count, vm_page_purged_count;

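/*
 * Advance the dump buffer cursor by the number of bytes just written
 * and bail out to the "done" label once the remaining length is
 * exhausted.
 */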
#define SKMEM_WDT_DUMP_BUF_CHK() do {		\
	clen -= k;				\
	if (clen < 1)				\
		goto done;			\
	c += k;					\
} while (0)

/*
 * The compiler doesn't know that the kernel's snprintf() supports the
 * %b format specifier, so wrap the call here with the relevant format
 * warnings suppressed.
 */
#define skmem_snprintf(str, size, format, ...) ({			\
	_Pragma("clang diagnostic push")				\
	_Pragma("clang diagnostic ignored \"-Wformat-invalid-specifier\"") \
	_Pragma("clang diagnostic ignored \"-Wformat-extra-args\"")	\
	_Pragma("clang diagnostic ignored \"-Wformat\"")		\
	snprintf(str, size, format, ## __VA_ARGS__)			\
	_Pragma("clang diagnostic pop");				\
})
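/*
 * For reference, %b consumes two arguments: a value and a bit-description
 * string whose first byte is the numeric base, followed by <bit#><name>
 * pairs.  An illustrative call such as
 *
 *	skmem_snprintf(buf, sizeof(buf), "0x%b", 0x3, "\020\01INITED\02ACTIVE");
 *
 * would render something like "0x3<INITED,ACTIVE>".
 */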
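/*
 * Render a human-readable snapshot of the given region, its backing
 * caches (when slab-backed) and the global VM page counters into
 * skmem_dump_buf, returning that buffer (presumably for watchdog or
 * debugging output, per the WDT macro above).
 */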
__attribute__((noinline, cold, not_tail_called))
char *
skmem_dump(struct skmem_region *skr)
{
	int k, clen = SKMEM_DUMP_BUF_SIZE;
	struct skmem_cache *skm;
	char *c;

	/* allocate space for skmem_dump_buf */
	if (skmem_dump_buf == NULL) {
		skmem_dump_buf = (char *) kalloc_data(SKMEM_DUMP_BUF_SIZE,
		    (Z_ZERO | Z_WAITOK));
		VERIFY(skmem_dump_buf != NULL);
	} else {
		bzero(skmem_dump_buf, SKMEM_DUMP_BUF_SIZE);
	}
	c = skmem_dump_buf;

	k = skmem_snprintf(c, clen,
	    "Region %p\n"
	    " | Mode : 0x%b\n"
	    " | Memory : [%llu in use [%llu wired]] / [%llu total]\n"
	    " | Transactions : [%llu segment allocs, %llu frees]\n\n",
	    skr, skr->skr_mode, SKR_MODE_BITS, skr->skr_meminuse,
	    skr->skr_w_meminuse, skr->skr_memtotal, skr->skr_alloc,
	    skr->skr_free);
	SKMEM_WDT_DUMP_BUF_CHK();

	if (skr->skr_mode & SKR_MODE_SLAB) {
		for (int i = 0; i < SKR_MAX_CACHES; i++) {
			if ((skm = skr->skr_cache[i]) == NULL) {
				continue;
			}
			k = skmem_snprintf(c, clen, "Cache %p\n"
			    " | Mode : 0x%b\n"
			    " | Memory : [%llu in use] / [%llu total]\n"
			    " | Transactions : [%llu alloc failures]\n"
			    " | [%llu slab creates, %llu destroys]\n"
			    " | [%llu slab allocs, %llu frees]\n\n",
			    skm, skm->skm_mode, SKM_MODE_BITS,
			    skm->skm_sl_bufinuse, skm->skm_sl_bufmax,
			    skm->skm_sl_alloc_fail, skm->skm_sl_create,
			    skm->skm_sl_destroy, skm->skm_sl_alloc,
			    skm->skm_sl_free);
			SKMEM_WDT_DUMP_BUF_CHK();
		}
	}

	k = skmem_snprintf(c, clen,
	    "VM Pages\n"
	    " | Free : %u [%u speculative]\n"
	    " | Active : %u\n"
	    " | Inactive : %u\n"
	    " | Wired : %u [%u throttled, %u lopage_free]\n"
	    " | Purgeable : %u [%u purged]\n",
	    vm_page_free_count, vm_page_speculative_count,
	    vm_page_active_count, vm_page_inactive_count,
	    vm_page_wire_count, vm_page_throttled_count, vm_lopage_free_count,
	    vm_page_purgeable_count, vm_page_purged_count);
	SKMEM_WDT_DUMP_BUF_CHK();

done:
	return skmem_dump_buf;
}

boolean_t
skmem_lowmem_check(void)
{
	unsigned int plevel = kVMPressureNormal;
	kern_return_t ret;

	ret = mach_vm_pressure_level_monitor(false, &plevel);
	if (ret == KERN_SUCCESS) {
		/* kVMPressureCritical is the stage below jetsam */
		if (plevel >= kVMPressureCritical) {
			/*
			 * If we are in a low-memory situation, then we
			 * might want to start purging our caches.
			 */
			return TRUE;
		}
	}
	return FALSE;
}