1 | /* |
2 | * Copyright (c) 2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <kern/assert.h> |
30 | #include <kern/kalloc.h> |
31 | #include <kern/locks.h> |
32 | #include <kern/work_interval.h> |
33 | #include <kern/workload_config.h> |
34 | |
35 | #include <mach/kern_return.h> |
36 | |
37 | #include <os/hash.h> |
38 | |
39 | #include <sys/queue.h> |
40 | #include <sys/work_interval.h> |
41 | |
42 | #include <stdint.h> |
43 | |
/* Maximum length (including NUL) of a per-phase name. */
#define WORKLOAD_CONFIG_PHASE_NAME_MAX 32

/* Hard cap on the number of workload-id entries per context. */
static const int max_workload_config_entry_count = 1024;
/* Requested bucket count for hashinit(); actual table size may be rounded. */
static const int workload_config_hash_size = 64;

/* Lock group shared by all workload-config context mutexes. */
static LCK_GRP_DECLARE(workload_config_lck_grp, "workload_config_lck_grp" );
50 | |
/*
 * Per-phase workload configuration.  One of these hangs off a
 * workload_config_entry_t for every named phase of that workload.
 */
typedef struct workload_phase_entry {
	LIST_ENTRY(workload_phase_entry) wpe_link;   /* linkage on wce_phases */
	char wpe_phase[WORKLOAD_CONFIG_PHASE_NAME_MAX]; /* phase name (NUL terminated) */
	workload_config_t wpe_config;                /* configuration for this phase */
} workload_phase_entry_t;
59 | |
/*
 * Workload configuration. As well as global information about the workload, it
 * also contains a list of per-phase configuration.
 */
typedef struct workload_config_entry {
	LIST_ENTRY(workload_config_entry) wce_link;  /* linkage on hash bucket */
	char wce_id[WORKLOAD_CONFIG_ID_NAME_MAX];    /* workload id (hash key) */
	const workload_phase_entry_t *wce_default;   /* default phase, or NULL if unset */
	LIST_HEAD(, workload_phase_entry) wce_phases; /* all phases for this workload */
} workload_config_entry_t;
70 | |
/*
 * A workload configuration context: a mutex-protected hash table mapping
 * workload id -> workload_config_entry_t, plus context-wide flags.
 * wlcc_hashtbl == NULL means the context is uninitialized/torn down.
 */
struct workload_config_ctx {
	workload_config_flags_t wlcc_flags;  /* context-wide behavior flags */
	int32_t wlcc_count;                  /* number of id entries in the table */
	u_long wlcc_hash_mask;               /* mask returned by hashinit() */
	lck_mtx_t wlcc_mtx;                  /* protects all fields and the table */
	LIST_HEAD(workload_config_hashhead, workload_config_entry) * wlcc_hashtbl;
};
78 | |
/* Boot-time context, loaded once early in boot (see get_ctx_locked()). */
struct workload_config_ctx workload_config_boot;
#if DEVELOPMENT || DEBUG
/* Development override context; when initialized it shadows the boot ctx. */
struct workload_config_ctx workload_config_devel;
#endif
83 | |
84 | __startup_func |
85 | static void |
86 | workload_config_setup(void) |
87 | { |
88 | lck_mtx_init(lck: &workload_config_boot.wlcc_mtx, grp: &workload_config_lck_grp, |
89 | LCK_ATTR_NULL); |
90 | #if DEVELOPMENT || DEBUG |
91 | lck_mtx_init(&workload_config_devel.wlcc_mtx, &workload_config_lck_grp, |
92 | LCK_ATTR_NULL); |
93 | #endif |
94 | } |
95 | STARTUP(LOCKS, STARTUP_RANK_MIDDLE, workload_config_setup); |
96 | |
97 | static struct workload_config_hashhead * |
98 | workload_config_hash(workload_config_ctx_t *ctx, const char *id) |
99 | { |
100 | const uint32_t hash = os_hash_jenkins(data: id, length: strlen(s: id)); |
101 | return &ctx->wlcc_hashtbl[hash & ctx->wlcc_hash_mask]; |
102 | } |
103 | |
104 | kern_return_t |
105 | workload_config_init(workload_config_ctx_t *ctx) |
106 | { |
107 | extern void *hashinit(int, int, u_long *); |
108 | |
109 | lck_mtx_lock(lck: &ctx->wlcc_mtx); |
110 | |
111 | if (ctx->wlcc_hashtbl != NULL) { |
112 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
113 | return KERN_FAILURE; |
114 | } |
115 | |
116 | ctx->wlcc_hashtbl = hashinit(workload_config_hash_size, 0, |
117 | &ctx->wlcc_hash_mask); |
118 | if (ctx->wlcc_hashtbl == NULL) { |
119 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
120 | return KERN_FAILURE; |
121 | } |
122 | |
123 | ctx->wlcc_count = 0; |
124 | |
125 | /* By default, the configuration can enable a thread scheduling policy. */ |
126 | ctx->wlcc_flags = WLC_F_THREAD_POLICY; |
127 | |
128 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
129 | |
130 | return KERN_SUCCESS; |
131 | } |
132 | |
133 | bool |
134 | workload_config_initialized(const workload_config_ctx_t *ctx) |
135 | { |
136 | return ctx->wlcc_hashtbl != NULL; |
137 | } |
138 | |
139 | void |
140 | workload_config_free(workload_config_ctx_t *ctx) |
141 | { |
142 | extern void hashdestroy(void *, int, u_long); |
143 | |
144 | lck_mtx_lock(lck: &ctx->wlcc_mtx); |
145 | |
146 | if (ctx->wlcc_hashtbl == NULL) { |
147 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
148 | return; |
149 | } |
150 | |
151 | for (int i = 0; i < workload_config_hash_size; i++) { |
152 | struct workload_config_hashhead *head = |
153 | &ctx->wlcc_hashtbl[i]; |
154 | workload_config_entry_t *entry = NULL; |
155 | workload_config_entry_t *tmp = NULL; |
156 | |
157 | LIST_FOREACH_SAFE(entry, head, wce_link, tmp) { |
158 | workload_phase_entry_t *phase_entry = NULL; |
159 | workload_phase_entry_t *phase_tmp = NULL; |
160 | |
161 | LIST_FOREACH_SAFE(phase_entry, &entry->wce_phases, |
162 | wpe_link, phase_tmp) { |
163 | LIST_REMOVE(phase_entry, wpe_link); |
164 | kfree_type(workload_phase_entry_t, phase_entry); |
165 | } |
166 | |
167 | LIST_REMOVE(entry, wce_link); |
168 | kfree_type(workload_config_entry_t, entry); |
169 | } |
170 | } |
171 | |
172 | |
173 | hashdestroy(ctx->wlcc_hashtbl, 0, ctx->wlcc_hash_mask); |
174 | ctx->wlcc_hashtbl = NULL; |
175 | ctx->wlcc_count = 0; |
176 | |
177 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
178 | } |
179 | |
180 | /* |
181 | * Lookup workload data by id. |
182 | */ |
183 | static workload_config_entry_t * |
184 | lookup_entry(workload_config_ctx_t *ctx, const char *id) |
185 | { |
186 | assert(id != NULL); |
187 | assert(ctx->wlcc_hashtbl != NULL); |
188 | LCK_MTX_ASSERT(&ctx->wlcc_mtx, LCK_MTX_ASSERT_OWNED); |
189 | |
190 | workload_config_entry_t *entry = NULL; |
191 | LIST_FOREACH(entry, workload_config_hash(ctx, id), wce_link) { |
192 | if (strncmp(s1: entry->wce_id, s2: id, n: sizeof(entry->wce_id)) == 0) { |
193 | return entry; |
194 | } |
195 | } |
196 | |
197 | return NULL; |
198 | } |
199 | |
200 | /* |
201 | * Given an entry for a workload, find the configuration associated with the |
202 | * specified phase. |
203 | */ |
204 | static const workload_phase_entry_t * |
205 | lookup_config(__assert_only workload_config_ctx_t *ctx, |
206 | const workload_config_entry_t *entry, const char *phase) |
207 | { |
208 | assert(entry != NULL); |
209 | assert(phase != NULL); |
210 | LCK_MTX_ASSERT(&ctx->wlcc_mtx, LCK_MTX_ASSERT_OWNED); |
211 | |
212 | const workload_phase_entry_t *phase_entry = NULL; |
213 | LIST_FOREACH(phase_entry, &entry->wce_phases, wpe_link) { |
214 | if (strncmp(s1: phase_entry->wpe_phase, s2: phase, |
215 | n: sizeof(phase_entry->wpe_phase)) == 0) { |
216 | return phase_entry; |
217 | } |
218 | } |
219 | |
220 | return NULL; |
221 | } |
222 | |
223 | /* |
224 | * Add new phase configuration for the specified workload. |
225 | */ |
226 | static kern_return_t |
227 | insert_config(workload_config_ctx_t *ctx, workload_config_entry_t *entry, |
228 | const char *phase, const workload_config_t *new_config) |
229 | { |
230 | assert(entry != NULL); |
231 | assert(phase != NULL); |
232 | assert(new_config != NULL); |
233 | LCK_MTX_ASSERT(&ctx->wlcc_mtx, LCK_MTX_ASSERT_OWNED); |
234 | |
235 | if (lookup_config(ctx, entry, phase) != NULL) { |
236 | return KERN_FAILURE; |
237 | } |
238 | |
239 | workload_phase_entry_t *config = |
240 | kalloc_type(workload_phase_entry_t, Z_WAITOK | Z_ZERO); |
241 | if (entry == NULL) { |
242 | return KERN_NO_SPACE; |
243 | } |
244 | |
245 | config->wpe_config = *new_config; |
246 | |
247 | (void) strlcpy(dst: config->wpe_phase, src: phase, n: sizeof(config->wpe_phase)); |
248 | |
249 | LIST_INSERT_HEAD(&entry->wce_phases, config, wpe_link); |
250 | |
251 | return KERN_SUCCESS; |
252 | } |
253 | |
254 | /* |
255 | * Add a new workload config for a previously unseen workload id. |
256 | */ |
257 | static kern_return_t |
258 | insert_entry(workload_config_ctx_t *ctx, const char *id, const char *phase, |
259 | const workload_config_t *config) |
260 | { |
261 | assert(id != NULL); |
262 | assert(phase != NULL); |
263 | assert(config != NULL); |
264 | LCK_MTX_ASSERT(&ctx->wlcc_mtx, LCK_MTX_ASSERT_OWNED); |
265 | |
266 | workload_config_entry_t *entry = |
267 | kalloc_type(workload_config_entry_t, Z_WAITOK | Z_ZERO); |
268 | if (entry == NULL) { |
269 | return KERN_NO_SPACE; |
270 | } |
271 | |
272 | if (ctx->wlcc_count == (max_workload_config_entry_count - 1)) { |
273 | kfree_type(workload_config_entry_t, entry); |
274 | return KERN_FAILURE; |
275 | } |
276 | |
277 | (void) strlcpy(dst: entry->wce_id, src: id, n: sizeof(entry->wce_id)); |
278 | if (insert_config(ctx, entry, phase, new_config: config) != KERN_SUCCESS) { |
279 | kfree_type(workload_config_entry_t, entry); |
280 | return KERN_FAILURE; |
281 | } |
282 | |
283 | LIST_INSERT_HEAD(workload_config_hash(ctx, entry->wce_id), entry, wce_link); |
284 | ctx->wlcc_count++; |
285 | |
286 | return KERN_SUCCESS; |
287 | } |
288 | |
289 | /* |
290 | * Add new workload configuration. |
291 | */ |
292 | kern_return_t |
293 | workload_config_insert(workload_config_ctx_t *ctx, const char *id, |
294 | const char *phase, const workload_config_t *config) |
295 | { |
296 | assert(id != NULL); |
297 | assert(phase != NULL); |
298 | assert(config != NULL); |
299 | |
300 | kern_return_t ret = KERN_FAILURE; |
301 | |
302 | if (strlen(s: id) == 0 || strlen(s: phase) == 0) { |
303 | return KERN_INVALID_ARGUMENT; |
304 | } |
305 | |
306 | lck_mtx_lock(lck: &ctx->wlcc_mtx); |
307 | |
308 | if (ctx->wlcc_hashtbl == NULL) { |
309 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
310 | return KERN_FAILURE; |
311 | } |
312 | |
313 | workload_config_entry_t *entry = lookup_entry(ctx, id); |
314 | ret = (entry == NULL) ? |
315 | insert_entry(ctx, id, phase, config) : |
316 | insert_config(ctx, entry, phase, new_config: config); |
317 | |
318 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
319 | |
320 | return ret; |
321 | } |
322 | |
323 | /* |
324 | * Generally 'workload_config_boot' is used. 'workload_config_boot' is |
325 | * initialized by launchd early in boot and is never loaded again. |
326 | * 'workload_config_devel' can be loaded/unloaded at any time and if loaded, |
327 | * overrides 'workload_config_boot' for lookups. This is useful for testing or |
328 | * development. |
329 | */ |
330 | static workload_config_ctx_t * |
331 | get_ctx_locked(void) |
332 | { |
333 | #if DEVELOPMENT || DEBUG |
334 | /* |
335 | * If a devel context has been setup, use that. |
336 | */ |
337 | lck_mtx_lock(&workload_config_devel.wlcc_mtx); |
338 | if (workload_config_devel.wlcc_hashtbl != NULL) { |
339 | return &workload_config_devel; |
340 | } |
341 | |
342 | lck_mtx_unlock(&workload_config_devel.wlcc_mtx); |
343 | |
344 | #endif /* DEVELOPMENT || DEBUG */ |
345 | |
346 | lck_mtx_lock(lck: &workload_config_boot.wlcc_mtx); |
347 | return &workload_config_boot; |
348 | } |
349 | |
350 | /* |
351 | * Lookup the workload config for the specified phase. |
352 | */ |
353 | kern_return_t |
354 | workload_config_lookup(const char *id, const char *phase, |
355 | workload_config_t *config) |
356 | { |
357 | assert(id != NULL); |
358 | assert(phase != NULL); |
359 | assert(config != NULL); |
360 | |
361 | workload_config_ctx_t *ctx = get_ctx_locked(); |
362 | |
363 | if (ctx->wlcc_hashtbl == NULL) { |
364 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
365 | return KERN_FAILURE; |
366 | } |
367 | |
368 | const workload_config_entry_t *entry = lookup_entry(ctx, id); |
369 | if (entry == NULL) { |
370 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
371 | return KERN_NOT_FOUND; |
372 | } |
373 | |
374 | const workload_phase_entry_t *pe = lookup_config(ctx, entry, phase); |
375 | if (pe == NULL) { |
376 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
377 | return KERN_NOT_FOUND; |
378 | } |
379 | |
380 | *config = pe->wpe_config; |
381 | |
382 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
383 | |
384 | return KERN_SUCCESS; |
385 | } |
386 | |
387 | /* |
388 | * Lookup the workload config for the default phase. |
389 | */ |
390 | kern_return_t |
391 | workload_config_lookup_default(const char *id, workload_config_t *config) |
392 | { |
393 | assert(id != NULL); |
394 | assert(config != NULL); |
395 | |
396 | workload_config_ctx_t *ctx = get_ctx_locked(); |
397 | |
398 | if (ctx->wlcc_hashtbl == NULL) { |
399 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
400 | return KERN_FAILURE; |
401 | } |
402 | |
403 | const workload_config_entry_t *entry = lookup_entry(ctx, id); |
404 | if (entry == NULL) { |
405 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
406 | return KERN_NOT_FOUND; |
407 | } |
408 | |
409 | if (entry->wce_default == NULL) { |
410 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
411 | return KERN_FAILURE; |
412 | } |
413 | |
414 | *config = entry->wce_default->wpe_config; |
415 | |
416 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
417 | |
418 | return KERN_SUCCESS; |
419 | } |
420 | |
421 | /* Make the specified phase the new default phase. */ |
422 | kern_return_t |
423 | workload_config_set_default(workload_config_ctx_t *ctx, const char *id, |
424 | const char *phase) |
425 | { |
426 | assert(id != NULL); |
427 | assert(phase != NULL); |
428 | |
429 | lck_mtx_lock(lck: &ctx->wlcc_mtx); |
430 | |
431 | if (ctx->wlcc_hashtbl == NULL) { |
432 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
433 | return KERN_FAILURE; |
434 | } |
435 | |
436 | workload_config_entry_t *entry = lookup_entry(ctx, id); |
437 | if (entry == NULL) { |
438 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
439 | return KERN_NOT_FOUND; |
440 | } |
441 | |
442 | const workload_phase_entry_t *pe = lookup_config(ctx, entry, phase); |
443 | if (pe == NULL) { |
444 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
445 | return KERN_NOT_FOUND; |
446 | } |
447 | |
448 | entry->wce_default = pe; |
449 | |
450 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
451 | |
452 | return KERN_SUCCESS; |
453 | } |
454 | |
455 | /* Iterate over configurations. */ |
456 | void |
457 | workload_config_iterate(bool (^cb)(const char *, const void *)) |
458 | { |
459 | workload_config_ctx_t *ctx = get_ctx_locked(); |
460 | |
461 | if (ctx->wlcc_hashtbl == NULL) { |
462 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
463 | return; |
464 | } |
465 | |
466 | for (int i = 0; i < workload_config_hash_size; i++) { |
467 | struct workload_config_hashhead *head = &ctx->wlcc_hashtbl[i]; |
468 | workload_config_entry_t *entry = NULL; |
469 | |
470 | LIST_FOREACH(entry, head, wce_link) { |
471 | if (cb(entry->wce_id, entry)) { |
472 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
473 | return; |
474 | } |
475 | } |
476 | } |
477 | |
478 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
479 | } |
480 | |
481 | /* Iterate over phases. */ |
482 | void |
483 | workload_config_phases_iterate(const void *config, |
484 | bool (^cb)(const char *phase, const bool is_default, |
485 | const workload_config_t *)) |
486 | { |
487 | const workload_config_entry_t *entry = config; |
488 | |
489 | workload_phase_entry_t *phase_entry = NULL; |
490 | LIST_FOREACH(phase_entry, &entry->wce_phases, wpe_link) { |
491 | const bool is_default = entry->wce_default == phase_entry; |
492 | if (cb(phase_entry->wpe_phase, is_default, |
493 | &phase_entry->wpe_config)) { |
494 | return; |
495 | } |
496 | } |
497 | } |
498 | |
499 | kern_return_t |
500 | workload_config_get_flags(workload_config_flags_t *flags) |
501 | { |
502 | assert(flags != NULL); |
503 | |
504 | workload_config_ctx_t *ctx = get_ctx_locked(); |
505 | |
506 | if (ctx->wlcc_hashtbl == NULL) { |
507 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
508 | return KERN_FAILURE; |
509 | } |
510 | |
511 | *flags = ctx->wlcc_flags; |
512 | |
513 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
514 | |
515 | return KERN_SUCCESS; |
516 | } |
517 | |
518 | kern_return_t |
519 | workload_config_clear_flag(workload_config_ctx_t *ctx, workload_config_flags_t flag) |
520 | { |
521 | /* Only one flag should be cleared at a time. */ |
522 | assert3u(((flag - 1) & flag), ==, 0); |
523 | |
524 | lck_mtx_lock(lck: &ctx->wlcc_mtx); |
525 | |
526 | if (ctx->wlcc_hashtbl == NULL) { |
527 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
528 | return KERN_FAILURE; |
529 | } |
530 | |
531 | ctx->wlcc_flags &= ~flag; |
532 | |
533 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
534 | |
535 | return KERN_SUCCESS; |
536 | } |
537 | |
538 | bool |
539 | workload_config_available(void) |
540 | { |
541 | workload_config_ctx_t *ctx = get_ctx_locked(); |
542 | |
543 | bool available = ctx->wlcc_hashtbl != NULL; |
544 | |
545 | lck_mtx_unlock(lck: &ctx->wlcc_mtx); |
546 | |
547 | return available; |
548 | } |
549 | |