/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/task.h>
#include <kern/task_ref.h>
#include <libkern/OSKextLibPrivate.h>

#include <os/refcnt.h>

/*
 * Task references.
 *
 * Each task reference/deallocate pair has an associated reference group:
 * TASK_GRP_INTERNAL   This group is used exclusively to track long-term
 *                     references which are almost always present.
 *                     Specifically, the importance task reference, the owning
 *                     task reference and the thread references.
 * TASK_GRP_EXTERNAL   For kext references.
 * TASK_GRP_KERNEL     For at-large kernel references other than those tracked
 *                     by task_internal.
 * TASK_GRP_MIG        For references from the MIG layer.
 *
 * Depending on configuration (see task_refgrp_config), os_refgrps are used to
 * keep track of the context of the reference/deallocation.
 *
 * TASK_REF_CONFIG_OFF
 * No refgrps are used other than the single 'task' reference group.
 *
 * TASK_REF_CONFIG_DEFAULT
 * Global refgrps are used for 'kernel' and 'external' references. The
 * primary 'task' reference group is set as their parent. Each kext also gets
 * its own refgrp parented to the 'external' group.
 * Each task gets two reference groups - one for 'kernel' references parented to
 * the global 'kernel' group and a second which is dynamically assigned. All
 * references tagged with TASK_GRP_INTERNAL, TASK_GRP_KERNEL and TASK_GRP_MIG
 * use the task 'kernel' group. The dynamic group is initialized for the first
 * 'external' reference to a kext specific group parented to the matching global
 * kext group. For 'external' references not matching that group, the global
 * 'external' group is used.
 * This is the default configuration.
 *
 * TASK_REF_CONFIG_FULL
 * Global refgrps are used for 'kernel', 'external', 'internal' and 'mig'
 * references. The primary 'task' reference group is set as their parent. Each
 * kext also gets its own refgrp parented to the 'external' group.
 * Each task gets eight reference groups - one each mirroring the four global
 * reference groups and four dynamic groups which are assigned to kexts. For
 * 'external' references not matching any of the four dynamic groups, the global
 * 'external' group is used.
 *
 * Kext callers have the calls which take or release task references mapped
 * to '_external' equivalents via the .exports file.
 *
 * At-large kernel callers see calls redefined to call the '_kernel' variants
 * (see task_ref.h).
 *
 * The MIG layer generates code which uses the '_mig' variants.
 *
 * Other groups are selected explicitly.
 *
 * Reference groups support recording of backtraces via the rlog boot-arg.
 * For example: rlog=task_external would keep a backtrace log of all external
 * references.
 */
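
/*
 * Illustrative example (not exhaustive): booting a DEVELOPMENT/DEBUG kernel
 * with 'task_refgrp=full rlog=task_external' gives every task its own full
 * set of reference groups and keeps a backtrace log for each external (kext)
 * reference and release.
 */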

#define TASK_REF_COUNT_INITIAL (2u)

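/*
 * Called with the reference count remaining after a release; completes task
 * tear-down once no references remain. Defined with the core task code, not
 * in this file.
 */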
extern void task_deallocate_internal(task_t task, os_ref_count_t refs);

#if DEVELOPMENT || DEBUG

#include <stdbool.h>

#define DYNAMIC_COUNT 4

/*
 * Controlled by the boot-arg 'task_refgrp=X'.
 *
 * Unspecified/default
 * Each task gets two reference groups: one kext-specific reference group and
 * one used for kernel/internal and MIG references.
 *
 * "off"
 * No task-specific reference groups are used.
 *
 * "full"
 * Each task gets its own set of kernel/internal/mig and external groups.
 * Additionally, four dynamic reference groups are made available to identify
 * kext references.
 */
__attribute__((used))
static enum {
	TASK_REF_CONFIG_DEFAULT,
	TASK_REF_CONFIG_FULL,
	TASK_REF_CONFIG_OFF,
} task_refgrp_config = TASK_REF_CONFIG_DEFAULT;

/* Global reference groups. */
os_refgrp_decl_flags(static, task_primary_refgrp, "task", NULL, OS_REFGRP_F_ALWAYS_ENABLED);
os_refgrp_decl_flags(static, task_kernel_refgrp, "task_kernel", &task_primary_refgrp, OS_REFGRP_F_ALWAYS_ENABLED);
os_refgrp_decl_flags(static, task_internal_refgrp, "task_internal", &task_primary_refgrp, OS_REFGRP_F_ALWAYS_ENABLED);
os_refgrp_decl_flags(static, task_mig_refgrp, "task_mig", &task_primary_refgrp, OS_REFGRP_F_ALWAYS_ENABLED);
os_refgrp_decl_flags(, task_external_refgrp, "task_external", &task_primary_refgrp, OS_REFGRP_F_ALWAYS_ENABLED);

/* 'task_refgrp' is used by lldb macros. */
__attribute__((used))
static struct os_refgrp * const task_refgrp[TASK_GRP_COUNT] = {
	[TASK_GRP_KERNEL]   = &task_kernel_refgrp,
	[TASK_GRP_INTERNAL] = &task_internal_refgrp,
	[TASK_GRP_MIG]      = &task_mig_refgrp,
	[TASK_GRP_EXTERNAL] = &task_external_refgrp,
};

/* Names used by local reference groups. */
static const char * const local_name[TASK_GRP_COUNT] = {
	[TASK_GRP_KERNEL]   = "task_local_kernel",
	[TASK_GRP_INTERNAL] = "task_local_internal",
	[TASK_GRP_MIG]      = "task_local_mig",
	[TASK_GRP_EXTERNAL] = "task_local_external",
};

/* Walk back the callstack, calling cb for each return address. */
static inline void
walk_kext_callstack(int (^cb)(uintptr_t))
{
	uintptr_t* frameptr;
	uintptr_t* frameptr_next;
	uintptr_t retaddr;
	uintptr_t kstackb, kstackt;
	thread_t cthread;

	cthread = current_thread();
	assert3p(cthread, !=, NULL);

	kstackb = thread_get_kernel_stack(cthread);
	kstackt = kstackb + kernel_stack_size;

	/* Load the stack frame pointer (EBP on x86) into frameptr. */
	frameptr = __builtin_frame_address(0);

	while (frameptr != NULL) {
		/* Verify thread stack bounds. */
		if (((uintptr_t)(frameptr + 2) > kstackt) ||
		    ((uintptr_t)frameptr < kstackb)) {
			break;
		}

		/* The next frame pointer is pointed to by the previous one. */
		frameptr_next = (uintptr_t*) *frameptr;
#if defined(HAS_APPLE_PAC)
		frameptr_next = ptrauth_strip(frameptr_next,
		    ptrauth_key_frame_pointer);
#endif

		/* Pull the return address from one slot above the frame pointer. */
		retaddr = *(frameptr + 1);

#if defined(HAS_APPLE_PAC)
		retaddr = (uintptr_t) ptrauth_strip((void *)retaddr,
		    ptrauth_key_return_address);
#endif

		/*
		 * Only report addresses in built-in kmod text or outside the
		 * core kernel text, i.e. likely kext callers.
		 */
		if (((retaddr < vm_kernel_builtinkmod_text_end) &&
		    (retaddr >= vm_kernel_builtinkmod_text)) ||
		    (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
			if (cb(retaddr) != 0) {
				return;
			}
		}
		frameptr = frameptr_next;
	}

	return;
}

/* Return the reference group associated with the 'closest' kext. */
static struct os_refgrp *
lookup_kext_refgrp(void)
{
	__block struct os_refgrp *refgrp = NULL;

	/* Get the kext-specific group based on the current stack. */
	walk_kext_callstack(^(uintptr_t retaddr) {
		OSKextGetRefGrpForCaller(retaddr, ^(struct os_refgrp *kext_grp) {
			assert(kext_grp != NULL);
			refgrp = kext_grp;
		});
		return 1;
	});
	return refgrp;
}

/*
 * Given an array of reference groups, find one that matches the specified kext
 * group. If there is no match and there is an empty slot, initialize a new
 * refgrp with the kext group as the parent (only when 'can_allocate' is true).
 */
static struct os_refgrp *
lookup_dynamic_refgrp(struct os_refgrp *kext,
    struct os_refgrp *dynamic, int dynamic_count, bool can_allocate)
{
	/* First see if it exists. */
	for (int i = 0; i < dynamic_count; i++) {
		if (dynamic[i].grp_parent == kext) {
			return &dynamic[i];
		}
	}

	if (!can_allocate) {
		return NULL;
	}

	/* Grab an empty one, if available. */
	for (int i = 0; i < dynamic_count; i++) {
		if (dynamic[i].grp_name == NULL) {
			dynamic[i] = (struct os_refgrp)
			    os_refgrp_initializer(kext->grp_name, kext,
			    OS_REFGRP_F_ALWAYS_ENABLED);
			return &dynamic[i];
		}
	}

	return NULL;
}

/*
 * Find the best external reference group:
 * - the task-specific kext ref group, else
 * - the kext ref group, else
 * - the global 'external' ref group.
 */
static struct os_refgrp *
find_external_refgrp(struct os_refgrp *dynamic, int dynamic_count,
    bool can_allocate)
{
	struct os_refgrp *kext_refgrp = lookup_kext_refgrp();
	if (kext_refgrp == NULL) {
		return task_refgrp[TASK_GRP_EXTERNAL];
	}

	struct os_refgrp *refgrp = lookup_dynamic_refgrp(kext_refgrp, dynamic,
	    dynamic_count, can_allocate);
	if (refgrp == NULL) {
		return kext_refgrp;
	}

	return refgrp;
}

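/*
 * Take a reference on 'task', charging it to the reference group implied by
 * 'grp' and the active task_refgrp_config.
 */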
void
task_reference_grp(task_t task, task_grp_t grp)
{
	assert3u(grp, <, TASK_GRP_COUNT);
	assert(
	    task_refgrp_config == TASK_REF_CONFIG_OFF ||
	    task_refgrp_config == TASK_REF_CONFIG_DEFAULT ||
	    task_refgrp_config == TASK_REF_CONFIG_FULL);

	struct os_refgrp *refgrp = NULL;

	if (task == TASK_NULL) {
		return;
	}

	task_require(task);

	/*
	 * External ref groups need to search and potentially allocate from the
	 * dynamic task ref groups. This must be protected by a lock.
	 */
	if (task_refgrp_config != TASK_REF_CONFIG_OFF &&
	    grp == TASK_GRP_EXTERNAL) {
		lck_spin_lock(&task->ref_group_lock);
	}

	switch (task_refgrp_config) {
	case TASK_REF_CONFIG_OFF:
		refgrp = NULL;
		break;

	case TASK_REF_CONFIG_DEFAULT:
		refgrp = (grp == TASK_GRP_EXTERNAL) ?
		    find_external_refgrp(&task->ref_group[1], 1, true) :
		    &task->ref_group[TASK_GRP_KERNEL];
		break;

	case TASK_REF_CONFIG_FULL:
		refgrp = (grp == TASK_GRP_EXTERNAL) ?
		    find_external_refgrp(&task->ref_group[TASK_GRP_COUNT], DYNAMIC_COUNT, true) :
		    &task->ref_group[grp];
		break;
	}

	os_ref_retain_raw(&task->ref_count.ref_count, refgrp);

	if (task_refgrp_config != TASK_REF_CONFIG_OFF &&
	    grp == TASK_GRP_EXTERNAL) {
		lck_spin_unlock(&task->ref_group_lock);
	}
}

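/*
 * Release a reference on 'task' previously taken via task_reference_grp()
 * or one of its wrappers, charging the matching reference group.
 */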
void
task_deallocate_grp(task_t task, task_grp_t grp)
{
	assert3u(grp, <, TASK_GRP_COUNT);
	assert(
	    task_refgrp_config == TASK_REF_CONFIG_OFF ||
	    task_refgrp_config == TASK_REF_CONFIG_DEFAULT ||
	    task_refgrp_config == TASK_REF_CONFIG_FULL);

	os_ref_count_t refs = -1;
	struct os_refgrp *refgrp = NULL;

	if (task == TASK_NULL) {
		return;
	}

	/*
	 * There is no need to take the ref_group_lock when de-allocating. The
	 * lock is only required when allocating a group.
	 */
	switch (task_refgrp_config) {
	case TASK_REF_CONFIG_OFF:
		refgrp = NULL;
		break;

	case TASK_REF_CONFIG_DEFAULT:
		refgrp = (grp == TASK_GRP_EXTERNAL) ?
		    find_external_refgrp(&task->ref_group[1], 1, false) :
		    &task->ref_group[TASK_GRP_KERNEL];
		break;

	case TASK_REF_CONFIG_FULL:
		refgrp = (grp == TASK_GRP_EXTERNAL) ?
		    find_external_refgrp(&task->ref_group[TASK_GRP_COUNT], DYNAMIC_COUNT, false) :
		    &task->ref_group[grp];
		break;
	}

	refs = os_ref_release_raw(&task->ref_count.ref_count, refgrp);
	/* Beware - the task may have been freed after this point. */

	task_deallocate_internal(task, refs);
}

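/*
 * Entry points used by kext callers; the reference/deallocate calls are
 * mapped to these '_external' variants via the .exports file.
 */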
void
task_reference_external(task_t task)
{
	task_reference_grp(task, TASK_GRP_EXTERNAL);
}

void
task_deallocate_external(task_t task)
{
	task_deallocate_grp(task, TASK_GRP_EXTERNAL);
}

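/*
 * Allocate the per-task reference groups for the default configuration:
 * one 'kernel' group (also used for internal and MIG references) plus a
 * single dynamic slot for kext references.
 */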
static void
allocate_refgrp_default(task_t task)
{
	/* Just one static group and one dynamic group. */
	task->ref_group = kalloc_type(struct os_refgrp, 2,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	task->ref_group[TASK_GRP_KERNEL] = (struct os_refgrp)
	    os_refgrp_initializer(local_name[TASK_GRP_KERNEL],
	    task_refgrp[TASK_GRP_KERNEL], OS_REFGRP_F_ALWAYS_ENABLED);
	os_ref_log_init(&task->ref_group[TASK_GRP_KERNEL]);
}

static void
free_refgrp_default(task_t task)
{
	os_ref_log_fini(&task->ref_group[TASK_GRP_KERNEL]);
	/* Just one static group and one dynamic group. */
	kfree_type(struct os_refgrp, 2, task->ref_group);
}

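/*
 * Allocate the per-task reference groups for the 'full' configuration: one
 * group mirroring each of the four global groups, followed by DYNAMIC_COUNT
 * dynamic slots for kext references.
 */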
static void
allocate_refgrp_full(task_t task)
{
	task->ref_group = kalloc_type(struct os_refgrp,
	    TASK_GRP_COUNT + DYNAMIC_COUNT, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	for (int i = 0; i < TASK_GRP_COUNT; i++) {
		task->ref_group[i] = (struct os_refgrp)
		    os_refgrp_initializer(local_name[i], task_refgrp[i],
		    OS_REFGRP_F_ALWAYS_ENABLED);
		os_ref_log_init(&task->ref_group[i]);
	}
}

static void
free_refgrp_full(task_t task)
{
	for (int i = 0; i < TASK_GRP_COUNT; i++) {
		os_ref_log_fini(&task->ref_group[i]);
	}
	kfree_type(struct os_refgrp, TASK_GRP_COUNT + DYNAMIC_COUNT, task->ref_group);
}

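/*
 * Set up a task's reference count (and, depending on configuration, its
 * reference groups) with TASK_REF_COUNT_INITIAL references: one for the
 * caller and one for the task being alive.
 */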
kern_return_t
task_ref_count_init(task_t task)
{
	assert(
	    task_refgrp_config == TASK_REF_CONFIG_OFF ||
	    task_refgrp_config == TASK_REF_CONFIG_DEFAULT ||
	    task_refgrp_config == TASK_REF_CONFIG_FULL);

	switch (task_refgrp_config) {
	case TASK_REF_CONFIG_OFF:
		os_ref_init_count(&task->ref_count, &task_primary_refgrp,
		    TASK_REF_COUNT_INITIAL);
		return KERN_SUCCESS;

	case TASK_REF_CONFIG_DEFAULT:
		allocate_refgrp_default(task);
		lck_spin_init(&task->ref_group_lock, &task_lck_grp, LCK_ATTR_NULL);
		os_ref_init_count(&task->ref_count, &task->ref_group[TASK_GRP_KERNEL],
		    TASK_REF_COUNT_INITIAL);
		return KERN_SUCCESS;

	case TASK_REF_CONFIG_FULL:
		allocate_refgrp_full(task);
		lck_spin_init(&task->ref_group_lock, &task_lck_grp, LCK_ATTR_NULL);

		/* Start at one and take the second, 'internal' reference explicitly. */
		os_ref_init_count_internal(&task->ref_count.ref_count,
		    &task->ref_group[TASK_GRP_KERNEL], 1);

		task_reference_grp(task, TASK_GRP_INTERNAL);

		return KERN_SUCCESS;
	}
}

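/*
 * Tear down whatever task_ref_count_init() set up for this task: the spin
 * lock and any per-task reference groups.
 */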
void
task_ref_count_fini(task_t task)
{
	assert(
	    task_refgrp_config == TASK_REF_CONFIG_OFF ||
	    task_refgrp_config == TASK_REF_CONFIG_DEFAULT ||
	    task_refgrp_config == TASK_REF_CONFIG_FULL);

	switch (task_refgrp_config) {
	case TASK_REF_CONFIG_OFF:
		return;

	case TASK_REF_CONFIG_DEFAULT:
		lck_spin_destroy(&task->ref_group_lock, &task_lck_grp);
		free_refgrp_default(task);
		return;

	case TASK_REF_CONFIG_FULL:
		lck_spin_destroy(&task->ref_group_lock, &task_lck_grp);
		free_refgrp_full(task);
		return;
	}
}

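/*
 * One-time initialization: select the reference group configuration from the
 * 'task_refgrp' boot-arg and initialize logging for the global groups.
 */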
void
task_ref_init(void)
{
	char config[16] = {0};

	/* Allow the task reference group configuration to be chosen by boot-arg. */
	(void) PE_parse_boot_arg_str("task_refgrp", config,
	    sizeof(config));

	if (strncmp(config, "full", sizeof(config)) == 0) {
		task_refgrp_config = TASK_REF_CONFIG_FULL;
	}
	if (strncmp(config, "off", sizeof(config)) == 0) {
		task_refgrp_config = TASK_REF_CONFIG_OFF;
	}

	if (task_refgrp_config == TASK_REF_CONFIG_OFF) {
		return;
	}

	for (int i = 0; i < TASK_GRP_COUNT; i++) {
		os_ref_log_init(task_refgrp[i]);
	}
}

#else /* DEVELOPMENT || DEBUG */

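/*
 * With DEVELOPMENT/DEBUG disabled, the per-task and per-kext reference groups
 * are not built; all references use the primary 'task' group declared below.
 */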
os_refgrp_decl(static, task_primary_refgrp, "task", NULL);

kern_return_t
task_ref_count_init(task_t task)
{
	/* One ref for our caller, one for being alive. */
	os_ref_init_count(&task->ref_count, &task_primary_refgrp,
	    TASK_REF_COUNT_INITIAL);
	return KERN_SUCCESS;
}

void
task_reference_grp(task_t task, __attribute__((__unused__)) task_grp_t grp)
{
	if (task == TASK_NULL) {
		return;
	}

	task_require(task);
	os_ref_retain(&task->ref_count);
}

void
task_deallocate_grp(task_t task, __attribute__((__unused__)) task_grp_t grp)
{
	if (task == TASK_NULL) {
		return;
	}

	os_ref_count_t refs = os_ref_release(&task->ref_count);
	task_deallocate_internal(task, refs);
}

void
task_reference_external(task_t task)
{
	task_reference_grp(task, 0);
}

void
task_deallocate_external(task_t task)
{
	task_deallocate_grp(task, 0);
}

void
task_ref_count_fini(__attribute__((__unused__)) task_t task)
{
}

void
task_ref_init(void)
{
}

#endif /* DEVELOPMENT || DEBUG */