#if KERNEL
#include <kern/assert.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <kern/btlog.h>
#include <kern/backtrace.h>
#include <libkern/libkern.h>
#endif
#include <os/atomic_private.h>

#include "refcnt.h"

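/*
 * Reference counts are capped well below the 32-bit limit so that an
 * overflow can be detected and reported before the counter wraps.
 */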
#define OS_REFCNT_MAX_COUNT     ((os_ref_count_t)0x0FFFFFFFUL)

#if OS_REFCNT_DEBUG
extern struct os_refgrp global_ref_group;
os_refgrp_decl(, global_ref_group, "all", NULL);

extern bool ref_debug_enable;
bool ref_debug_enable = false;

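/*
 * Reference logging for a group is enabled either globally (the "rlog"
 * boot-arg sets ref_debug_enable) or per group via the
 * OS_REFGRP_F_ALWAYS_ENABLED flag.
 */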
#define REFLOG_GRP_DEBUG_ENABLED(grp) \
    __improbable(grp != NULL && (ref_debug_enable || \
    (grp->grp_flags & OS_REFGRP_F_ALWAYS_ENABLED) != 0))

static const size_t ref_log_nrecords = 1000000;

__enum_closed_decl(reflog_op_t, uint8_t, {
    REFLOG_RETAIN  = 1,
    REFLOG_RELEASE = 2
});

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */

void
os_ref_panic_live(void *rc)
{
    panic("os_refcnt: unexpected release of final reference (rc=%p)", rc);
    __builtin_unreachable();
}

__abortlike
static void
os_ref_panic_underflow(void *rc)
{
    panic("os_refcnt: underflow (rc=%p)", rc);
    __builtin_unreachable();
}

__abortlike
static void
os_ref_panic_overflow(void *rc)
{
    panic("os_refcnt: overflow (rc=%p)", rc);
    __builtin_unreachable();
}

__abortlike
static void
os_ref_panic_retain(os_ref_atomic_t *rc)
{
    if (os_atomic_load(rc, relaxed) >= OS_REFCNT_MAX_COUNT) {
        panic("os_refcnt: overflow (rc=%p)", rc);
    } else {
        panic("os_refcnt: attempted resurrection (rc=%p)", rc);
    }
}

static inline void
os_ref_check_underflow(void *rc, os_ref_count_t count, os_ref_count_t n)
{
    if (__improbable(count < n)) {
        os_ref_panic_underflow(rc);
    }
}

static inline void
os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count)
{
    if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
        os_ref_panic_overflow(rc);
    }
}

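/*
 * A retain must find at least `n` existing references (anything less is an
 * attempted resurrection of a dead object) and must not push the count past
 * OS_REFCNT_MAX_COUNT; os_ref_panic_retain() reports which case was hit.
 */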
static inline void
os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count, os_ref_count_t n)
{
    if (__improbable(count < n || count >= OS_REFCNT_MAX_COUNT)) {
        os_ref_panic_retain(rc);
    }
}

#if OS_REFCNT_DEBUG
#if KERNEL
__attribute__((cold, noinline))
static void
ref_log_op(struct os_refgrp *grp, void *elem, reflog_op_t op)
{
    if (grp == NULL) {
        return;
    }

    if (grp->grp_log == NULL) {
        ref_log_op(grp->grp_parent, elem, op);
        return;
    }

    btlog_record((btlog_t)grp->grp_log, elem, op,
        btref_get(__builtin_frame_address(0), BTREF_GET_NOWAIT));
}

__attribute__((cold, noinline))
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
    if (!REFLOG_GRP_DEBUG_ENABLED(grp)) {
        return;
    }

    if (grp->grp_log == NULL) {
        ref_log_drop(grp->grp_parent, elem);
        return;
    }

    btlog_erase(grp->grp_log, elem);
}

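/*
 * The "rlog" boot-arg takes a comma-separated list of refgroup names;
 * logging is turned on for a group the first time a matching name is seen.
 */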
__attribute__((cold, noinline))
void
os_ref_log_init(struct os_refgrp *grp)
{
    if (grp->grp_log != NULL) {
        return;
    }

    char grpbuf[128];
    char *refgrp = grpbuf;
    if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
        return;
    }

    /*
     * Enable refcount statistics if the rlog boot-arg is present,
     * even when no specific group is logged.
     */
    ref_debug_enable = true;

    const char *g;
    while ((g = strsep(&refgrp, ",")) != NULL) {
        if (strcmp(g, grp->grp_name) == 0) {
            /* enable logging on this refgrp */
            grp->grp_log = btlog_create(BTLOG_HASH,
                ref_log_nrecords, 0);
            return;
        }
    }
}


__attribute__((cold, noinline))
void
os_ref_log_fini(struct os_refgrp *grp)
{
    if (grp->grp_log == NULL) {
        return;
    }

    btlog_destroy(grp->grp_log);
    grp->grp_log = NULL;
}

#else

#ifndef os_ref_log_fini
inline void
os_ref_log_fini(struct os_refgrp *grp __unused)
{
}
#endif

#ifndef os_ref_log_init
inline void
os_ref_log_init(struct os_refgrp *grp __unused)
{
}
#endif
#ifndef ref_log_op
static inline void
ref_log_op(struct os_refgrp *grp __unused, void *rc __unused, reflog_op_t op __unused)
{
}
#endif
#ifndef ref_log_drop
static inline void
ref_log_drop(struct os_refgrp *grp __unused, void *rc __unused)
{
}
#endif

#endif /* KERNEL */

/*
 * attach a new refcnt to a group
 */
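/*
 * Group statistics are aggregated up the parent chain; a group with no
 * explicit parent is hung off global_ref_group so that "all" sees every
 * refcount in the system.
 */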
__attribute__((cold, noinline))
static void
ref_attach_to_group(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
    if (grp == NULL) {
        return;
    }

    if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
        /* First reference count object in this group. Check if we should enable
         * refcount logging. */
        os_ref_log_init(grp);
    }

    atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
    atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

    if (grp == &global_ref_group) {
        return;
    }

    if (grp->grp_parent == NULL) {
        grp->grp_parent = &global_ref_group;
    }

    ref_attach_to_group(rc, grp->grp_parent, init_count);
}

static void
ref_retain_group(struct os_refgrp *grp)
{
    if (grp) {
        atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
        atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
        ref_retain_group(grp->grp_parent);
    }
}

__attribute__((cold, noinline))
static void
ref_release_group(struct os_refgrp *grp)
{
    if (grp) {
        atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
        atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);

        ref_release_group(grp->grp_parent);
    }
}

__attribute__((cold, noinline))
static void
ref_drop_group(struct os_refgrp *grp)
{
    if (grp) {
        atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
        ref_drop_group(grp->grp_parent);
    }
}

__attribute__((cold, noinline))
static void
ref_init_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
    ref_attach_to_group(rc, grp, count);

    for (os_ref_count_t i = 0; i < count; i++) {
        ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
    }
}

__attribute__((cold, noinline))
static void
ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
    ref_retain_group(grp);
    ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
}
#endif

void
os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
    os_ref_check_underflow(rc, count, 1);
    atomic_init(rc, count);

#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        ref_init_debug(rc, grp, count);
    }
#endif
}

static inline void
__os_ref_retain(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp * __debug_only grp)
{
    os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed);
    os_ref_check_retain(rc, old, f);

#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        ref_retain_debug(rc, grp);
    }
#endif
}

void
os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
    __os_ref_retain(rc, 1, grp);
}

void
os_ref_retain_floor_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
    __os_ref_retain(rc, f, grp);
}

static inline bool
__os_ref_retain_try(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp * __debug_only grp)
{
    os_ref_count_t cur, next;

    os_atomic_rmw_loop(rc, cur, next, relaxed, {
        if (__improbable(cur < f)) {
            os_atomic_rmw_loop_give_up(return false);
        }

        next = cur + 1;
    });

    os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        ref_retain_debug(rc, grp);
    }
#endif

    return true;
}

bool
os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
    return __os_ref_retain_try(rc, 1, grp);
}

bool
os_ref_retain_floor_try_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
    return __os_ref_retain_try(rc, f, grp);
}

__attribute__((always_inline))
static inline os_ref_count_t
_os_ref_release_inline(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
    os_ref_count_t val;

#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        /*
         * Care not to use 'rc' after the decrement because it might be
         * deallocated under us.
         */
        ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
        ref_release_group(grp);
    }
#endif

    val = atomic_fetch_sub_explicit(rc, n, release_order);
    os_ref_check_underflow(rc, val, n);
    val -= n;
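    /*
     * If this release dropped the count to zero, issue one more load with
     * dealloc_order (acquire in the barrier variants) so the caller's
     * teardown is ordered after all earlier releases.
     */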
    if (__improbable(val < n)) {
        atomic_load_explicit(rc, dealloc_order);
    }

#if OS_REFCNT_DEBUG
    /*
     * The only way to safely access the ref count or group after
     * decrementing the count is when the count is zero (as the caller won't
     * see the zero until the function returns).
     */
    if (val == 0 && (REFLOG_GRP_DEBUG_ENABLED(grp))) {
        ref_drop_group(grp);
        ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
    }
#endif

    return val;
}

#if OS_REFCNT_DEBUG
__attribute__((noinline))
static os_ref_count_t
os_ref_release_n_internal(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
    // Legacy exported interface with bad codegen due to the barriers
    // not being immediate
    //
    // Also serves as the debug function
    return _os_ref_release_inline(rc, n, grp, release_order, dealloc_order);
}
#endif

__attribute__((noinline))
os_ref_count_t
os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
    // Legacy exported interface with bad codegen due to the barriers
    // not being immediate
    //
    // Also serves as the debug function
    return _os_ref_release_inline(rc, 1, grp, release_order, dealloc_order);
}

os_ref_count_t
os_ref_release_barrier_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        return os_ref_release_internal(rc, grp,
            memory_order_release, memory_order_acquire);
    }
#endif
    return _os_ref_release_inline(rc, 1, NULL,
        memory_order_release, memory_order_acquire);
}

os_ref_count_t
os_ref_release_relaxed_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        return os_ref_release_internal(rc, grp,
            memory_order_relaxed, memory_order_relaxed);
    }
#endif
    return _os_ref_release_inline(rc, 1, NULL,
        memory_order_relaxed, memory_order_relaxed);
}

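/*
 * The _locked variants assume the caller already serializes access to the
 * counter (e.g. with the object's lock), so a plain load and store are
 * sufficient here.
 */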
static inline void
__os_ref_retain_locked(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp * __debug_only grp)
{
    os_ref_count_t val = os_ref_get_count_internal(rc);
    os_ref_check_retain(rc, val, f);
    atomic_store_explicit(rc, ++val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        ref_retain_debug(rc, grp);
    }
#endif
}

void
os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
    __os_ref_retain_locked(rc, 1, grp);
}

void
os_ref_retain_floor_locked_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
    __os_ref_retain_locked(rc, f, grp);
}

os_ref_count_t
os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        ref_release_group(grp);
        ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
    }
#endif

    os_ref_count_t val = os_ref_get_count_internal(rc);
    os_ref_check_underflow(rc, val, 1);
    atomic_store_explicit(rc, --val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
    if (val == 0 && (REFLOG_GRP_DEBUG_ENABLED(grp))) {
        ref_drop_group(grp);
        ref_log_drop(grp, (void *)rc);
    }
#endif

    return val;
}

/*
 * Bitwise API
 */
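/*
 * The low `b` bits of the atomic word hold caller-owned flag bits; the
 * reference count lives in the remaining upper bits, so each retain and
 * release below steps the word by n = 1 << b.
 */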

#undef os_ref_init_count_mask
void
os_ref_init_count_mask(os_ref_atomic_t *rc, uint32_t b,
    struct os_refgrp *__debug_only grp,
    os_ref_count_t init_count, uint32_t init_bits)
{
    assert(init_bits < (1U << b));
    atomic_init(rc, (init_count << b) | init_bits);
    os_ref_check_underflow(rc, (init_count << b), 1u << b);

#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        ref_init_debug(rc, grp, init_count);
    }
#endif
}

__attribute__((always_inline))
static inline void
os_ref_retain_mask_inline(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp, memory_order mo)
{
    os_ref_count_t old = atomic_fetch_add_explicit(rc, n, mo);
    os_ref_check_retain(rc, old, n);

#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        ref_retain_debug(rc, grp);
    }
#endif
}

void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
    os_ref_retain_mask_inline(rc, n, grp, memory_order_relaxed);
}

void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
    os_ref_retain_mask_inline(rc, n, grp, memory_order_acquire);
}

uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        return os_ref_release_n_internal(rc, n, grp,
            memory_order_release, memory_order_acquire);
    }
#endif

    return _os_ref_release_inline(rc, n, NULL,
        memory_order_release, memory_order_acquire);
}

uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        return os_ref_release_n_internal(rc, n, grp,
            memory_order_relaxed, memory_order_relaxed);
    }
#endif

    return _os_ref_release_inline(rc, n, NULL,
        memory_order_relaxed, memory_order_relaxed);
}

uint32_t
os_ref_retain_try_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
    os_ref_count_t cur, next;

    os_atomic_rmw_loop(rc, cur, next, relaxed, {
        if (__improbable(cur < n || (cur & reject_mask))) {
            os_atomic_rmw_loop_give_up(return 0);
        }
        next = cur + n;
    });

    os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        ref_retain_debug(rc, grp);
    }
#endif

    return next;
}

bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
    os_ref_count_t cur, next;

    os_atomic_rmw_loop(rc, cur, next, acquire, {
        if (__improbable(cur < n || (cur & reject_mask))) {
            os_atomic_rmw_loop_give_up(return false);
        }
        next = cur + n;
    });

    os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
    if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
        ref_retain_debug(rc, grp);
    }
#endif

    return true;
}