#include <kern/assert.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <kern/btlog.h>
#include <kern/backtrace.h>
#include <libkern/libkern.h>
#include "refcnt.h"

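/*
 * os_refcnt: atomic reference counting, with optional per-group
 * statistics and btlog-based event logging under OS_REFCNT_DEBUG.
 *
 * A minimal usage sketch, assuming the os_ref_init()/os_ref_release()
 * convenience wrappers declared in refcnt.h:
 *
 *	struct os_refcnt rc;
 *	os_ref_init(&rc, NULL);          // count = 1, default group
 *	os_ref_retain(&rc);              // count = 2
 *	(void) os_ref_release(&rc);      // count = 1
 *	if (os_ref_release(&rc) == 0) {  // 0 => last reference was dropped
 *		// safe to free the object containing 'rc'
 *	}
 */
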
#define OS_REFCNT_MAX_COUNT ((os_ref_count_t)0x0FFFFFFFUL)

#if OS_REFCNT_DEBUG
os_refgrp_decl(static, global_ref_group, "all", NULL);
static bool ref_debug_enable = false;
static const size_t ref_log_nrecords = 1000000;

#define REFLOG_BTDEPTH 10
#define REFLOG_RETAIN  1
#define REFLOG_RELEASE 2

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */

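/*
 * Return the refcount's group name for diagnostics, or "<null>" when no
 * group is attached (or when OS_REFCNT_DEBUG is compiled out).
 */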
static const char *
ref_grp_name(struct os_refcnt __debug_only *rc)
{
#if OS_REFCNT_DEBUG
	if (rc && rc->ref_group && rc->ref_group->grp_name) {
		return rc->ref_group->grp_name;
	}
#endif
	return "<null>";
}

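/*
 * Panic if a reference is released while the count is already zero:
 * the object has been over-released.
 */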
static void
os_ref_check_underflow(struct os_refcnt *rc, os_ref_count_t count)
{
	if (__improbable(count == 0)) {
		panic("os_refcnt: underflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc));
		__builtin_unreachable();
	}
}

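/*
 * Panic if an operation that requires a live reference (e.g. a retain)
 * observes a zero count: the caller is using the object unsafely.
 */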
static void
os_ref_assert_referenced(struct os_refcnt *rc, os_ref_count_t count)
{
	if (__improbable(count == 0)) {
		panic("os_refcnt: used unsafely when zero (rc=%p, grp=%s)\n", rc, ref_grp_name(rc));
		__builtin_unreachable();
	}
}

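/*
 * Panic once the count reaches OS_REFCNT_MAX_COUNT; a count this large
 * almost certainly means references are being leaked.
 */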
static void
os_ref_check_overflow(struct os_refcnt *rc, os_ref_count_t count)
{
	if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
		panic("os_refcnt: overflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc));
		__builtin_unreachable();
	}
}

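/*
 * Validate a retain: the count must be non-zero (no resurrecting a dead
 * object) and below the overflow limit.
 */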
static void
os_ref_check_retain(struct os_refcnt *rc, os_ref_count_t count)
{
	os_ref_assert_referenced(rc, count);
	os_ref_check_overflow(rc, count);
}

#if OS_REFCNT_DEBUG
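/*
 * Record a retain/release event for 'elem' in the group's btlog, with a
 * short backtrace. A group without a log of its own defers to the
 * nearest ancestor that has one.
 */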
static void
ref_log_op(struct os_refgrp *grp, void *elem, int op)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_op(grp->grp_parent, elem, op);
		return;
	}

	uintptr_t bt[REFLOG_BTDEPTH];
	uint32_t nframes = backtrace(bt, REFLOG_BTDEPTH);
	btlog_add_entry((btlog_t *)grp->grp_log, elem, op, (void **)bt, nframes);
}

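/*
 * Remove all logged events for 'elem' (used once its count hits zero),
 * again deferring to an ancestor group when this group has no log.
 */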
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
	if (!ref_debug_enable || grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_drop(grp->grp_parent, elem);
		return;
	}

	btlog_remove_entries_for_element(grp->grp_log, elem);
}

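/*
 * Enable logging for this group if its name appears in the
 * comma-separated "rlog" boot-arg, creating the btlog on first use.
 */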
static void
ref_log_init(struct os_refgrp *grp)
{
	if (grp->grp_log != NULL) {
		return;
	}

	char grpbuf[128];
	char *refgrp = grpbuf;
	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
		return;
	}

	const char *g;
	while ((g = strsep(&refgrp, ",")) != NULL) {
		if (strcmp(g, grp->grp_name) == 0) {
			/* enable logging on this refgrp */
			grp->grp_log = btlog_create(ref_log_nrecords, REFLOG_BTDEPTH, true);
			assert(grp->grp_log);
			ref_debug_enable = true;
			return;
		}
	}
}

/*
 * Attach a new refcnt to a group, propagating its initial count up the
 * group hierarchy. The first refcnt to join a group also triggers the
 * boot-arg check for refcount logging.
 */
static void
ref_attach_to_group(struct os_refcnt *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
	if (grp == NULL) {
		return;
	}

	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
		/* First reference count object in this group. Check if we should enable
		 * refcount logging. */
		ref_log_init(grp);
	}

	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

	if (grp == &global_ref_group) {
		return;
	}

	if (grp->grp_parent == NULL) {
		grp->grp_parent = &global_ref_group;
	}

	ref_attach_to_group(rc, grp->grp_parent, init_count);
}

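/*
 * Count a retain against this group and every ancestor group.
 */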
static inline void
ref_retain_group(struct os_refgrp *grp)
{
	if (grp) {
		atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
		ref_retain_group(grp->grp_parent);
	}
}

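/*
 * Count a release against this group and every ancestor group; on a
 * final release, also drop each group's child count.
 */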
static inline void
ref_release_group(struct os_refgrp *grp, bool final)
{
	if (grp) {
		atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
		atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
		if (final) {
			atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
		}

		ref_release_group(grp->grp_parent, final);
	}
}
#endif /* OS_REFCNT_DEBUG */

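/*
 * refcnt.h may wrap os_ref_init_count() in a macro, hence the #undef
 * before the definition. Initialize the count, and under
 * OS_REFCNT_DEBUG attach the refcount to its group (defaulting to the
 * global group) and log one retain event per initial reference.
 */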
#undef os_ref_init_count
void
os_ref_init_count(struct os_refcnt *rc, struct os_refgrp __debug_only *grp, os_ref_count_t count)
{
	atomic_init(&rc->ref_count, count);

#if OS_REFCNT_DEBUG
	assert(count > 0);
	if (grp) {
		rc->ref_group = grp;
	} else {
		rc->ref_group = &global_ref_group;
	}

	ref_attach_to_group(rc, rc->ref_group, count);

	for (os_ref_count_t i = 0; i < count; i++) {
		ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN);
	}
#endif
}

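/*
 * Take an additional reference. A relaxed increment suffices: the
 * caller must already hold a reference, which provides any ordering
 * needed here.
 */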
void
os_ref_retain(struct os_refcnt *rc)
{
	os_ref_count_t old = atomic_fetch_add_explicit(&rc->ref_count, 1, memory_order_relaxed);
	os_ref_check_retain(rc, old);

#if OS_REFCNT_DEBUG
	ref_retain_group(rc->ref_group);
	ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN);
#endif
}

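/*
 * Try to take a reference, returning false if the count has already
 * dropped to zero. The compare-and-swap loop ensures a zero count is
 * never resurrected.
 */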
bool
os_ref_retain_try(struct os_refcnt *rc)
{
	os_ref_count_t cur = os_ref_get_count(rc);

	while (1) {
		if (__improbable(cur == 0)) {
			return false;
		}

		os_ref_check_retain(rc, cur);

		if (atomic_compare_exchange_weak_explicit(&rc->ref_count, &cur, cur + 1,
		    memory_order_relaxed, memory_order_relaxed)) {
#if OS_REFCNT_DEBUG
			ref_retain_group(rc->ref_group);
			ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN);
#endif
			return true;
		}
	}
}

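/*
 * Drop a reference and return the new count. 'release_order' orders the
 * caller's prior accesses to the object before the decrement; on the
 * final release, the extra load with 'dealloc_order' (typically
 * acquire) synchronizes with releases performed on other threads before
 * the caller frees the object.
 */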
os_ref_count_t
os_ref_release_explicit(struct os_refcnt *rc, memory_order release_order, memory_order dealloc_order)
{
#if OS_REFCNT_DEBUG
	/*
	 * Take care not to use 'rc' after the decrement: once the count can
	 * reach zero, another thread may deallocate the object underneath
	 * us. Cache the group pointer up front.
	 */
	struct os_refgrp *grp = rc->ref_group;
	ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
#endif

	os_ref_count_t val = atomic_fetch_sub_explicit(&rc->ref_count, 1, release_order);
	os_ref_check_underflow(rc, val);
	if (__improbable(--val == 0)) {
		atomic_load_explicit(&rc->ref_count, dealloc_order);
#if OS_REFCNT_DEBUG
		ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
#endif
	}

#if OS_REFCNT_DEBUG
	ref_release_group(grp, !val);
#endif

	return val;
}

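/*
 * Retain while holding the lock that guards this refcount; the lock
 * serializes updates, so no atomic read-modify-write is needed.
 */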
void
os_ref_retain_locked(struct os_refcnt *rc)
{
	os_ref_count_t val = rc->ref_count;
	os_ref_check_retain(rc, val);
	rc->ref_count = ++val;

#if OS_REFCNT_DEBUG
	ref_retain_group(rc->ref_group);
	ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN);
#endif
}

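/*
 * Release while holding the lock that guards this refcount, returning
 * the new count; zero means the final reference was dropped.
 */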
os_ref_count_t
os_ref_release_locked(struct os_refcnt *rc)
{
	os_ref_count_t val = rc->ref_count;
	os_ref_check_underflow(rc, val);
	rc->ref_count = --val;

#if OS_REFCNT_DEBUG
	ref_release_group(rc->ref_group, !val);
	ref_log_op(rc->ref_group, (void *)rc, REFLOG_RELEASE);
	if (val == 0) {
		ref_log_drop(rc->ref_group, (void *)rc);
	}
#endif
	return val;
}