/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _OS_REFCNT_H_
#define _OS_REFCNT_H_

/*
 * os_refcnt reference counting API
 *
 * Two flavors are provided: atomic and locked. The atomic flavor uses C11
 * atomic operations internally and requires no external synchronization,
 * whereas the locked flavor assumes the refcnt object is protected by a
 * caller-held lock. It is NOT safe to mix and match locked and atomic calls
 * on the same object.
 */

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <os/base.h>
struct os_refcnt;
struct os_refgrp;
typedef struct os_refcnt os_refcnt_t;

/* type of the internal counter */
typedef uint32_t os_ref_count_t;

#if DEVELOPMENT || DEBUG
# define OS_REFCNT_DEBUG 1
#else
# define OS_REFCNT_DEBUG 0
#endif

/*
 * Debugging is keyed off ref_group, so leave that field for kexts so that the
 * combination of dev/debug kernel and release kext works.
 */
#if XNU_KERNEL_PRIVATE
# define OS_REFCNT_HAS_GROUP OS_REFCNT_DEBUG
#else
# define OS_REFCNT_HAS_GROUP 1
#endif

struct os_refcnt {
	_Atomic os_ref_count_t ref_count;
#if OS_REFCNT_HAS_GROUP
	struct os_refgrp *ref_group;
#endif
};

#if OS_REFCNT_DEBUG
struct os_refgrp {
	const char *const grp_name;
	_Atomic os_ref_count_t grp_children; /* number of refcount objects in group */
	_Atomic os_ref_count_t grp_count;    /* current reference count of group */
	_Atomic uint64_t grp_retain_total;
	_Atomic uint64_t grp_release_total;
	struct os_refgrp *grp_parent;
	void *grp_log;                       /* refcount logging context */
};
#endif

#if __has_attribute(diagnose_if)
# define os_error_if(cond, msg) __attribute__((diagnose_if((cond), (msg), "error")))
#else
# define os_error_if(...)
#endif
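
/*
 * Example (illustrative): with diagnose_if support, a constraint attached to
 * a declaration turns a constant-foldable violation into a compile-time
 * error. For instance, given the os_ref_init_count() declaration below,
 *
 *	os_ref_init_count(&obj->mo_refs, NULL, 0);
 *
 * fails to compile with "Reference count must be initialized to a non-zero
 * value". Without the attribute, os_error_if() expands to nothing and the
 * check is skipped.
 */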

__BEGIN_DECLS

/*
 * os_ref_init: initialize an os_refcnt with a count of 1
 * os_ref_init_count: initialize an os_refcnt with a specific count >= 1
 */
#define os_ref_init(rc, grp) os_ref_init_count((rc), (grp), 1)
void os_ref_init_count(struct os_refcnt *, struct os_refgrp *, os_ref_count_t count)
	os_error_if(count == 0, "Reference count must be initialized to a non-zero value");
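
/*
 * Example (illustrative sketch; "struct my_object" and my_object_alloc() are
 * hypothetical, and kalloc() stands in for any allocator): a newly created
 * object starts life holding a single reference on behalf of its creator.
 *
 *	struct my_object {
 *		struct os_refcnt mo_refs;
 *		// ... other fields ...
 *	};
 *
 *	struct my_object *
 *	my_object_alloc(void)
 *	{
 *		struct my_object *obj = kalloc(sizeof(*obj));
 *		if (obj != NULL) {
 *			os_ref_init(&obj->mo_refs, NULL); // count == 1
 *		}
 *		return obj;
 *	}
 */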

#if OS_REFCNT_DEBUG
# define os_refgrp_decl(qual, var, name, parent) \
	qual struct os_refgrp __attribute__((section("__DATA,__refgrps"))) var = { \
		.grp_name = (name), \
		.grp_children = ATOMIC_VAR_INIT(0), \
		.grp_count = ATOMIC_VAR_INIT(0), \
		.grp_retain_total = ATOMIC_VAR_INIT(0), \
		.grp_release_total = ATOMIC_VAR_INIT(0), \
		.grp_parent = (parent), \
		.grp_log = NULL, \
	}

/* Create a default group based on the init() callsite if no explicit group
 * is provided. */
# define os_ref_init_count(rc, grp, count) ({ \
		os_refgrp_decl(static, __grp, __func__, NULL); \
		(os_ref_init_count)((rc), (grp) ? (grp) : &__grp, (count)); \
	})
#else
# define os_refgrp_decl(...)
# define os_ref_init_count(rc, grp, count) (os_ref_init_count)((rc), NULL, (count))
#endif /* OS_REFCNT_DEBUG */
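
/*
 * Example (illustrative; the group name is hypothetical): declaring a named
 * reference group lets debug builds account for all objects of one type
 * together, including running totals of retains and releases.
 *
 *	os_refgrp_decl(static, my_object_refgrp, "my_object", NULL);
 *
 *	// at object creation:
 *	os_ref_init(&obj->mo_refs, &my_object_refgrp);
 *
 * On non-debug builds the group argument is discarded unevaluated by the
 * os_ref_init_count() macro above, so callers may pass a group
 * unconditionally.
 */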

/*
 * os_ref_retain: acquire a reference (increment reference count by 1) atomically.
 *
 * os_ref_release: release a reference (decrement reference count) atomically and
 *	return the new count. Memory is synchronized such that the dealloc block
 *	(i.e. code handling the final release() == 0 call) sees up-to-date memory
 *	with respect to all prior release()s on the same refcnt object. This
 *	memory ordering is sufficient for most use cases.
 *
 * os_ref_release_relaxed: same as release() but with weaker relaxed memory ordering.
 *	This can be used when the dealloc block is already synchronized with other
 *	accesses to the object (for example, with a lock).
 *
 * os_ref_release_live: release a reference that is guaranteed not to be the last one.
 */
void os_ref_retain(struct os_refcnt *);

os_ref_count_t os_ref_release_explicit(struct os_refcnt *rc,
	memory_order release_order, memory_order dealloc_order) OS_WARN_RESULT;

static inline os_ref_count_t OS_WARN_RESULT
os_ref_release(struct os_refcnt *rc)
{
	return os_ref_release_explicit(rc, memory_order_release, memory_order_acquire);
}

static inline os_ref_count_t OS_WARN_RESULT
os_ref_release_relaxed(struct os_refcnt *rc)
{
	return os_ref_release_explicit(rc, memory_order_relaxed, memory_order_relaxed);
}

static inline void
os_ref_release_live(struct os_refcnt *rc)
{
	if (__improbable(os_ref_release_explicit(rc,
	    memory_order_release, memory_order_relaxed) == 0)) {
		panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
		__builtin_unreachable();
	}
}
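
/*
 * Example (illustrative sketch; my_object_free() is hypothetical): the usual
 * release pattern runs the teardown path only on the final reference, relying
 * on the acquire/release ordering described above to make all prior writes
 * visible to the deallocating thread.
 *
 *	void
 *	my_object_release(struct my_object *obj)
 *	{
 *		if (os_ref_release(&obj->mo_refs) == 0) {
 *			// last reference dropped: no other holders remain
 *			my_object_free(obj);
 *		}
 *	}
 *
 * os_ref_release_live() fits callers that borrow a reference while the owner
 * provably holds another one; it panics rather than let such a caller be the
 * one to drop the count to zero.
 */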


/*
 * os_ref_retain_try: a variant of atomic retain that fails for objects with a
 *	zero reference count. The caller must therefore ensure that the object
 *	remains alive for any possible retain_try() caller, usually by using a
 *	lock protecting both the retain and dealloc paths. This variant is useful
 *	for objects stored in a collection, because no lock is required on the
 *	release() side until the object is deallocated.
 */
bool os_ref_retain_try(struct os_refcnt *) OS_WARN_RESULT;
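
/*
 * Example (illustrative sketch; the table, key, and lock are hypothetical):
 * looking up an object in a lock-protected collection. The table lock keeps
 * the object from being freed while retain_try() runs; a false return means
 * the final release has already happened and the object must be treated as
 * gone.
 *
 *	lck_mtx_lock(&my_table_lock);
 *	struct my_object *obj = my_table_find(key);
 *	if (obj != NULL && !os_ref_retain_try(&obj->mo_refs)) {
 *		obj = NULL; // already being deallocated; skip it
 *	}
 *	lck_mtx_unlock(&my_table_lock);
 *
 * The release path then only needs my_table_lock when it removes the object
 * from the table during deallocation, not on every release().
 */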


/*
 * os_ref_retain_locked: acquire a reference on an object protected by a held
 *	lock. The caller must ensure mutual exclusion of retain_locked() and
 *	release_locked() calls on the same object.
 *
 * os_ref_release_locked: release a reference on an object protected by a held
 *	lock.
 */
void os_ref_retain_locked(struct os_refcnt *);
os_ref_count_t os_ref_release_locked(struct os_refcnt *) OS_WARN_RESULT;
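
/*
 * Example (illustrative sketch; obj->mo_lock is a hypothetical mutex that
 * already serializes all access to the object): the caller's lock provides
 * the synchronization, so the locked flavor avoids atomic operations.
 *
 *	lck_mtx_lock(&obj->mo_lock);
 *	os_ref_retain_locked(&obj->mo_refs);
 *	lck_mtx_unlock(&obj->mo_lock);
 *
 *	lck_mtx_lock(&obj->mo_lock);
 *	os_ref_count_t count = os_ref_release_locked(&obj->mo_refs);
 *	lck_mtx_unlock(&obj->mo_lock);
 *	if (count == 0) {
 *		my_object_free(obj); // no holders remain; safe outside the lock
 *	}
 */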


/*
 * os_ref_get_count: return the current reference count. The returned value is
 *	a point-in-time snapshot that may be stale by the time it is read, so it
 *	must not be used for synchronization; it is intended for debugging and
 *	assertions only.
 */
static inline os_ref_count_t
os_ref_get_count(struct os_refcnt *rc)
{
	return atomic_load_explicit(&rc->ref_count, memory_order_relaxed);
}
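
/*
 * Example (illustrative): the snapshot is only trustworthy as a sanity check,
 * e.g. asserting that a caller who believes it holds a reference observes a
 * non-zero count.
 *
 *	assert(os_ref_get_count(&obj->mo_refs) >= 1);
 */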

__END_DECLS

#endif /* _OS_REFCNT_H_ */