/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef XNU_KERNEL_PRIVATE

#ifndef _KERN_COUNTER_H
#define _KERN_COUNTER_H

/*!
 * @file <kern/counter.h>
 *
 * @brief
 * Module for working with 64-bit relaxed atomic counters.
 *
 * @discussion
 * Different counter types have different speed-memory tradeoffs, but
 * they all share a common interface.
 *
 * Counters can be statically allocated or dynamically allocated.
 *
 * Statically allocated counters are always backed by per-cpu storage, which means
 * writes update the current CPU's value and reads sum all of the per-cpu values.
 *
 * Dynamically allocated counters can either be per-cpu or use a single 64-bit value.
 * To create a per-cpu counter, use the scalable_counter_t type. Note that this
 * trades additional memory for better scalability.
 * To create a single 64-bit counter, use the atomic_counter_t type.
 *
 * For most counters you can just use the counter_t type, and the choice of
 * scalable or atomic will be made at compile time based on the target.
 *
 * The counter types are opaque handles. They ARE NOT COPYABLE. If you need
 * to make a copy of a counter, you should do so like this:
 * <code>
 * counter_t original;
 * ...
 * counter_t copy;
 * counter_alloc(&copy);
 * counter_add(&copy, counter_load(&original));
 * ...
 * // Make sure to free them at some point.
 * counter_free(&original);
 * counter_free(&copy);
 * </code>
 *
 * Static counter example:
 * <code>
 * SCALABLE_COUNTER_DEFINE(my_counter);
 * ...
 * counter_inc(&my_counter);
 * assert(counter_load(&my_counter) == 1);
 * </code>
 *
 * Dynamic counter example:
 * <code>
 * scalable_counter_t my_percpu_counter;
 * atomic_counter_t my_atomic_counter;
 * counter_t my_counter;
 *
 * // All three counters share the same interface, so to change the speed-memory
 * // tradeoff just change the type.
 * counter_alloc(&my_percpu_counter);
 * counter_alloc(&my_atomic_counter);
 * counter_alloc(&my_counter);
 *
 * counter_inc(&my_percpu_counter);
 * counter_inc(&my_atomic_counter);
 * counter_inc(&my_counter);
 *
 * assert(counter_load(&my_percpu_counter) == 1);
 * assert(counter_load(&my_atomic_counter) == 1);
 * assert(counter_load(&my_counter) == 1);
 * </code>
 */

#include <mach/mach_types.h>
#include <kern/macro_help.h>
#include <kern/startup.h>
#include <kern/zalloc.h>

typedef uint64_t *__zpercpu scalable_counter_t;
typedef uint64_t atomic_counter_t;
/* Generic counter base type. Does not have an implementation. */
struct generic_counter_t;

/*!
 * @macro SCALABLE_COUNTER_DECLARE
 *
 * @abstract
 * (optionally) declares a static per-cpu counter (in a header).
 *
 * @param name the name of the counter.
 */
#define SCALABLE_COUNTER_DECLARE(name) \
	extern scalable_counter_t name;

/*!
 * @macro SCALABLE_COUNTER_DEFINE
 *
 * @abstract
 * Defines a static per-cpu counter.
 * The counter can only be accessed after the TUNABLES phase of startup.
 *
 * @param name the name of the counter.
 */
#define SCALABLE_COUNTER_DEFINE(name) \
	__startup_data uint64_t __ ##name##_early_storage = 0; \
	scalable_counter_t name = {&__##name##_early_storage}; \
	STARTUP_ARG(TUNABLES, STARTUP_RANK_MIDDLE, scalable_counter_static_boot_mangle, &name); \
	STARTUP_ARG(PERCPU, STARTUP_RANK_SECOND, scalable_counter_static_init, &name);

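/*
 * Usage sketch (hypothetical counter name): declare the counter in a header
 * so other translation units can reference it, and define it in exactly one
 * source file.
 * <code>
 * // foo.h
 * SCALABLE_COUNTER_DECLARE(foo_event_count);
 * // foo.c
 * SCALABLE_COUNTER_DEFINE(foo_event_count);
 * ...
 * counter_inc(&foo_event_count);
 * </code>
 */
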
/*
 * Allocate and initialize a counter.
 * May block and will never fail.
 * The counter must be freed with counter_free.
 */
OS_OVERLOADABLE
extern void counter_alloc(struct generic_counter_t *);

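/*
 * Free a counter previously allocated with counter_alloc.
 */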
OS_OVERLOADABLE
extern void counter_free(struct generic_counter_t *);
/*
 * Add amount to counter.
 * @param amount The amount to add.
 */
OS_OVERLOADABLE
extern void counter_add(struct generic_counter_t *, uint64_t amount);

/*
 * Add 1 to this counter.
 */
OS_OVERLOADABLE
extern void counter_inc(struct generic_counter_t *);

/*
 * Subtract 1 from this counter.
 */
OS_OVERLOADABLE
extern void counter_dec(struct generic_counter_t *);

/* Variants of the above operations where the caller takes responsibility for disabling preemption. */
OS_OVERLOADABLE
extern void counter_add_preemption_disabled(struct generic_counter_t *, uint64_t amount);
OS_OVERLOADABLE
extern void counter_inc_preemption_disabled(struct generic_counter_t *);
OS_OVERLOADABLE
extern void counter_dec_preemption_disabled(struct generic_counter_t *);

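/*
 * A minimal sketch of the preemption-disabled variants (hypothetical counter;
 * assumes the caller brackets the update with disable_preemption() and
 * enable_preemption()):
 * <code>
 * SCALABLE_COUNTER_DEFINE(my_hot_counter);
 * ...
 * disable_preemption();
 * counter_inc_preemption_disabled(&my_hot_counter);
 * enable_preemption();
 * </code>
 */
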
/*
 * Read the value of the counter.
 * Note that for per-cpu counters this synchronizes with and sums all of the
 * sharded per-cpu values, so it is more expensive than a single load.
 */
OS_OVERLOADABLE
extern uint64_t counter_load(struct generic_counter_t *);

#pragma mark implementation details
/* NB: Nothing below here should be used directly. */

__startup_func void scalable_counter_static_boot_mangle(scalable_counter_t *counter);
__startup_func void scalable_counter_static_init(scalable_counter_t *counter);

#if XNU_TARGET_OS_WATCH || XNU_TARGET_OS_TV
#define ATOMIC_COUNTER_USE_PERCPU 0
#else
#define ATOMIC_COUNTER_USE_PERCPU 1
#endif /* XNU_TARGET_OS_WATCH || XNU_TARGET_OS_TV */

#if ATOMIC_COUNTER_USE_PERCPU
typedef scalable_counter_t counter_t;
#else
typedef atomic_counter_t counter_t;
#endif /* ATOMIC_COUNTER_USE_PERCPU */

#define COUNTER_MAKE_PROTOTYPES(counter_t) \
OS_OVERLOADABLE \
extern void counter_alloc(counter_t *); \
 \
OS_OVERLOADABLE \
extern void counter_free(counter_t *); \
 \
OS_OVERLOADABLE \
extern void counter_add(counter_t *, uint64_t amount); \
 \
OS_OVERLOADABLE \
extern void counter_inc(counter_t *); \
 \
OS_OVERLOADABLE \
extern void counter_dec(counter_t *); \
 \
OS_OVERLOADABLE \
extern void counter_add_preemption_disabled(counter_t *, uint64_t amount); \
 \
OS_OVERLOADABLE \
extern void counter_inc_preemption_disabled(counter_t *); \
 \
OS_OVERLOADABLE \
extern void counter_dec_preemption_disabled(counter_t *); \
 \
OS_OVERLOADABLE \
extern uint64_t counter_load(counter_t *);

COUNTER_MAKE_PROTOTYPES(scalable_counter_t);
COUNTER_MAKE_PROTOTYPES(atomic_counter_t);

#endif /* _KERN_COUNTER_H */

#endif /* XNU_KERNEL_PRIVATE */