/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/counter.h>
#include <kern/zalloc.h>
#include <machine/atomic.h>
#include <machine/machine_routines.h>
#include <machine/cpu_number.h>

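/*
 * Dynamically allocated scalable counters get their per-cpu storage from a
 * named view of the per-cpu 64-bit zone, so they are accounted for separately
 * in the zone stats.
 */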
ZONE_VIEW_DEFINE(counters_zone, "per_cpu_counters",
    .zv_zone = &percpu_u64_zone, sizeof(uint64_t));

/*
 * Tracks how many static scalable counters are in use since they won't show up
 * in the per_cpu_counters zone stats.
 */
uint64_t num_static_scalable_counters;

/*
 * Mangle the given scalable_counter_t so that it points to the early storage
 * regardless of which CPU # we boot on.
 * Must be run before we go multi-core.
 */
__startup_func void
scalable_counter_static_boot_mangle(scalable_counter_t *counter)
{
	*counter = __zpcpu_mangle_for_boot(*counter);
}

/*
 * Initializes a static counter in permanent per-cpu memory.
 * Run during startup for each static per-cpu counter.
 * Must be run before we go multi-core.
 */
__startup_func void
scalable_counter_static_init(scalable_counter_t *counter)
{
	/*
	 * We pointed the counter to a single global value during early boot.
	 * Grab that value now. We'll store it in our current CPU's value.
	 */
	uint64_t current_value = os_atomic_load_wide(zpercpu_get(*counter), relaxed);
	/*
	 * This counter can't be freed, so we allocate it out of the permanent
	 * zone rather than our counter zone.
	 */
	*counter = zalloc_percpu_permanent(sizeof(uint64_t), ZALIGN_64);
	os_atomic_store_wide(zpercpu_get(*counter), current_value, relaxed);
	num_static_scalable_counters++;
}
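
/*
 * Illustrative sketch only (my_counter is hypothetical; the real wiring is
 * generated by the static counter definition macros in kern/counter.h):
 * a statically defined counter initially points at a single early-boot
 * storage slot. The two startup hooks above first mangle that pointer so the
 * boot CPU's per-cpu accessors resolve to the slot, then migrate the
 * accumulated value into permanent per-cpu memory before other CPUs come up.
 *
 *	SCALABLE_COUNTER_DEFINE(my_counter);
 *
 *	// During single-core startup, in order:
 *	scalable_counter_static_boot_mangle(&my_counter);
 *	counter_inc(&my_counter);                  // lands in the early slot
 *	scalable_counter_static_init(&my_counter);
 *	counter_inc(&my_counter);                  // lands in this CPU's slot
 */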

OS_OVERLOADABLE
void
counter_alloc(scalable_counter_t *counter)
{
	*counter = zalloc_percpu(counters_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
}
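
/*
 * Example usage (sketch; my_counter is hypothetical): a scalable counter keeps
 * one 64-bit slot per CPU, so updates stay CPU-local and contention-free while
 * reads sum every slot via counter_load().
 *
 *	scalable_counter_t my_counter;
 *
 *	counter_alloc(&my_counter);             // per-cpu storage, zeroed
 *	counter_inc(&my_counter);               // CPU-local update
 *	counter_add(&my_counter, 41);
 *	assert(counter_load(&my_counter) == 42);
 *	counter_free(&my_counter);
 */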

OS_OVERLOADABLE
void
counter_alloc(atomic_counter_t *counter)
{
	os_atomic_store_wide(counter, 0, relaxed);
}

OS_OVERLOADABLE
void
counter_free(scalable_counter_t *counter)
{
	zfree_percpu(counters_zone, *counter);
}

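/*
 * Atomic counters have no separate backing allocation, so freeing one is a
 * no-op.
 */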
OS_OVERLOADABLE
void
counter_free(atomic_counter_t *counter)
{
	(void)counter;
}

OS_OVERLOADABLE
void
counter_add(atomic_counter_t *counter, uint64_t amount)
{
	os_atomic_add(counter, amount, relaxed);
}

OS_OVERLOADABLE
void
counter_inc(atomic_counter_t *counter)
{
	os_atomic_inc(counter, relaxed);
}

OS_OVERLOADABLE
void
counter_dec(atomic_counter_t *counter)
{
	os_atomic_dec(counter, relaxed);
}

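/*
 * For atomic counters, the *_preemption_disabled variants are identical to the
 * regular operations; they exist to mirror the scalable counter interface in
 * kern/counter.h, where a caller that already has preemption disabled can take
 * a cheaper per-cpu update path.
 */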
OS_OVERLOADABLE
void
counter_add_preemption_disabled(atomic_counter_t *counter, uint64_t amount)
{
	counter_add(counter, amount);
}

OS_OVERLOADABLE
void
counter_inc_preemption_disabled(atomic_counter_t *counter)
{
	counter_inc(counter);
}

OS_OVERLOADABLE
void
counter_dec_preemption_disabled(atomic_counter_t *counter)
{
	counter_dec(counter);
}

OS_OVERLOADABLE
uint64_t
counter_load(atomic_counter_t *counter)
{
	return os_atomic_load_wide(counter, relaxed);
}

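/*
 * Note that loading a scalable counter is not an atomic snapshot: each CPU's
 * slot is read in turn and other CPUs may keep updating theirs while the sum
 * is taken, so the result is only approximate for a live counter.
 */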
OS_OVERLOADABLE
uint64_t
counter_load(scalable_counter_t *counter)
{
	uint64_t value = 0;
	zpercpu_foreach(it, *counter) {
		value += os_atomic_load_wide(it, relaxed);
	}
	return value;
}