/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/counter.h>
#include <kern/zalloc.h>
#include <machine/atomic.h>
#include <machine/machine_routines.h>
#include <machine/cpu_number.h>

ZONE_VIEW_DEFINE(counters_zone, "per_cpu_counters",
    .zv_zone = &percpu_u64_zone, sizeof(uint64_t));

/*
 * Tracks how many static scalable counters are in use since they won't show up
 * in the per_cpu_counters zone stats.
 */
uint64_t num_static_scalable_counters;

/*
 * Mangle the given scalable_counter_t so that it points to the early storage
 * regardless of which CPU # we boot on.
 * Must be run before we go multi-core.
 */
__startup_func void
scalable_counter_static_boot_mangle(scalable_counter_t *counter)
{
	*counter = __zpcpu_mangle_for_boot(*counter);
}

/*
 * Initializes a static counter in permanent per-CPU memory.
 * Run during startup for each static per-CPU counter.
 * Must be run before we go multi-core.
 */
__startup_func void
scalable_counter_static_init(scalable_counter_t *counter)
{
	/*
	 * We pointed the counter to a single global value during early boot.
	 * Grab that value now. We'll store it in our current CPU's value.
	 */
	uint64_t current_value = os_atomic_load_wide(zpercpu_get(*counter), relaxed);
	/*
	 * This counter can't be freed, so we allocate it out of the permanent
	 * zone rather than our counter zone.
	 */
	*counter = zalloc_percpu_permanent(sizeof(uint64_t), ZALIGN_64);
	os_atomic_store_wide(zpercpu_get(*counter), current_value, relaxed);
	num_static_scalable_counters++;
}
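
/*
 * Usage sketch (not from the original file): static counters are expected to
 * be set up via SCALABLE_COUNTER_DEFINE() in <kern/counter.h>, which provides
 * the early __startup_data storage and arranges for the two routines above to
 * run at startup (the exact wiring is an assumption; see that header).
 * `example_faults` is a hypothetical counter name.
 *
 *	SCALABLE_COUNTER_DEFINE(example_faults);
 *
 * Increments made during early boot accumulate in the single early-storage
 * slot; scalable_counter_static_init() then migrates that total into the boot
 * CPU's slot of the permanent per-CPU allocation.
 */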

OS_OVERLOADABLE
void
counter_alloc(scalable_counter_t *counter)
{
	*counter = zalloc_percpu(counters_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
}

OS_OVERLOADABLE
void
counter_alloc(atomic_counter_t *counter)
{
	os_atomic_store_wide(counter, 0, relaxed);
}

OS_OVERLOADABLE
void
counter_free(scalable_counter_t *counter)
{
	zfree_percpu(counters_zone, *counter);
}

OS_OVERLOADABLE
void
counter_free(atomic_counter_t *counter)
{
	(void)counter;
}
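
/*
 * Usage sketch (illustrative only; `example_dynamic_lifecycle` is a
 * hypothetical caller, not part of this file): the lifecycle of a dynamically
 * allocated scalable counter. counter_add()/counter_inc() for scalable
 * counters are declared in <kern/counter.h> and defined elsewhere.
 */
__unused static void
example_dynamic_lifecycle(void)
{
	scalable_counter_t events;

	counter_alloc(&events);             /* per-CPU storage from counters_zone */
	counter_add(&events, 2);            /* cheap update to this CPU's slot */
	counter_inc(&events);
	assert(counter_load(&events) == 3); /* sums every CPU's slot */
	counter_free(&events);              /* counter must not be used afterwards */
}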

OS_OVERLOADABLE
void
counter_add(atomic_counter_t *counter, uint64_t amount)
{
	os_atomic_add(counter, amount, relaxed);
}

OS_OVERLOADABLE
void
counter_inc(atomic_counter_t *counter)
{
	os_atomic_inc(counter, relaxed);
}

OS_OVERLOADABLE
void
counter_dec(atomic_counter_t *counter)
{
	os_atomic_dec(counter, relaxed);
}
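
/*
 * Usage sketch (illustrative only; `example_atomic_usage` is hypothetical):
 * atomic counters need no separate allocation; updates are atomic RMWs that
 * can contend across CPUs, while a read is a single exact load.
 */
__unused static void
example_atomic_usage(void)
{
	atomic_counter_t errors;

	counter_alloc(&errors);             /* just zeroes the value */
	counter_add(&errors, 2);
	counter_dec(&errors);
	assert(counter_load(&errors) == 1);
	counter_free(&errors);              /* no-op for atomic counters */
}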

/*
 * The *_preemption_disabled variants exist so callers that already run with
 * preemption disabled can use one API for both counter types. For atomic
 * counters they are identical to the plain operations; the scalable
 * implementations (defined elsewhere) use them to avoid a redundant
 * disable/enable preemption pair.
 */
OS_OVERLOADABLE
void
counter_add_preemption_disabled(atomic_counter_t *counter, uint64_t amount)
{
	counter_add(counter, amount);
}

OS_OVERLOADABLE
void
counter_inc_preemption_disabled(atomic_counter_t *counter)
{
	counter_inc(counter);
}

OS_OVERLOADABLE
void
counter_dec_preemption_disabled(atomic_counter_t *counter)
{
	counter_dec(counter);
}
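
/*
 * Usage sketch (illustrative only; `example_update_pinned` is hypothetical):
 * a caller that already holds preemption disabled can use the variants above
 * directly.
 */
__unused static void
example_update_pinned(atomic_counter_t *counter)
{
	disable_preemption();
	/* ... per-CPU work that must not migrate ... */
	counter_inc_preemption_disabled(counter);
	enable_preemption();
}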

OS_OVERLOADABLE
uint64_t
counter_load(atomic_counter_t *counter)
{
	return os_atomic_load_wide(counter, relaxed);
}

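/*
 * Note: this sums the per-CPU values with relaxed loads, so updates that race
 * with the read may or may not be included; the result is exact only once the
 * counter has quiesced.
 */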
OS_OVERLOADABLE
uint64_t
counter_load(scalable_counter_t *counter)
{
	uint64_t value = 0;
	zpercpu_foreach(it, *counter) {
		value += os_atomic_load_wide(it, relaxed);
	}
	return value;
}