// Copyright (c) 2021 Apple Inc. All rights reserved.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_START@
//
// This file contains Original Code and/or Modifications of Original Code
// as defined in and that are subject to the Apple Public Source License
// Version 2.0 (the 'License'). You may not use this file except in
// compliance with the License. The rights granted to you under the License
// may not be used to create, or enable the creation or redistribution of,
// unlawful or unlicensed copies of an Apple operating system, or to
// circumvent, violate, or enable the circumvention or violation of, any
// terms of an Apple operating system software license agreement.
//
// Please obtain a copy of the License at
// http://www.opensource.apple.com/apsl/ and read it before using this file.
//
// The Original Code and all software distributed under the License are
// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
// Please see the License for the specific language governing rights and
// limitations under the License.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_END@

#include <kern/recount.h>
#include <machine/machine_routines.h>
#include <machine/smp.h>
#include <sys/proc_info.h>
#include <sys/resource_private.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/types.h>

// Recount's BSD-specific syscall implementations.

#if CONFIG_PERVASIVE_CPI

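// Helpers to convert the kernel's internal `struct recount_usage` counters
// into the fixed-layout structures that are copied out to user space.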
static struct thsc_cpi
_usage_to_cpi(struct recount_usage *usage)
{
	return (struct thsc_cpi){
		.tcpi_instructions = recount_usage_instructions(usage),
		.tcpi_cycles = recount_usage_cycles(usage),
	};
}

static struct thsc_time_cpi
_usage_to_time_cpi(struct recount_usage *usage)
{
	return (struct thsc_time_cpi){
		.ttci_instructions = recount_usage_instructions(usage),
		.ttci_cycles = recount_usage_cycles(usage),
		.ttci_system_time_mach = recount_usage_system_time_mach(usage),
		.ttci_user_time_mach = usage->ru_metrics[RCT_LVL_USER].rm_time_mach,
	};
}

static struct thsc_time_energy_cpi
_usage_to_time_energy_cpi(struct recount_usage *usage)
{
	return (struct thsc_time_energy_cpi){
		.ttec_instructions = recount_usage_instructions(usage),
		.ttec_cycles = recount_usage_cycles(usage),
		.ttec_system_time_mach = recount_usage_system_time_mach(usage),
		.ttec_user_time_mach = usage->ru_metrics[RCT_LVL_USER].rm_time_mach,
#if CONFIG_PERVASIVE_ENERGY
		.ttec_energy_nj = usage->ru_energy_nj,
#endif // CONFIG_PERVASIVE_ENERGY
	};
}

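// Map a perflevel index to the recount CPU kind that tracks usage for that
// level.  On symmetric (non-AMP) systems there is only one kind of CPU, so
// every perflevel maps to the first index.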
static recount_cpu_kind_t
_perflevel_index_to_cpu_kind(unsigned int perflevel)
{
#if __AMP__
	extern cluster_type_t cpu_type_for_perflevel(int perflevel);
	cluster_type_t cluster = cpu_type_for_perflevel(perflevel);
#else // __AMP__
	cluster_type_t cluster = CLUSTER_TYPE_SMP;
#endif // !__AMP__

	switch (cluster) {
	case CLUSTER_TYPE_SMP:
		// Default to first index for SMP.
		return (recount_cpu_kind_t)0;
#if __AMP__
	case CLUSTER_TYPE_E:
		return RCT_CPU_EFFICIENCY;
	case CLUSTER_TYPE_P:
		return RCT_CPU_PERFORMANCE;
#endif // __AMP__
	default:
		panic("recount: unexpected CPU type %d for perflevel %d", cluster,
		    perflevel);
	}
}

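// Copy out the current thread's own counters for the given kind.  The
// snapshot is taken with interrupts disabled so the individual counts are
// mutually consistent, and the copyout is truncated to the smaller of the
// structure and the caller-provided buffer size.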
static int
_selfcounts(thread_selfcounts_kind_t kind, user_addr_t buf, size_t size)
{
	struct recount_usage usage = { 0 };
	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
	recount_current_thread_usage(&usage);
	ml_set_interrupts_enabled(interrupt_state);

	switch (kind) {
	case THSC_CPI: {
		struct thsc_cpi counts = _usage_to_cpi(&usage);
		return copyout(&counts, buf, MIN(sizeof(counts), size));
	}
	case THSC_TIME_CPI: {
		struct thsc_time_cpi counts = _usage_to_time_cpi(&usage);
		return copyout(&counts, buf, MIN(sizeof(counts), size));
	}
	case THSC_TIME_ENERGY_CPI: {
		struct thsc_time_energy_cpi counts = _usage_to_time_energy_cpi(&usage);
		return copyout(&counts, buf, MIN(sizeof(counts), size));
	}
	default:
		panic("recount: unexpected thread_selfcounts kind: %d", kind);
	}
}

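// Copy out the current thread's counters, broken out by perf-level.  Only as
// many entries are copied out as perf-levels are present on this boot (the
// `cpus=` boot-arg can reduce the count), truncated to the caller's buffer.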
static int
_selfcounts_perf_level(thread_selfcounts_kind_t kind, user_addr_t buf,
    size_t size)
{
	struct recount_usage usages[RCT_CPU_KIND_COUNT] = { 0 };
	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
	recount_current_thread_perf_level_usage(usages);
	ml_set_interrupts_enabled(interrupt_state);

	unsigned int cpu_types = ml_get_cpu_types();
	unsigned int level_count = __builtin_popcount(cpu_types);
	const size_t counts_len = MIN(MIN(recount_topo_count(RCT_TOPO_CPU_KIND),
	    RCT_CPU_KIND_COUNT), level_count);

	switch (kind) {
	case THSC_CPI_PER_PERF_LEVEL: {
		struct thsc_cpi counts[RCT_CPU_KIND_COUNT] = { 0 };
		for (unsigned int i = 0; i < counts_len; i++) {
			const recount_cpu_kind_t cpu_kind = _perflevel_index_to_cpu_kind(i);
			counts[i] = _usage_to_cpi(&usages[cpu_kind]);
		}
		return copyout(&counts, buf, MIN(sizeof(counts[0]) * counts_len, size));
	}
	case THSC_TIME_CPI_PER_PERF_LEVEL: {
		struct thsc_time_cpi counts[RCT_CPU_KIND_COUNT] = { 0 };
		for (unsigned int i = 0; i < counts_len; i++) {
			const recount_cpu_kind_t cpu_kind = _perflevel_index_to_cpu_kind(i);
			counts[i] = _usage_to_time_cpi(&usages[cpu_kind]);
		}
		return copyout(&counts, buf, MIN(sizeof(counts[0]) * counts_len, size));
	}
	case THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL: {
		struct thsc_time_energy_cpi counts[RCT_CPU_KIND_COUNT] = { 0 };
		for (unsigned int i = 0; i < counts_len; i++) {
			const recount_cpu_kind_t cpu_kind = _perflevel_index_to_cpu_kind(i);
			counts[i] = _usage_to_time_energy_cpi(&usages[cpu_kind]);
		}
		return copyout(&counts, buf, MIN(sizeof(counts[0]) * counts_len, size));
	}
	default:
		panic("recount: unexpected thread_selfcounts kind: %d", kind);
	}
}

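// The thread_selfcounts(2) system call: dispatch to the whole-thread or
// per-perf-level implementation based on the requested kind.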
int
thread_selfcounts(__unused struct proc *p,
    struct thread_selfcounts_args *uap, __unused int *ret_out)
{
	switch (uap->kind) {
	case THSC_CPI:
	case THSC_TIME_CPI:
	case THSC_TIME_ENERGY_CPI:
		return _selfcounts(uap->kind, uap->buf, uap->size);

	case THSC_CPI_PER_PERF_LEVEL:
	case THSC_TIME_CPI_PER_PERF_LEVEL:
	case THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL:
		return _selfcounts_perf_level(uap->kind, uap->buf, uap->size);

	default:
		return ENOTSUP;
	}
}
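
// For illustration only -- a minimal sketch of how a userspace caller might
// read its own counters, assuming a private wrapper with the signature
// `int thread_selfcounts(int kind, void *buf, size_t size)` that returns 0
// on success, plus the THSC_* definitions from <sys/resource_private.h>:
//
//	struct thsc_cpi cpi = { 0 };
//	if (thread_selfcounts(THSC_CPI, &cpi, sizeof(cpi)) == 0) {
//		printf("%llu cycles, %llu instructions\n",
//		    cpi.tcpi_cycles, cpi.tcpi_instructions);
//	}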

static struct proc_threadcounts_data
_usage_to_proc_threadcounts(struct recount_usage *usage)
{
	return (struct proc_threadcounts_data){
		.ptcd_instructions = recount_usage_instructions(usage),
		.ptcd_cycles = recount_usage_cycles(usage),
		.ptcd_system_time_mach = recount_usage_system_time_mach(usage),
		.ptcd_user_time_mach = usage->ru_metrics[RCT_LVL_USER].rm_time_mach,
#if CONFIG_PERVASIVE_ENERGY
		.ptcd_energy_nj = usage->ru_energy_nj,
#endif // CONFIG_PERVASIVE_ENERGY
	};
}

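// Back-end for the proc_info thread-counts flavor: report per-perf-level
// counters for the thread `tid` in process `p` as a length-prefixed header
// followed by `counts_len` entries.  The copyout is truncated to `usize` so
// callers built for fewer perf-levels still receive coherent data.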
int
proc_pidthreadcounts(
	struct proc *p,
	uint64_t tid,
	user_addr_t uaddr,
	size_t usize,
	int *size_out)
{
	struct recount_usage usages[RCT_CPU_KIND_COUNT] = { 0 };
	// Keep this layout in sync with `struct proc_threadcounts` -- this copy
	// just has the array length hard-coded to the maximum.
	struct {
		uint16_t counts_len;
		uint16_t reserved0;
		uint32_t reserved1;
		struct proc_threadcounts_data counts[RCT_CPU_KIND_COUNT];
	} counts = { 0 };

	task_t task = proc_task(p);
	if (task == TASK_NULL) {
		return ESRCH;
	}

	bool found = recount_task_thread_perf_level_usage(task, tid, usages);
	if (!found) {
		return ESRCH;
	}

	const size_t counts_len = MIN(recount_topo_count(RCT_TOPO_CPU_KIND),
	    RCT_CPU_KIND_COUNT);
	counts.counts_len = (uint16_t)counts_len;
	// The number of perflevels for this boot can be constrained by the `cpus=`
	// boot-arg, so determine the runtime number to prevent unexpected calls
	// into the machine-dependent layers from asserting.
	unsigned int cpu_types = ml_get_cpu_types();
	unsigned int level_count = __builtin_popcount(cpu_types);

	for (unsigned int i = 0; i < counts_len; i++) {
		if (i < level_count) {
			const recount_cpu_kind_t cpu_kind = _perflevel_index_to_cpu_kind(i);
			counts.counts[i] = _usage_to_proc_threadcounts(&usages[cpu_kind]);
		}
	}
	size_t copyout_size = MIN(sizeof(uint64_t) +
	    counts_len * sizeof(struct proc_threadcounts_data), usize);
	assert(copyout_size <= sizeof(counts));
	int error = copyout(&counts, uaddr, copyout_size);
	if (error == 0) {
		*size_out = (int)copyout_size;
	}
	return error;
}
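
// For illustration only -- a hypothetical userspace caller, assuming the
// private PROC_PIDTHREADCOUNTS proc_info flavor, the structures from
// <sys/proc_info.h>, and libproc's proc_pidinfo():
//
//	// Room for the header plus two perf-levels' worth of counts.
//	uint8_t buf[sizeof(struct proc_threadcounts) +
//	    2 * sizeof(struct proc_threadcounts_data)];
//	int size = proc_pidinfo(pid, PROC_PIDTHREADCOUNTS, tid,
//	    buf, sizeof(buf));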

#else // CONFIG_PERVASIVE_CPI

int
proc_pidthreadcounts(
	__unused struct proc *p,
	__unused uint64_t tid,
	__unused user_addr_t uaddr,
	__unused size_t usize,
	__unused int *ret_out)
{
	return ENOTSUP;
}

int
thread_selfcounts(__unused struct proc *p,
    __unused struct thread_selfcounts_args *uap, __unused int *ret_out)
{
	return ENOTSUP;
}

#endif // !CONFIG_PERVASIVE_CPI