/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <kperf/buffer.h>
#include <kern/monotonic.h>
#include <kern/thread.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#if CONFIG_CPU_COUNTERS

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE (machine_info.logical_cpu_max * \
    COUNTERBUF_SIZE_PER_CPU)
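
/*
 * For example, on a hypothetical machine with KPC_MAX_COUNTERS == 32 and
 * machine_info.logical_cpu_max == 8, COUNTERBUF_SIZE_PER_CPU is
 * 32 * 8 = 256 bytes and COUNTERBUF_SIZE is 8 * 256 = 2048 bytes.
 */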

/* locks */
static LCK_GRP_DECLARE(kpc_config_lckgrp, "kpc");
static LCK_MTX_DECLARE(kpc_config_lock, &kpc_config_lckgrp);

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;

boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;

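/*
 * Allocate one CPU's worth of counter storage: KPC_MAX_COUNTERS zeroed
 * 64-bit slots, tagged as diagnostic memory.
 */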
static uint64_t *
kpc_percpu_alloc(void)
{
    return kalloc_data_tag(COUNTERBUF_SIZE_PER_CPU, Z_WAITOK | Z_ZERO,
        VM_KERN_MEMORY_DIAG);
}

static void
kpc_percpu_free(uint64_t *buf)
{
    kfree_data(buf, COUNTERBUF_SIZE_PER_CPU);
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
    assert(cpu_data);
    assert(cpu_data->cpu_kpc_buf[0] == NULL);
    assert(cpu_data->cpu_kpc_buf[1] == NULL);
    assert(cpu_data->cpu_kpc_shadow == NULL);
    assert(cpu_data->cpu_kpc_reload == NULL);

    /*
     * Buffers allocated through kpc_counterbuf_alloc() are large enough to
     * store the values of every PMC on every CPU, mimicking the userspace
     * API.  That layout is a poor fit for the per-CPU kpc buffers, since:
     * 1. Per-CPU buffers don't need to be that large.
     * 2. The actual number of CPUs is not known at this point.
     *
     * Instead, CPUs call out into kpc as they are registered, and the
     * per-CPU memory is allocated here.
     */

    if ((cpu_data->cpu_kpc_buf[0] = kpc_percpu_alloc()) == NULL) {
        goto error;
    }
    if ((cpu_data->cpu_kpc_buf[1] = kpc_percpu_alloc()) == NULL) {
        goto error;
    }
    if ((cpu_data->cpu_kpc_shadow = kpc_percpu_alloc()) == NULL) {
        goto error;
    }
    if ((cpu_data->cpu_kpc_reload = kpc_percpu_alloc()) == NULL) {
        goto error;
    }

    /* success */
    return TRUE;

error:
    kpc_unregister_cpu(cpu_data);
    return FALSE;
}

void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
    assert(cpu_data);
    if (cpu_data->cpu_kpc_buf[0] != NULL) {
        kpc_percpu_free(cpu_data->cpu_kpc_buf[0]);
        cpu_data->cpu_kpc_buf[0] = NULL;
    }
    if (cpu_data->cpu_kpc_buf[1] != NULL) {
        kpc_percpu_free(cpu_data->cpu_kpc_buf[1]);
        cpu_data->cpu_kpc_buf[1] = NULL;
    }
    if (cpu_data->cpu_kpc_shadow != NULL) {
        kpc_percpu_free(cpu_data->cpu_kpc_shadow);
        cpu_data->cpu_kpc_shadow = NULL;
    }
    if (cpu_data->cpu_kpc_reload != NULL) {
        kpc_percpu_free(cpu_data->cpu_kpc_reload);
        cpu_data->cpu_kpc_reload = NULL;
    }
}

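/*
 * Record on the task whether it has forced ownership of all counters;
 * kpc_force_all_ctrs() consults this to refuse requests from other tasks.
 */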
static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
    assert(task);

    task_lock(task);
    if (state) {
        task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
    } else {
        task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
    }
    task_unlock(task);
}

bool kpc_task_get_forced_all_ctrs(task_t task);

bool
kpc_task_get_forced_all_ctrs(task_t task)
{
    return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS;
}

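/*
 * Force (val != 0) or release (val == 0) ownership of all counters on
 * behalf of a task.  Returns EACCES if a different task currently holds
 * the counters forced, 0 otherwise.
 */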
int
kpc_force_all_ctrs(task_t task, int val)
{
    boolean_t new_state = val ? TRUE : FALSE;
    boolean_t old_state = kpc_get_force_all_ctrs();

    /*
     * Refuse to do the operation if the counters are already forced by
     * another task.
     */
    if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) {
        return EACCES;
    }

    /* nothing to do if the state is not changing */
    if (old_state == new_state) {
        return 0;
    }

#if CONFIG_CPU_COUNTERS
    mt_ownership_change(new_state);
#endif /* CONFIG_CPU_COUNTERS */

    /* notify the power manager */
    if (kpc_pm_handler) {
        kpc_pm_handler(new_state ? FALSE : TRUE);
    }

    /*
     * This is a force -- ensure that counters are forced, even if power
     * management fails to acknowledge it.
     */
    if (force_all_ctrs != new_state) {
        force_all_ctrs = new_state;
    }

    /* update the task bits */
    kpc_task_set_forced_all_ctrs(task, new_state);

    return 0;
}

void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
    /*
     * Force-all-counters should still be true when the counters are being
     * made available to power management and false when counters are going
     * to be taken away.
     */
    assert(force_all_ctrs == available_to_pm);

    /*
     * Counters being available to power management means that no one is
     * forcing all counters.
     */
    force_all_ctrs = available_to_pm ? FALSE : TRUE;
}

int
kpc_get_force_all_ctrs(void)
{
    return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
    return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
    return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
    uint64_t pmc_mask = 0ULL;

    assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

    if (ctr < kpc_fixed_count()) {
        return kpc_controls_fixed_counters();
    }

    /*
     * By default kpc manages all PMCs, but if the Power Manager registered
     * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
     * However, kpc takes ownership back if a task acquired all PMCs via
     * force_all_ctrs.
     */
    pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
    if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) {
        return FALSE;
    }

    return TRUE;
}
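
/*
 * Return the mask of classes whose counters are currently running.
 */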
uint32_t
kpc_get_running(void)
{
    uint64_t pmc_mask = 0;
    uint32_t cur_state = 0;

    if (kpc_is_running_fixed()) {
        cur_state |= KPC_CLASS_FIXED_MASK;
    }

    pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
    if (kpc_is_running_configurable(pmc_mask)) {
        cur_state |= KPC_CLASS_CONFIGURABLE_MASK;
    }

    pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
    if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) {
        cur_state |= KPC_CLASS_POWER_MASK;
    }

    return cur_state;
}

/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
    int enabled = 0, offset = 0;
    uint64_t pmc_mask = 0ULL;

    assert(buf);

    enabled = ml_set_interrupts_enabled(FALSE);

    /* grab counters and CPU number as close as possible */
    if (curcpu) {
        *curcpu = cpu_number();
    }

    if (classes & KPC_CLASS_FIXED_MASK) {
        kpc_get_fixed_counters(&buf[offset]);
        offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
        kpc_get_configurable_counters(&buf[offset], pmc_mask);
        offset += kpc_popcount(pmc_mask);
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
        kpc_get_configurable_counters(&buf[offset], pmc_mask);
        offset += kpc_popcount(pmc_mask);
    }

    ml_set_interrupts_enabled(enabled);

    return offset;
}

/* generic counter reading function */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
    assert(buf);

    /*
     * Unlike reading the current CPU's counters, reading counters from all
     * CPUs is architecture-dependent.  This lets kpc make the most of the
     * platform when memory-mapped counter registers are supported.
     */
    if (all_cpus) {
        return kpc_get_all_cpus_counters(classes, curcpu, buf);
    } else {
        return kpc_get_curcpu_counters(classes, curcpu, buf);
    }
}

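/*
 * A minimal usage sketch (hypothetical caller), assuming the fixed class
 * is already configured and running:
 *
 *    uint64_t *buf = kpc_counterbuf_alloc();
 *    int cpu = 0;
 *    int count = kpc_get_cpu_counters(FALSE, KPC_CLASS_FIXED_MASK, &cpu, buf);
 *    // the first `count` entries of buf hold this CPU's fixed counters
 *    kpc_counterbuf_free(buf);
 */

/*
 * Copy out saved shadow counter values rather than reading the hardware;
 * returns the number of values written to buf.
 */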
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
    int curcpu_id;
    uint32_t cfg_count = kpc_configurable_count(), offset = 0;
    uint64_t pmc_mask = 0ULL;
    boolean_t enabled;

    assert(buf);

    enabled = ml_set_interrupts_enabled(FALSE);

    /* read the CPU number only after interrupts are disabled */
    curcpu_id = cpu_number();
    if (curcpu) {
        *curcpu = curcpu_id;
    }

    for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
        /* filter if the caller did not request all cpus */
        if (!all_cpus && (cpu != curcpu_id)) {
            continue;
        }

        if (classes & KPC_CLASS_FIXED_MASK) {
            uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
            memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
            offset += count;
        }

        if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
            pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

            for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
                if ((1ULL << cfg_ctr) & pmc_mask) {
                    buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
                }
            }
        }

        if (classes & KPC_CLASS_POWER_MASK) {
            pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

            for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
                if ((1ULL << cfg_ctr) & pmc_mask) {
                    buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
                }
            }
        }
    }

    ml_set_interrupts_enabled(enabled);

    return offset;
}

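/*
 * Return how many counter values a read of the given classes yields.
 */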
uint32_t
kpc_get_counter_count(uint32_t classes)
{
    uint32_t count = 0;

    if (classes & KPC_CLASS_FIXED_MASK) {
        count += kpc_fixed_count();
    }

    if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
        uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
        uint32_t pmc_cnt = kpc_popcount(pmc_msk);
        count += pmc_cnt;
    }

    return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
    uint32_t count = 0;

    if (classes & KPC_CLASS_FIXED_MASK) {
        count += kpc_fixed_config_count();
    }

    if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
        uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
        count += kpc_configurable_config_count(pmc_mask);
    }

    if ((classes & KPC_CLASS_RAWPMU_MASK) &&
        (!kpc_multiple_clients() || force_all_ctrs)) {
        count += kpc_rawpmu_config_count();
    }

    return count;
}

int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
    uint32_t count = 0;

    assert(current_config);

    if (classes & KPC_CLASS_FIXED_MASK) {
        kpc_get_fixed_config(&current_config[count]);
        count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
        kpc_get_configurable_config(&current_config[count], pmc_mask);
        count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
        kpc_get_configurable_config(&current_config[count], pmc_mask);
        count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
    }

    if (classes & KPC_CLASS_RAWPMU_MASK) {
        /*
         * Clients shouldn't ask for config words that aren't available.
         * Most likely, they'd misinterpret the returned buffer if we
         * allowed this.
         */
        if (kpc_multiple_clients() && !force_all_ctrs) {
            return EPERM;
        }
        kpc_get_rawpmu_config(&current_config[count]);
        count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
    }

    return 0;
}

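/*
 * Common backend for setting counter configuration.  allow_list is passed
 * through to the machine layer; the kernel entry point sets it, the
 * external one does not.
 */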
static int
_kpc_set_config_internal(uint32_t classes, kpc_config_t *configv, bool allow_list)
{
    int ret = 0;
    struct kpc_config_remote mp_config = {
        .classes = classes, .configv = configv,
        .pmc_mask = kpc_get_configurable_pmc_mask(classes),
        .allow_list = allow_list,
    };

    assert(configv);

    /* don't allow RAWPMU configuration when sharing counters */
    if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients() &&
        !force_all_ctrs) {
        return EPERM;
    }

    /* no client has the right to modify both classes */
    if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
        (classes & (KPC_CLASS_POWER_MASK))) {
        return EPERM;
    }

    lck_mtx_lock(&kpc_config_lock);

    /* translate the power class for the machine layer */
    if (classes & KPC_CLASS_POWER_MASK) {
        mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
    }

    ret = kpc_set_config_arch(&mp_config);

    lck_mtx_unlock(&kpc_config_lock);

    return ret;
}

int
kpc_set_config_kernel(uint32_t classes, kpc_config_t *configv)
{
    return _kpc_set_config_internal(classes, configv, true);
}

int kpc_set_config_external(uint32_t classes, kpc_config_t *configv);
int
kpc_set_config_external(uint32_t classes, kpc_config_t *configv)
{
    return _kpc_set_config_internal(classes, configv, false);
}

uint32_t
kpc_get_counterbuf_size(void)
{
    return COUNTERBUF_SIZE;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
    return kalloc_data_tag(COUNTERBUF_SIZE, Z_WAITOK | Z_ZERO,
        VM_KERN_MEMORY_DIAG);
}

void
kpc_counterbuf_free(uint64_t *buf)
{
    kfree_data(buf, COUNTERBUF_SIZE);
}

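/*
 * Take a kperf sample attributed to a counter, typically on behalf of a
 * PMI; the trigger is reported to kperf as TRIGGER_TYPE_PMI.
 */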
void
kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config,
    uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags)
{
    struct kperf_sample sbuf;

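    /*
     * Pack the counter description into a single trace argument: the
     * configuration in the low 32 bits, the counter number starting at bit
     * 32, and the flags starting at bit 48 (assuming the lower fields do
     * not overflow into each other).
     */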
    uint64_t desc = config | (uint64_t)counter << 32 | (uint64_t)flags << 48;

    BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START, desc, count, pc);

    thread_t thread = current_thread();
    task_t task = get_threadtask(thread);

    struct kperf_context ctx = {
        .cur_thread = thread,
        .cur_task = task,
        .cur_pid = task_pid(task),
        .trigger_type = TRIGGER_TYPE_PMI,
        .trigger_id = 0,
    };

    int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

    BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}

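/*
 * Set the sampling period for each counter in the given classes; the
 * values are handed to the machine layer via kpc_set_period_arch().
 */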
int
kpc_set_period(uint32_t classes, uint64_t *val)
{
    struct kpc_config_remote mp_config = {
        .classes = classes, .configv = val,
        .pmc_mask = kpc_get_configurable_pmc_mask(classes)
    };

    assert(val);

    /* no client has the right to modify both classes */
    if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
        (classes & (KPC_CLASS_POWER_MASK))) {
        return EPERM;
    }

    lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
    if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
        lck_mtx_unlock(&kpc_config_lock);
        return EPERM;
    }
#else
    if (classes & KPC_CLASS_FIXED_MASK) {
        lck_mtx_unlock(&kpc_config_lock);
        return EINVAL;
    }
#endif

    /* translate the power class for the machine layer */
    if (classes & KPC_CLASS_POWER_MASK) {
        mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
    }

    kprintf("kpc: setting period, classes %u\n", classes);
    kpc_set_period_arch(&mp_config);

    lck_mtx_unlock(&kpc_config_lock);

    return 0;
}

int
kpc_get_period(uint32_t classes, uint64_t *val)
{
    uint32_t count = 0;
    uint64_t pmc_mask = 0ULL;

    assert(val);

    lck_mtx_lock(&kpc_config_lock);

    if (classes & KPC_CLASS_FIXED_MASK) {
        /* convert reload values to periods */
        count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
        for (uint32_t i = 0; i < count; ++i) {
            *val++ = kpc_fixed_max() - FIXED_RELOAD(i);
        }
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

        /* convert reload values to periods */
        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i) {
            if ((1ULL << i) & pmc_mask) {
                *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
            }
        }
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

        /* convert reload values to periods */
        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i) {
            if ((1ULL << i) & pmc_mask) {
                *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
            }
        }
    }

    lck_mtx_unlock(&kpc_config_lock);

    return 0;
}

int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
    uint32_t count = 0;
    uint64_t pmc_mask = 0ULL;

    assert(val);

    /*
     * NOTE: the behavior is undefined if a PMI occurs while action IDs are
     * being set.
     */
    lck_mtx_lock(&kpc_config_lock);

    if (classes & KPC_CLASS_FIXED_MASK) {
        count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
        memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
        val += count;
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i) {
            if ((1ULL << i) & pmc_mask) {
                CONFIGURABLE_ACTIONID(i) = *val++;
            }
        }
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i) {
            if ((1ULL << i) & pmc_mask) {
                CONFIGURABLE_ACTIONID(i) = *val++;
            }
        }
    }

    lck_mtx_unlock(&kpc_config_lock);

    return 0;
}

int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
    uint32_t count = 0;
    uint64_t pmc_mask = 0ULL;

    assert(val);

    lck_mtx_lock(&kpc_config_lock);

    if (classes & KPC_CLASS_FIXED_MASK) {
        count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
        memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
        val += count;
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i) {
            if ((1ULL << i) & pmc_mask) {
                *val++ = CONFIGURABLE_ACTIONID(i);
            }
        }
    }

    if (classes & KPC_CLASS_POWER_MASK) {
        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i) {
            if ((1ULL << i) & pmc_mask) {
                *val++ = CONFIGURABLE_ACTIONID(i);
            }
        }
    }

    lck_mtx_unlock(&kpc_config_lock);

    return 0;
}

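/*
 * Start the given classes and stop all others: every configurable PMC is
 * targeted, and only the PMCs belonging to the requested classes are
 * marked as running.
 */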
int
kpc_set_running(uint32_t classes)
{
    uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
    struct kpc_running_remote mp_config = {
        .classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
    };

    /* target all available PMCs */
    mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

    /* translate the power class for the machine layer */
    if (classes & KPC_CLASS_POWER_MASK) {
        mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;
    }

    /* generate the state of each configurable PMC */
    mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

    return kpc_set_running_arch(&mp_config);
}

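/*
 * Legacy registration entry point: reserves the PMCs in mask 0x38
 * (counters 3 through 5) with a custom configuration.
 */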
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
    return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
    boolean_t custom_config)
{
    uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
    uint64_t req_mask = 0ULL;

    /* pre-condition */
    assert(handler != NULL);
    assert(kpc_pm_handler == NULL);

    /* check number of counters requested */
    req_mask = (pmc_mask & all_mask);
    assert(kpc_popcount(req_mask) <= kpc_configurable_count());

    /* save the power manager states */
    kpc_pm_has_custom_config = custom_config;
    kpc_pm_pmc_mask = req_mask;
    kpc_pm_handler = handler;

    printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
        req_mask, custom_config);

    /* post-condition */
    {
        uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
        uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
        assert((cfg_count + pwr_count) == kpc_configurable_count());
    }

    return force_all_ctrs ? FALSE : TRUE;
}

void
kpc_release_pm_counters(void)
{
    /* pre-condition */
    assert(kpc_pm_handler != NULL);

    /* release the counters */
    kpc_pm_has_custom_config = FALSE;
    kpc_pm_pmc_mask = 0ULL;
    kpc_pm_handler = NULL;

    printf("kpc: pm released counters\n");

    /* post-condition */
    assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
    return (uint8_t)__builtin_popcountll(value);
}

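/*
 * Build the bitmask of configurable PMCs covered by the given classes,
 * accounting for any power-manager reservation and the force-all-counters
 * override.
 */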
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
    uint32_t configurable_count = kpc_configurable_count();
    uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

    /* no configurable classes requested, or no configurable counters */
    if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
        (configurable_count == 0)) {
        goto exit;
    }

    assert(configurable_count < 64);
    all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        if (force_all_ctrs == TRUE) {
            cfg_mask |= all_cfg_pmcs_mask;
        } else {
            cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
        }
    }

    /*
     * The power class exists iff:
     *  - no task has acquired all PMCs via force_all_ctrs, and
     *  - the power manager is registered and uses kpc to program its PMCs.
     */
    if ((force_all_ctrs == FALSE) &&
        (kpc_pm_handler != NULL) &&
        (kpc_pm_has_custom_config == FALSE) &&
        (classes & KPC_CLASS_POWER_MASK)) {
        pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
    }

exit:
    /* post-conditions */
    assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0);
    assert(kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
    assert((cfg_mask & pwr_mask) == 0ULL);

    return cfg_mask | pwr_mask;
}

#else // CONFIG_CPU_COUNTERS

/*
 * Ensure there are stubs available for kexts, even if xnu isn't built to
 * support CPU counters.
 */

void
kpc_pm_acknowledge(boolean_t __unused available_to_pm)
{
}

boolean_t
kpc_register_pm_handler(kpc_pm_handler_t __unused handler)
{
    return FALSE;
}

boolean_t
kpc_reserve_pm_counters(
    uint64_t __unused pmc_mask,
    kpc_pm_handler_t __unused handler,
    boolean_t __unused custom_config)
{
    return TRUE;
}

void
kpc_release_pm_counters(void)
{
}

int
kpc_get_force_all_ctrs(void)
{
    return 0;
}

int
kpc_get_cpu_counters(
    boolean_t __unused all_cpus,
    uint32_t __unused classes,
    int * __unused curcpu,
    uint64_t * __unused buf)
{
    return ENOTSUP;
}

int
kpc_get_shadow_counters(
    boolean_t __unused all_cpus,
    uint32_t __unused classes,
    int * __unused curcpu,
    uint64_t * __unused buf)
{
    return ENOTSUP;
}

uint32_t
kpc_get_running(void)
{
    return 0;
}

int
kpc_set_running(uint32_t __unused classes)
{
    return ENOTSUP;
}

int
kpc_get_config(
    uint32_t __unused classes,
    kpc_config_t * __unused current_config)
{
    return ENOTSUP;
}

int kpc_set_config_external(uint32_t classes, kpc_config_t *configv);
int
kpc_set_config_external(
    uint32_t __unused classes,
    kpc_config_t * __unused configv)
{
    return ENOTSUP;
}

#endif // !CONFIG_CPU_COUNTERS